code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
import sys
import warnings
try:
# Use setuptools if available, for install_requires (among other things).
import setuptools
from setuptools import setup
except ImportError:
setuptools = None
from distutils.core import setup
# The following code is copied from
# https://github.com/mongodb/mongo-python-driver/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError
from distutils.errors import DistutilsPlatformError, DistutilsExecError
# Determine which exception types mean "the C extension failed to build",
# so installation can fall back to pure Python instead of aborting.
if sys.platform == 'win32' and sys.version_info > (2, 6):
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    build_errors = (CCompilerError, DistutilsExecError,
                    DistutilsPlatformError, IOError)
else:
    build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class custom_build_ext(build_ext):
    """Allow C extension building to fail.
    The C extension speeds up websocket masking, but is not essential.
    """

    # %-template filled with (what failed, a platform-specific hint).
    warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for Tornado to run,
although they do result in significant speed improvements for
websockets.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
RedHat, CentOS, and Fedora users should issue the following command:
$ sudo yum install gcc python-devel
If you are seeing this message on OSX please read the documentation
here:
http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""

    def run(self):
        # Downgrade a platform-level failure (no compiler at all) to a warning.
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            # sys.exc_info() instead of "except ... as e" for old-Python compat.
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(self.warning_message % ("Extension modules",
                                                  "There was an issue with "
                                                  "your platform configuration"
                                                  " - see above."))

    def build_extension(self, ext):
        # Build one extension; swallow per-extension compile errors with a warning.
        name = ext.name
        if sys.version_info[:3] >= (2, 4, 0):
            try:
                build_ext.build_extension(self, ext)
            except build_errors:
                e = sys.exc_info()[1]
                sys.stdout.write('%s\n' % str(e))
                warnings.warn(self.warning_message % ("The %s extension "
                                                      "module" % (name,),
                                                      "The output above "
                                                      "this warning shows how "
                                                      "the compilation "
                                                      "failed."))
        else:
            # Interpreters older than 2.4: do not even attempt the build.
            warnings.warn(self.warning_message % ("The %s extension "
                                                  "module" % (name,),
                                                  "Please use Python >= 2.4 "
                                                  "to take advantage of the "
                                                  "extension."))
# Keyword arguments for setup() that depend on the local environment.
kwargs = {}

version = "0.1.3"

# Long description comes straight from the README.
with open("README.rst") as f:
    kwargs["long_description"] = f.read()

if setuptools is not None:
    # If setuptools is not available, you're on your own for dependencies.
    install_requires = ["tornado", "celery", "pika"]
    kwargs["install_requires"] = install_requires

setup(
    name="totoro",
    version=version,
    packages=["totoro", "totoro.test", "totoro.test.celery_tasks"],
    extras_require={
        "redis": ["redis", "tornado-redis"]
    },
    author="Alex Lee",
    author_email="lyd.alexlee.public@gmail.com",
    url="https://github.com/Strawhatfy/totoro",
    license="http://www.apache.org/licenses/LICENSE-2.0",
    description="Celery integration with Tornado",
    keywords=['tornado', 'celery', 'amqp', 'redis'],
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython"
    ],
    # custom_build_ext tolerates C-extension build failures (see above).
    cmdclass={"build_ext": custom_build_ext},
    **kwargs
)
| Strawhatfy/totoro | setup.py | Python | apache-2.0 | 4,676 |
from contextlib import contextmanager
import logging
import unittest
from .context import Context
log = logging.getLogger(__name__)
class EphemeralContextTestCase(unittest.TestCase):
    """TestCase base that gives each test a fresh, started Context.

    Subclasses use ``self.context``; it is started before every test and
    stopped afterwards.
    """

    def setUp(self):
        self.context = Context()
        log.debug('XXX Starting context')
        self.context.start()

    def tearDown(self):
        log.debug('XXX Stopping context')
        self.context.stop()
@contextmanager
def ephemeral_context(**kw):
    """Yield a started Context, guaranteeing it is stopped on exit.

    The stop happens in a ``finally`` block, so the Context is torn down
    even when the ``with`` body raises (the original version leaked a
    running Context in that case).
    """
    context = Context(**kw)
    context.start()
    try:
        yield context
    finally:
        context.stop()
| wickman/compactor | compactor/testing.py | Python | apache-2.0 | 507 |
from models import db
from models.Post import Post
class PostFile(db.Model):
    """Associates an uploaded file name with a Post (one row per file)."""
    __tablename__ = 'PostFile'
    # Surrogate primary key.
    Id = db.Column(db.Integer, primary_key = True)
    # FK to Post.Id. NOTE(review): this attribute shadows the imported Post
    # model class inside this class body (the RHS is evaluated first, so the
    # ForeignKey still refers to the model).
    Post = db.Column(db.Integer, db.ForeignKey(Post.Id))
    FileName = db.Column(db.String(128))

    def __init__(self, post, file):
        # `post` is expected to be a Post.Id value; `file` the stored file name.
        self.Post = post
        self.FileName = file
| goors/flask-microblog | models/PostFile.py | Python | apache-2.0 | 335 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import collections as _collections
import os as _os
import proton as _proton
import proton.handlers as _handlers
import proton.reactor as _reactor
import uuid as _uuid
import shutil as _shutil
import subprocess as _subprocess
import sys as _sys
import time as _time
import tempfile as _tempfile
import pathlib as _pathlib
class Broker(object):
    """Minimal in-memory AMQP broker driven by a proton reactor Container.

    Queues are created on demand by the handler; SASL user auth and TLS
    (amqps) are optional.  Subclass and override info/notice/warn to get
    log output.
    """

    def __init__(self, scheme, host, port, id=None, user=None, password=None, ready_file=None,
                 cert=None, key=None, key_password=None, trusted_db=None):
        self.scheme = scheme
        self.host = host
        self.port = port
        self.id = id
        self.user = user
        self.password = password
        # If set, "ready\n" is written here once the listener is up.
        self.ready_file = ready_file
        self.cert = cert
        self.key = key
        self.key_password = key_password
        self.trusted_db = trusted_db

        if self.id is None:
            self.id = "broker-{0}".format(_uuid.uuid4())

        self.container = _reactor.Container(_Handler(self), self.id)

        # Temporary SASL config directory; set by _init_sasl_config().
        self._config_dir = None

    def init(self):
        """Validate configuration and set up SASL/TLS prerequisites."""
        if self.user is not None:
            if self.password is None:
                self.fail("A password is required for user authentication")

            self._init_sasl_config()

        if self.scheme == "amqps":
            if self.key is None or self.cert is None:
                self.fail("if scheme is amqps, key and cert files must be specified")
            if not _pathlib.Path(self.key).is_file():
                self.fail("key file %s does not exist" % (self.key))
            if not _pathlib.Path(self.cert).is_file():
                self.fail("cert file %s does not exist" % (self.cert))
            if self.trusted_db and not _pathlib.Path(self.trusted_db).is_file():
                self.fail("trusted db file %s does not exist" % (self.trusted_db))

    def _init_sasl_config(self):
        """Create a throwaway SASL config dir and register self.user in it."""
        self._config_dir = _tempfile.mkdtemp(prefix="brokerlib-", suffix="")
        config_file = _os.path.join(self._config_dir, "proton-server.conf")
        sasldb_file = _os.path.join(self._config_dir, "users.sasldb")

        # Point proton at our private SASL configuration.
        _os.environ["PN_SASL_CONFIG_PATH"] = self._config_dir

        with open(config_file, "w") as f:
            f.write("sasldb_path: {0}\n".format(sasldb_file))
            f.write("mech_list: PLAIN SCRAM-SHA-1\n")

        command = "echo '{0}' | saslpasswd2 -p -f {1} '{2}'".format \
            (self.password, sasldb_file, self.user)

        try:
            _subprocess.check_call(command, shell=True)
        except _subprocess.CalledProcessError as e:
            self.fail("Failed adding user to SASL database: {0}", e)

    # Logging hooks; silent by default, overridden by subclasses.
    def info(self, message, *args):
        pass

    def notice(self, message, *args):
        pass

    def warn(self, message, *args):
        pass

    def error(self, message, *args):
        _sys.stderr.write("{0}\n".format(message.format(*args)))
        _sys.stderr.flush()

    def fail(self, message, *args):
        """Report *message* and terminate the process."""
        self.error(message, *args)
        _sys.exit(1)

    def run(self):
        """Run the reactor loop, then remove the temporary SASL directory.

        BUG FIX: the cleanup previously removed ``self.dir`` (a nonexistent
        attribute) and called ``_os.path.exists(None)`` when SASL was never
        configured; both are corrected to use ``self._config_dir`` guarded
        against ``None``.
        """
        self.container.run()

        if self._config_dir is not None and _os.path.exists(self._config_dir):
            _shutil.rmtree(self._config_dir, ignore_errors=True)
class _Queue(object):
    """One named queue: its stored messages and its attached consumer links."""

    def __init__(self, broker, address):
        self.broker = broker
        self.address = address

        self.messages = _collections.deque()
        self.consumers = _collections.deque()

        self.broker.info("Created {0}", self)

    def __repr__(self):
        return "queue '{0}'".format(self.address)

    def add_consumer(self, link):
        # Consumers are sender links (broker -> client).
        assert link.is_sender
        assert link not in self.consumers

        self.consumers.append(link)

        self.broker.info("Added consumer for {0} to {1}", link.connection, self)

    def remove_consumer(self, link):
        assert link.is_sender

        try:
            self.consumers.remove(link)
        except ValueError:
            # Link was never attached (or already removed); nothing to do.
            return

        self.broker.info("Removed consumer for {0} from {1}", link.connection, self)

    def store_message(self, delivery, message):
        self.messages.append(message)

        self.broker.notice("Stored {0} from {1} on {2}", message, delivery.connection, self)

    def forward_messages(self):
        """Deliver stored messages to consumers, limited by their credit.

        After sending, the consumer deque is rotated by the number of
        messages sent so later calls start from a different consumer
        (round-robin fairness).
        """
        credit = sum([x.credit for x in self.consumers])
        sent = 0

        if credit == 0:
            return

        while sent < credit:
            for consumer in self.consumers:
                if consumer.credit == 0:
                    continue

                try:
                    message = self.messages.popleft()
                except IndexError:
                    # Queue drained; rotate before returning.
                    self.consumers.rotate(sent)
                    return

                consumer.send(message)
                sent += 1

                self.broker.notice("Forwarded {0} on {1} to {2}", message, self, consumer.connection)

        self.consumers.rotate(sent)
class _Handler(_handlers.MessagingHandler):
    """Proton event handler implementing queue-per-address broker semantics."""

    def __init__(self, broker):
        super(_Handler, self).__init__()

        self.broker = broker
        # Maps address -> _Queue; populated lazily by get_queue().
        self.queues = dict()
        self.verbose = False

    def on_start(self, event):
        """Start listening on the configured interface (with TLS for amqps)."""
        interface = "{0}://{1}:{2}".format(self.broker.scheme, self.broker.host, self.broker.port)

        if self.broker.scheme == "amqps":
            server_ssl_domain = event.container.ssl.server
            server_ssl_domain.set_credentials(self.broker.cert, self.broker.key, self.broker.key_password)
            if self.broker.trusted_db:
                # With a trust database, require and verify client certs.
                server_ssl_domain.set_trusted_ca_db(self.broker.trusted_db)
                server_ssl_domain.set_peer_authentication(_proton.SSLDomain.VERIFY_PEER, self.broker.trusted_db)
            else:
                server_ssl_domain.set_peer_authentication(_proton.SSLDomain.ANONYMOUS_PEER)

        self.acceptor = event.container.listen(interface)

        self.broker.notice("Listening for connections on '{0}'", interface)

        if self.broker.ready_file is not None:
            # Signal external watchers (e.g. a test harness) that we are up.
            _time.sleep(0.1) # XXX
            with open(self.broker.ready_file, "w") as f:
                f.write("ready\n")

    def get_queue(self, address):
        """Return the queue for *address*, creating it on first use."""
        try:
            queue = self.queues[address]
        except KeyError:
            queue = self.queues[address] = _Queue(self.broker, address)

        return queue

    def on_link_opening(self, event):
        """Attach opening links: senders become consumers, receivers get a target."""
        if event.link.is_sender:
            if event.link.remote_source.dynamic:
                # Dynamic source: synthesize a per-connection address.
                address = "{0}/{1}".format(event.connection.remote_container, event.link.name)
            else:
                address = event.link.remote_source.address

            assert address is not None

            event.link.source.address = address

            queue = self.get_queue(address)
            queue.add_consumer(event.link)

        if event.link.is_receiver:
            address = event.link.remote_target.address
            event.link.target.address = address

    def on_link_closing(self, event):
        if event.link.is_sender:
            queue = self.queues[event.link.source.address]
            queue.remove_consumer(event.link)

    def on_connection_init(self, event):
        # Test broker: allow e.g. PLAIN over non-TLS transports.
        event.transport.sasl().allow_insecure_mechs=True

    def on_connection_opening(self, event):
        # XXX I think this should happen automatically
        event.connection.container = event.container.container_id

    def on_connection_opened(self, event):
        self.broker.notice("Opened connection from {0}", event.connection)

    def on_connection_closing(self, event):
        self.remove_consumers(event.connection)

    def on_connection_closed(self, event):
        self.broker.notice("Closed connection from {0}", event.connection)

    def on_disconnected(self, event):
        self.broker.notice("Disconnected from {0}", event.connection)
        self.remove_consumers(event.connection)

    def remove_consumers(self, connection):
        """Detach all sender links of *connection* from their queues."""
        link = connection.link_head(_proton.Endpoint.REMOTE_ACTIVE)

        while link is not None:
            if link.is_sender:
                queue = self.queues[link.source.address]
                queue.remove_consumer(link)

            link = link.next(_proton.Endpoint.REMOTE_ACTIVE)

    def on_link_flow(self, event):
        # Honor drain requests by consuming the remaining credit.
        if event.link.is_sender and event.link.drain_mode:
            event.link.drained()

    def on_sendable(self, event):
        # A consumer gained credit; try to push queued messages to it.
        queue = self.get_queue(event.link.source.address)
        queue.forward_messages()

    def on_settled(self, event):
        """Log the remote settlement outcome of a delivery."""
        template = "Container '{0}' {1} {2} to {3}"
        container = event.connection.remote_container
        source = event.link.source
        delivery = event.delivery

        if delivery.remote_state == delivery.ACCEPTED:
            self.broker.info(template, container, "accepted", delivery, source)
        elif delivery.remote_state == delivery.REJECTED:
            self.broker.warn(template, container, "rejected", delivery, source)
        elif delivery.remote_state == delivery.RELEASED:
            self.broker.notice(template, container, "released", delivery, source)
        elif delivery.remote_state == delivery.MODIFIED:
            self.broker.notice(template, container, "modified", delivery, source)

    def on_message(self, event):
        """Store an incoming message on its queue and attempt forwarding."""
        message = event.message
        delivery = event.delivery
        address = event.link.target.address

        if address is None:
            # Anonymous relay: fall back to the message's own address.
            address = message.address

        queue = self.get_queue(address)
        queue.store_message(delivery, message)
        queue.forward_messages()

    #
    # def on_unhandled(self, name, event):
    #     _sys.stderr.write("{0} {1}\n".format(name, event))
    #     _sys.stderr.flush()
if __name__ == "__main__":
    def _print(message, *args):
        """Format *message* with *args* and write it to stderr."""
        message = message.format(*args)
        _sys.stderr.write("{0}\n".format(message))
        _sys.stderr.flush()

    class _Broker(Broker):
        # When run as a script, route all log levels to stderr.
        def info(self, message, *args): _print(message, *args)
        def notice(self, message, *args): _print(message, *args)
        def warn(self, message, *args): _print(message, *args)

    try:
        host, port = _sys.argv[1:3]
    except ValueError:
        # BUG FIX: unpacking a too-short slice raises ValueError, not
        # IndexError, so the usage message was never printed before.
        _print("Usage: brokerlib <host> <port>")
        _sys.exit(1)

    try:
        port = int(port)
    except ValueError:
        _print("The port must be an integer")
        _sys.exit(1)

    # BUG FIX: Broker.__init__ takes the scheme as its first positional
    # argument; the old call _Broker(host, port) shifted every argument
    # and always raised a TypeError.  Plain AMQP matches a bare host/port.
    broker = _Broker("amqp", host, port)

    try:
        broker.run()
    except KeyboardInterrupt:
        pass
| tabish121/quiver | python/brokerlib.py | Python | apache-2.0 | 11,350 |
# -*- coding: utf-8 -*-
from model.group import Group
# Fixed (static) test data: two predefined group fixtures.
testData = [
    Group(name='name1', header='header1', footer='footer1'),
    Group(name='name2', header='header2', footer='footer2')
]
| Droriel/python_training | data/groups.py | Python | apache-2.0 | 216 |
# -*- coding: utf-8 -*-
from random import randrange
from model.group import Group
import random
import pytest
def test_delete_some_group(app, db, check_ui):
    """Delete a random group and verify the DB list shrinks by exactly it.

    When *check_ui* is true, the UI group list is compared against the DB
    list as well.  (Fix: the final assert line had dataset-extraction
    garbage fused onto it, making the module unparsable.)
    """
    # Precondition: at least one group must exist to be deleted.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    with pytest.allure.step("Given a group list"):
        old_groups = db.get_group_list()
    with pytest.allure.step("When get random group"):
        group = random.choice(old_groups)
    with pytest.allure.step("When I delete %s" % group):
        app.group.delete_group_by_id(group.id)
    with pytest.allure.step("Then the new group list is equal to the old list with the deleted group"):
        new_groups = db.get_group_list()
        assert len(old_groups) - 1 == len(new_groups)
        old_groups.remove(group)
        assert old_groups == new_groups
        if check_ui:
            assert sorted(new_groups, key=Group.id_or_max) == \
                sorted(app.group.get_group_list(), key=Group.id_or_max)
#!/usr/bin/env python3
""" Program that convert a pdf to a text file using Tesseract OCR.
The pdf file is first converted to a png file using ghostscript,
then the png file if processed by Tesseract.
"""
import os
import subprocess
import glob
import platform
import argparse
# Command-line interface: one positional argument, the folder holding the
# pdf files to convert.
parser = argparse.ArgumentParser(description='Convert pdf files to txt files in the given folder.')
parser.add_argument('folder',
                    help='folder where the pdf files are stored')
args = parser.parse_args()
input_dic = vars(args)
print('Selected pdf folder: ', input_dic['folder'])
PDF_PATH = input_dic['folder']
#PDF_PATH = '/media/benjamin/Elements/pdfs/'
def png_to_txt(pngpath, short_name, txtpath, log_file):
    """Extract the text from a set of png files with Tesseract OCR.

    The png files associated to a single pdf file are numbered according
    to the page; they share the same *short_name*.  Per-page failures are
    appended to *log_file* and do not abort the remaining pages.
    """
    png_in = os.path.join(pngpath, short_name)
    # Iterate over the pages of the document (different png files)
    for pngfile in glob.glob(png_in + '*'):
        path, filename = os.path.split(pngfile)
        txtfile = filename[0:-4]  # tesseract appends the '.txt' suffix itself
        txt_out = os.path.join(txtpath, txtfile)
        try:
            cmd_png2txt = 'tesseract ' + pngfile + ' ' + txt_out + ' -l fra+eng'
            proc_results = subprocess.run(cmd_png2txt.split(),
                                          stdout=subprocess.PIPE, timeout=60)
            if proc_results.returncode:
                print('Error encountered with file: {}\n'.format(filename))
                with open(log_file, 'a') as logfile:
                    logfile.write('Error with file: {}\n'.format(filename))  # report errors
            else:
                print('Text extracted from file: {}'.format(filename))
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  Typical failures here are
            # subprocess.TimeoutExpired or a missing tesseract binary.
            print('error extracting text with file {}'.format(filename))
            with open(log_file, 'a') as logfile:
                logfile.write('Error with file (exception raised): {}\n'.format(filename))  # report errors
def pdf_to_png(pdf_file, short_name, png_path, page_limit=4):
    """Convert the pdf to png, each page of the pdf gives a different png file."""
    # '%d' in the output template is expanded by ghostscript per page.
    out_file = os.path.join(png_path, short_name + '.%d.png')
    # Ghostscript ships under a different executable name on Windows.
    gs_binary = 'gswin32c' if platform.system() == 'Windows' else 'gs'
    cmd_pdf2png = (gs_binary +
                   ' -dSAFER -dNOPAUSE -q -r300x300 -sDEVICE=pnggray -dBATCH -dLastPage=' +
                   str(page_limit) +
                   ' -sOutputFile=' + out_file + ' ' + pdf_file)
    return subprocess.run(cmd_pdf2png.split(), stdout=subprocess.PIPE, timeout=60)
#PDF_PATH = '/media/benjamin/Elements/pdfs/'
LOG_FILE1 = 'logfile_pdf2png.txt'  # ghostscript (pdf -> png) failures
LOG_FILE2 = 'logfile_png2txt.txt'  # tesseract (png -> txt) failures
# initiate log file to report errors
with open(LOG_FILE1, 'a') as logfile:
    logfile.write('Logfile produced by pdf2txt.py\n')
with open(LOG_FILE2, 'a') as logfile:
    logfile.write('Logfile produced by pdf2txt.py\n')
# init paths
png_path = os.path.join(PDF_PATH, 'png')
txt_path = os.path.join(PDF_PATH, 'txt')
if not os.path.exists(png_path):
    os.makedirs(png_path)
if not os.path.exists(txt_path):
    os.makedirs(txt_path)
# Loop over all the file in the pdf folder
nb_files = len(list(glob.glob(os.path.join(PDF_PATH, '*.pdf'))))
for idx, pdf_file in enumerate(glob.glob(os.path.join(PDF_PATH, '*.pdf'))):
    pdf_path, filename = os.path.split(pdf_file)
    print('processing {}. File {}/{}.'.format(filename, idx + 1, nb_files))
    short_name = filename[0:-4]  # strip the '.pdf' extension
    try:
        proc_results = pdf_to_png(pdf_file, short_name, png_path, page_limit=4)
        if proc_results.returncode:
            print('Error encountered with file: {}\n'.format(filename))
            with open(LOG_FILE1, 'a') as logfile:
                logfile.write('Error with file: {}\n'.format(filename))  # report errors
        else:
            # pdf -> png succeeded; OCR each generated page image.
            png_to_txt(png_path, short_name, txt_path, LOG_FILE2)
    except subprocess.TimeoutExpired:
        print('!!!!!! Timed out for file {} !!!!!!'.format(filename))
        with open(LOG_FILE1, 'a') as logfile:
            logfile.write('Timed out with file: {}\n'.format(filename))  # report time out
| bricaud/OCR-classif | pdf2txt.py | Python | apache-2.0 | 3,948 |
# #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
resources.storage.FileShare
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Microsoft Azure Storage File Share interface
"""
from msrestazure.azure_exceptions import CloudError
from cloudify.decorators import operation
from cloudify.exceptions import (RecoverableError, NonRecoverableError)
from azure_sdk.resources.storage.file_share import FileShare
from azure_sdk.resources.storage.storage_account import StorageAccount
from cloudify_azure import (constants, utils, decorators)
@operation(resumable=True)
@decorators.with_generate_name(FileShare)
@decorators.with_azure_resource(FileShare)
def create(ctx, **_):
    """Creates an Azure File Share"""
    azure_config = utils.get_client_config(ctx.node.properties)
    resource_group_name = utils.get_resource_group(ctx)
    storage_account = utils.get_storage_account(ctx)
    share_name = utils.get_resource_name(ctx)

    # Optional settings taken from the node's resource_config.
    resource_config = ctx.node.properties.get("resource_config", {})
    metadata = resource_config.get('metadata')
    share_quota = resource_config.get('quota')

    # A storage-account access key is required to use the file service.
    account_keys = StorageAccount(azure_config, ctx.logger).list_keys(
        resource_group_name, storage_account)
    if not account_keys or not account_keys.get("key1"):
        raise RecoverableError(
            'StorageAccount reported no usable authentication keys')
    storage_account_key = account_keys.get("key1")

    # Get an interface to the File Share
    api_version = ctx.node.properties.get(
        'api_version', constants.API_VER_STORAGE_FILE_SHARE)
    file_share = FileShare(azure_config, ctx.logger, api_version)
    try:
        result = file_share.create(resource_group_name,
                                   storage_account,
                                   share_name,
                                   metadata,
                                   share_quota)
    except CloudError as cr:
        raise NonRecoverableError(
            "create file share '{0}' "
            "failed with this error : {1}".format(share_name,
                                                  cr.message)
        )

    # Expose connection details for downstream nodes/relationships.
    runtime_props = ctx.instance.runtime_properties
    runtime_props['quota'] = share_quota
    runtime_props['metadata'] = metadata
    runtime_props['storage_account'] = storage_account
    runtime_props['username'] = storage_account
    runtime_props['password'] = storage_account_key
    runtime_props['uri'] = '{0}.file.{1}/{2}'.format(
        storage_account, constants.CONN_STORAGE_ENDPOINT, share_name
    )
    utils.save_common_info_in_runtime_properties(
        resource_group_name=resource_group_name,
        resource_name=share_name,
        resource_get_create_result=result)
@operation(resumable=True)
@decorators.with_azure_resource(FileShare)
def delete(ctx, **_):
    """Deletes a File Share"""
    azure_config = utils.get_client_config(ctx.node.properties)
    resource_group_name = utils.get_resource_group(ctx)
    share_name = utils.get_resource_name(ctx)
    # Prefer the storage account recorded at create time; otherwise resolve
    # it from the node/relationships.
    storage_account = (ctx.instance.runtime_properties.get('storage_account')
                       or utils.get_storage_account(ctx))
    api_version = ctx.node.properties.get(
        'api_version', constants.API_VER_STORAGE_FILE_SHARE)
    file_share = FileShare(azure_config, ctx.logger, api_version)
    try:
        file_share.delete(resource_group_name,
                          storage_account,
                          share_name)
        utils.runtime_properties_cleanup(ctx)
    except CloudError as cr:
        raise NonRecoverableError(
            "delete file share '{0}' "
            "failed with this error : {1}".format(share_name,
                                                  cr.message))
| cloudify-cosmo/cloudify-azure-plugin | cloudify_azure/resources/storage/file.py | Python | apache-2.0 | 4,418 |
import pw19.__main__
if __name__ == "__main__":
    # Delegate to the package's entry point when executed as a script.
    pw19.__main__.main()
| SafPlusPlus/pyweek19 | run_game.py | Python | apache-2.0 | 73 |
# Copyright 2016 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron._i18n import _
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db.common_db_mixin import CommonDbMixin
from neutron.extensions import securitygroup as ext_sg
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils
from nuage_neutron.plugins.common import base_plugin
from nuage_neutron.plugins.common import constants
from nuage_neutron.plugins.common import exceptions as nuage_exc
from nuage_neutron.plugins.common.time_tracker import TimeTracker
from nuage_neutron.plugins.common import utils as nuage_utils
from nuage_neutron.vsdclient.common import cms_id_helper
from nuage_neutron.vsdclient import restproxy
LOG = logging.getLogger(__name__)
class NuageSecurityGroup(base_plugin.BaseNuagePlugin,
                         CommonDbMixin):
    """Keeps Neutron security groups/rules in sync with Nuage VSD policy groups.

    Event handlers are registered via register(); port callbacks bind/unbind
    vports to policy groups, SG/SG-rule callbacks mirror CRUD to the VSD.
    """

    def __init__(self):
        super(NuageSecurityGroup, self).__init__()
        # Core (L2) plugin; resolved lazily through the plugin directory.
        self._l2_plugin = None

    @property
    def core_plugin(self):
        if self._l2_plugin is None:
            self._l2_plugin = directory.get_plugin()
        return self._l2_plugin

    def register(self):
        """Subscribe to Nuage port callbacks and Neutron SG/SG-rule events."""
        self.nuage_callbacks.subscribe(self.post_port_create,
                                       resources.PORT, constants.AFTER_CREATE)
        self.nuage_callbacks.subscribe(self.post_port_update,
                                       resources.PORT, constants.AFTER_UPDATE)
        self.nuage_callbacks.subscribe(self.post_port_delete,
                                       resources.PORT, constants.AFTER_DELETE)
        registry.subscribe(self.pre_delete_security_group,
                           resources.SECURITY_GROUP,
                           events.BEFORE_DELETE)
        registry.subscribe(self.pre_create_security_group_rule,
                           resources.SECURITY_GROUP_RULE,
                           events.BEFORE_CREATE)
        registry.subscribe(self.post_create_security_group_rule,
                           resources.SECURITY_GROUP_RULE,
                           events.AFTER_CREATE)
        registry.subscribe(self.pre_delete_security_group_rule,
                           resources.SECURITY_GROUP_RULE,
                           events.BEFORE_DELETE)

    @nuage_utils.handle_nuage_api_error
    @log_helpers.log_method_call
    @TimeTracker.tracked
    def pre_delete_security_group(self, resource, event, trigger, **kwargs):
        """Delete the VSD policy group backing the SG being removed."""
        self.vsdclient.delete_nuage_secgroup(kwargs['security_group_id'])

    @nuage_utils.handle_nuage_api_error
    @log_helpers.log_method_call
    @TimeTracker.tracked
    def pre_create_security_group_rule(self, resource, event, trigger,
                                       **kwargs):
        """Reject rule definitions that VSD cannot express, before creation."""
        self.vsdclient.validate_nuage_sg_rule_definition(
            kwargs['security_group_rule'])

    @nuage_utils.handle_nuage_api_error
    @log_helpers.log_method_call
    @TimeTracker.tracked
    def post_create_security_group_rule(self, resource, event, trigger,
                                        **kwargs):
        """Mirror a new Neutron SG rule onto the mapped VSD policy group.

        On VSD failure the Neutron rule is rolled back (deleted) so the two
        systems stay consistent.
        """
        remote_sg = None
        context = kwargs['context']
        sg_rule = kwargs['security_group_rule']
        sg_id = sg_rule['security_group_id']

        if sg_rule.get('remote_group_id'):
            remote_sg = self.core_plugin.get_security_group(
                context, sg_rule.get('remote_group_id'))
        try:
            # Only mirror the rule if the SG already has a policy group.
            nuage_policygroup = self.vsdclient.get_sg_policygroup_mapping(
                sg_id)
            if nuage_policygroup:
                sg_params = {
                    'sg_id': sg_id,
                    'neutron_sg_rule': sg_rule,
                    'policygroup': nuage_policygroup
                }
                if remote_sg:
                    sg_params['remote_group_name'] = remote_sg['name']
                self.vsdclient.create_nuage_sgrule(sg_params)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Undo the Neutron rule, then re-raise the original error.
                self.core_plugin.delete_security_group_rule(context,
                                                            sg_rule['id'])

    @nuage_utils.handle_nuage_api_error
    @log_helpers.log_method_call
    @TimeTracker.tracked
    def pre_delete_security_group_rule(self, resource, event, trigger,
                                       **kwargs):
        """Delete the VSD counterpart of a Neutron SG rule before Neutron does."""
        context = kwargs['context']
        id = kwargs['security_group_rule_id']
        local_sg_rule = self.core_plugin.get_security_group_rule(context, id)
        self.vsdclient.delete_nuage_sgrule([local_sg_rule])

    @TimeTracker.tracked
    def post_port_create(self, resource, event, trigger, **kwargs):
        """Bind a newly created port's vport to the policy groups of its SGs."""
        context = kwargs['context']
        port = kwargs['port']
        subnet_mapping = kwargs['subnet_mapping']
        if subnet_mapping['nuage_managed_subnet']:
            # VSD-managed subnets are administered outside Neutron.
            return
        vsd_subnet = self.vsdclient.get_nuage_subnet_by_id(subnet_mapping)
        if port[ext_sg.SECURITYGROUPS]:
            self._process_port_security_group(context,
                                              port,
                                              kwargs['vport'],
                                              port[ext_sg.SECURITYGROUPS],
                                              vsd_subnet)

    @TimeTracker.tracked
    def post_port_update(self, resource, event, trigger, **kwargs):
        """Re-sync vport policy-group bindings after a port's SG list changed."""
        update_sg = True
        context = kwargs['context']
        updated_port = kwargs['updated_port']
        original_port = kwargs['original_port']
        rollbacks = kwargs['rollbacks']
        subnet_mapping = kwargs['subnet_mapping']
        if subnet_mapping['nuage_managed_subnet']:
            return
        new_sg = (set(updated_port.get(ext_sg.SECURITYGROUPS)) if
                  updated_port.get(ext_sg.SECURITYGROUPS) else set())
        orig_sg = (set(original_port.get(ext_sg.SECURITYGROUPS)) if
                   original_port.get(ext_sg.SECURITYGROUPS) else set())
        if not new_sg and new_sg == orig_sg:
            # Nothing was and nothing is attached: skip the VSD round-trip.
            update_sg = False
        if update_sg:
            vsd_subnet = self.vsdclient.get_nuage_subnet_by_id(subnet_mapping)
            self._process_port_security_group(context,
                                              updated_port,
                                              kwargs['vport'],
                                              new_sg,
                                              vsd_subnet)
            # Register an undo action restoring the original SG attachment.
            rollbacks.append((self._process_port_security_group,
                              [context, updated_port, kwargs['vport'],
                               original_port[ext_sg.SECURITYGROUPS],
                               vsd_subnet],
                              {}))
        # Garbage-collect policy groups no longer referenced by any port.
        deleted_sg_ids = (set(original_port[ext_sg.SECURITYGROUPS]) -
                          set(updated_port[ext_sg.SECURITYGROUPS]))
        self.vsdclient.check_unused_policygroups(deleted_sg_ids)

    @TimeTracker.tracked
    def post_port_delete(self, resource, event, trigger, **kwargs):
        """After a port deletion, clean up policy groups left without members."""
        port = kwargs['port']
        subnet_mapping = kwargs['subnet_mapping']
        if subnet_mapping['nuage_managed_subnet']:
            return
        securitygroups = port.get(ext_sg.SECURITYGROUPS, [])
        successful = False
        attempt = 1
        while not successful:
            try:
                self.vsdclient.check_unused_policygroups(securitygroups)
                successful = True
            except restproxy.RESTProxyError as e:
                msg = e.msg.lower()
                # NOTE(review): as written, any 404/409 error — or any error
                # whose message mentions a policy group — is retried up to 3
                # times; only unrelated non-404/409 errors raise immediately.
                # Confirm this matches the intended retry condition.
                if (e.code not in (404, 409) and 'policygroup' not in msg and
                        'policy group' not in msg):
                    raise
                elif attempt < 3:
                    attempt += 1
                else:
                    raise

    @log_helpers.log_method_call
    def _process_port_security_group(self, context, port, vport, sg_ids,
                                     vsd_subnet):
        """Resolve (or create) policy groups for *sg_ids* and bind them to *vport*."""
        if len(sg_ids) > 6:
            # VSD limit: at most 6 policy groups per vport.
            msg = (_("Exceeds maximum num of security groups on a port "
                     "supported on nuage VSP"))
            raise nuage_exc.NuageBadRequest(msg=msg)
        if not port.get('fixed_ips'):
            return
        successful = False
        attempt = 1
        max_attempts = 3
        while not successful:
            try:
                policygroup_ids = []
                for sg_id in sg_ids:
                    vsd_policygroup = self._find_or_create_policygroup(
                        context, sg_id, vsd_subnet)
                    policygroup_ids.append(vsd_policygroup['ID'])
                self.vsdclient.update_vport_policygroups(vport['ID'],
                                                         policygroup_ids)
                successful = True
            except restproxy.RESTProxyError as e:
                msg = e.msg.lower()
                # Same retry policy as post_port_delete (transient PG races).
                if (e.code not in (404, 409) and 'policygroup' not in msg and
                        'policy group' not in msg):
                    raise
                elif attempt < max_attempts:
                    attempt += 1
                else:
                    LOG.debug("Retry failed %s times.", max_attempts)
                    raise

    def _find_or_create_policygroup(self, context, security_group_id,
                                    vsd_subnet):
        """Return the VSD policy group for a Neutron SG, creating it if absent."""
        external_id = cms_id_helper.get_vsd_external_id(security_group_id)
        if vsd_subnet['type'] == constants.L2DOMAIN:
            policygroups = self.vsdclient.get_nuage_l2domain_policy_groups(
                vsd_subnet['ID'],
                externalID=external_id)
        else:
            # L3 case: policy groups live on the enclosing domain.
            domain_id = self.vsdclient.get_router_by_domain_subnet_id(
                vsd_subnet['ID'])
            policygroups = self.vsdclient.get_nuage_domain_policy_groups(
                domain_id,
                externalID=external_id)
        if len(policygroups) > 1:
            msg = _("Found multiple policygroups with externalID %s")
            raise n_exc.Conflict(msg=msg % external_id)
        elif len(policygroups) == 1:
            return policygroups[0]
        else:
            return self._create_policygroup(context, security_group_id,
                                            vsd_subnet)

    def _create_policygroup(self, context, security_group_id, vsd_subnet):
        """Create a VSD policy group (and then its rules) for a Neutron SG."""
        security_group = self.core_plugin.get_security_group(context,
                                                             security_group_id)
        # pop rules, make empty policygroup first
        security_group_rules = security_group.pop('security_group_rules')
        policy_group = self.vsdclient.create_security_group(vsd_subnet,
                                                            security_group)
        # Before creating rules, we might have to make other policygroups first
        # if the rule uses remote_group_id to have rule related to other PG.
        for rule in security_group_rules:
            remote_sg_id = rule.get('remote_group_id')
            if remote_sg_id:
                self._find_or_create_policygroup(context,
                                                 remote_sg_id,
                                                 vsd_subnet)
        self.vsdclient.create_security_group_rules(policy_group,
                                                   security_group_rules)
        return policy_group
| naveensan1/nuage-openstack-neutron | nuage_neutron/plugins/nuage_ml2/securitygroup.py | Python | apache-2.0 | 12,033 |
__author__ = 'tiramola group'
import os, datetime, operator, math, random, itertools, time
import numpy as np
from lib.fuzz import fgraph, fset
from scipy.cluster.vq import kmeans2
from lib.persistance_module import env_vars
from scipy.stats import linregress
from collections import deque
from lib.tiramola_logging import get_logger
from Predictor import Predictor
class RLDecisionMaker:
    """Reinforcement-learning-style decision maker.

    Decides when to add/remove cluster nodes from monitored YCSB client
    metrics and ganglia server metrics, using per-state reward values that
    are learned from past (and pre-recorded training) measurements.
    """

    def __init__(self, cluster):
        """Initialize state from env_vars and load stored measurements.

        :param cluster: cluster facade; must expose node_count() and
            get_hosts() (not exercised when self.debug is True).
        """
        #Create logger
        LOG_FILENAME = 'files/logs/Coordinator.log'
        self.log = get_logger('RLDecisionMaker', 'INFO', logfile=LOG_FILENAME)
        self.log.info("Using 'gain' : " + env_vars['gain'] +" with threshold of "+str( env_vars["decision_threshold"]*100) + "% and interval: " + str(env_vars['decision_interval']))
        self.log.info("Cluster Size from %d to %d nodes" % (env_vars['min_cluster_size'], env_vars['max_cluster_size']))
        self.debug = False
        if self.debug:
            # Fixed fake cluster size used when simulating.
            self.currentState = 8
        else:
            self.currentState = cluster.node_count()
        self.cluster = cluster
        self.nextState = self.currentState
        # Number of metric ticks to wait between two decisions.
        self.waitForIt = env_vars['decision_interval'] / env_vars['metric_fetch_interval']
        self.pending_action = None
        self.decision = {"action": "PASS", "count": 0}
        # The policy for getting throughput and latency when computing the reward func.
        # average, centroid
        self.measurementsPolicy = 'centroid'
        self.prediction = env_vars['use_prediction']
        self.predictor = Predictor()
        # used only in simulation!!
        self.countdown = 0
        # A dictionary that will remember rewards and metrics in states previously visited
        self.memory = {}
        for i in range(env_vars["min_cluster_size"], env_vars["max_cluster_size"] + 1):
            self.memory[str(i)] = {}
            #self.memory[str(i)]['V'] = None # placeholder for rewards and metrics
            self.memory[str(i)]['r'] = None
            self.memory[str(i)]['arrayMeas'] = None
        # Load any previous statics.
        self.measurementsFile = env_vars["measurements_file"]
        self.trainingFile = env_vars["training_file"]
        self.sumMetrics = {}
        # initialize measurements file
        meas = open(self.measurementsFile, 'a+')
        if os.stat(self.measurementsFile).st_size == 0:
            # The file is empty, set the headers for each column.
            meas.write('State\t\tLambda\t\tThroughput\t\tLatency\t\tCPU\t\tTime\n')
        meas.close()
        # load training set
        meas = open(self.trainingFile, 'r+')
        if os.stat(self.trainingFile).st_size != 0:
            # Read the training set measurements saved in the file.
            # NOTE(review): file.next() is Python 2-only; Python 3 would
            # need next(meas) -- confirm the target interpreter.
            meas.next() # Skip the first line with the headers of the columns
            for line in meas:
                # Skip comments (used in training sets)
                if not line.startswith('###'):
                    m = line.split('\t\t')
                    self.add_measurement(m)
        meas.close()
def add_measurement(self, metrics, write_file=False, write_mem=True):
"""
adds the measurement to either memory or file or both
@param metrics: array The metrics to store. An array containing [state, lamdba, throughput, latency, time]
@param writeFile: boolean If set write the measurement in the txt file
:return:
"""
if self.measurementsPolicy.startswith('average'):
if not self.sumMetrics.has_key(metrics[0]):
# Save the metric with the state as key metrics = [state, inlambda, throughput, latency]
self.sumMetrics[metrics[0]] = {'inlambda': 0.0, 'throughput': 0.0, 'latency': 0.0, 'divide_by': 0}
self.sumMetrics[metrics[0]] = {'inlambda': self.sumMetrics[metrics[0]]['inlambda'] + float(metrics[1]),
'throughput': self.sumMetrics[metrics[0]]['throughput'] + float(metrics[2]),
'latency': self.sumMetrics[metrics[0]]['latency'] + float(metrics[3]),
'divide_by': self.sumMetrics[metrics[0]]['divide_by'] + 1}
if self.debug and write_file:
self.log.debug("add_measurements: won't load measurement to memory")
else:
if write_mem:
# metrics-> 0: state, 1: lambda, 2: thoughtput, 3:latency, 4:cpu, 5:time
if not self.memory.has_key(metrics[0]):
self.memory[str(metrics[0])] = {}
#self.memory[str(metrics[0])]['V'] = None # placeholder for rewards and metrics
self.memory[str(metrics[0])]['r'] = None
self.memory[str(metrics[0])]['arrayMeas'] = np.array([float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])], ndmin=2)
elif self.memory[metrics[0]]['arrayMeas'] is None:
self.memory[metrics[0]]['arrayMeas'] = np.array([float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])], ndmin=2)
else:
self.memory[metrics[0]]['arrayMeas'] = np.append(self.memory[metrics[0]]['arrayMeas'],
[[float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])]], axis=0)
# but add 1 zero measurement for each state for no load cases ??? too many 0s affect centroids?
if write_file:
if write_mem:
used = "Yes"
else:
used = "No"
ms = open(self.measurementsFile, 'a')
# metrics[5] contains the time tick -- when running a simulation, it represents the current minute,
# on actual experiments, it is the current time. Used for debugging and plotting
ms.write(str(metrics[0]) + '\t\t' + str(metrics[1]) + '\t\t' + str(metrics[2]) + '\t\t' +
str(metrics[3]) + '\t\t' + str(metrics[4]) + '\t\t' + str(metrics[5]) + '\t\t'+ used+'\n')
ms.close()
# param state: string Get the average metrics (throughput, latency) for this state.
# return a dictionary with the averages
def get_averages(self, state):
averages = {}
if self.sumMetrics.has_key(state):
averages['throughput'] = float(self.sumMetrics[state]['throughput'] / self.sumMetrics[state]['divide_by'])
averages['latency'] = float(self.sumMetrics[state]['latency'] / self.sumMetrics[state]['divide_by'])
self.log.debug("GETAVERAGES Average metrics for state: " + state + " num of measurements: " + str(
self.sumMetrics[state]['divide_by']) +
" av. throughput: " + str(averages['throughput']) + " av. latency: " +
str(averages['latency']))
return averages
def doKmeans(self, state, from_inlambda, to_inlambda):
# Run kmeans for the measurements of this state and return the centroid point (throughput, latency)
ctd = {}
label = []
centroids = {}
if self.memory[state]['arrayMeas'] != None:
count_state_measurements = len(self.memory[state]['arrayMeas'])
# self.log.debug("DOKMEANS " + str(len(self.memory[state]['arrayMeas'])) +
# " measurements available for state " + state)
sliced_data = None
for j in self.memory[state]['arrayMeas']:
#self.my_logger.debug("DOKMEANS self.memory[state]['arrayMeas'][j]: "+ str(j))
# If this measurement belongs in the slice we're insterested in
if j[0] >= from_inlambda and j[0] <= to_inlambda:
#self.my_logger.debug("DOKMEANS adding measurement : "+ str(j))
# add it
if sliced_data == None:
sliced_data = np.array(j, ndmin=2)
else:
sliced_data = np.append(sliced_data, [j], axis=0)
k = 1 # number of clusters
# 1. No known lamdba values close to current lambda measurement
if sliced_data == None:
# Check if there are any known values from +-50% inlambda.
# original_inlambda = float(from_inlambda* (10/9))
# from_inlambda = 0.8 * original_inlambda
# to_inlambda = 1.2 * original_inlambda
# self.my_logger.debug("Changed lambda range to +- 20%: "+ str(from_inlambda) + " - "+ str(to_inlambda))
# for j in self.memory[state]['arrayMeas']:
# #self.my_logger.debug("DOKMEANS self.memory[state]['arrayMeas'][j]: "+ str(j))
# # If this measurement belongs in the slice we're insterested in
# if j[0] >= from_inlambda and j[0] <= to_inlambda:
# # add it
# if sliced_data == None:
# sliced_data = np.array(j, ndmin=2)
# else:
# sliced_data = np.append(sliced_data, [j], axis=0)
# #centroids, label = kmeans2(self.memory[state]['arrayMeas'], k, minit='points') # (obs, k)
# #else:
# if sliced_data == None:
self.log.debug("No known lamdba values close to current lambda measurement. Returning zeros!")
else:
# self.log.debug("DOKMEANS length of sliced_data to be fed to kmeans: " + str(len(sliced_data))
# + " (out of %d total)" % count_state_measurements)
centroids, label = kmeans2(sliced_data, k, minit='points')
pass
# initialize dictionary
num_of_meas = {}
#num_of_meas = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0}
for j in range(0, k):
num_of_meas[str(j)] = 0
if len(label) > 0:
for i in label:
num_of_meas[str(i)] += 1
max_meas_cluster = max(num_of_meas.iteritems(), key=operator.itemgetter(1))[0]
# self.my_logger.debug("DOKMEANS state: "+ state +" kmeans2 centroids: "+ str(centroids) +" label: "+
# str(num_of_meas) + " cluster with max measurements: "+ str(max_meas_cluster))
ctd['inlambda'] = centroids[int(max_meas_cluster)][0]
ctd['throughput'] = centroids[int(max_meas_cluster)][1]
ctd['latency'] = centroids[int(max_meas_cluster)][2]
ctd['cpu'] = centroids[int(max_meas_cluster)][3]
else:
#self.log.debug("DOKMEANS one of the clusters was empty and so label is None :|. Returning zeros")
ctd['inlambda'] = 0.0
ctd['throughput'] = 0.0
ctd['latency'] = 0.0
ctd['cpu'] = 0.0
#return None
else:
self.log.debug("DOKMEANS self.memory[state]['arrayMeas'] is None :|")
return ctd
def moving_average(self, iterable, n=3):
# moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
# http://en.wikipedia.org/wiki/Moving_average
it = iter(iterable)
d = deque(itertools.islice(it, n - 1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n)
    def predict_load(self):
        """Predict the incoming load (lambda) 10 minutes from now.

        Reads the last 20 lines of the measurements file, smooths the
        lambda column with a running average and extrapolates with a
        linear regression on the smoothed series.

        NOTE(review): os.popen2 was removed in Python 3 (subprocess is the
        replacement) -- confirm the target interpreter. The slicing below
        (ten_min[2:18], ten_min[19]) assumes tail returned exactly 20
        data lines; with a shorter file this raises IndexError.
        """
        # Linear regression to inspect the slope of the recent load.
        stdin, stdout = os.popen2("tail -n 20 " + self.measurementsFile)
        stdin.close()
        lines = stdout.readlines();
        stdout.close()
        ten_min_l = [] # store past 10 mins lambda's
        ten_min = [] # store past 10 mins ticks
        for line in lines:
            m = line.split('\t\t') # state, lambda, throughput, latency, cpu, time tick
            ten_min_l.append(float(m[1]))
            ten_min.append(float(m[5]))
        # run running average on the 10 mins lambda measurements
        n = 5
        run_avg_gen = self.moving_average(ten_min_l, n)
        run_avg = []
        for r in run_avg_gen:
            run_avg.append(float(r))
        ten_min_ra = ten_min[2:18] # np.arange(i-8, i-2, 1)
        # linear regression on the running average
        #(slope, intercept, r_value, p_value, stderr) = linregress(ten_min, ten_min_l)
        (slope, intercept, r_value, p_value, stderr) = linregress(ten_min_ra, run_avg)
        # fit the running average in a polynomial
        coeff = np.polyfit(ten_min, ten_min_l, deg=2)
        self.log.debug("Slope (a): " + str(slope) + " Intercept(b): " + str(intercept))
        self.log.debug("Polynom coefficients: " + str(coeff))
        #self.my_logger.debug("next 10 min prediction "+str(float(slope * (p + 10) + intercept + stderr)))
        predicted_l = float(slope * (ten_min[19] + 10) + intercept + stderr) # lambda in 10 mins from now
        #predicted_l = np.polyval(coeff, (ten_min[9] + 10)) # lambda in 10 mins from now
        # 'dif' is computed for logging only; the caller gets predicted_l.
        if slope > 0:
            #if predicted_l > allmetrics['inlambda'] :
            dif = 6000 + 10 * int(slope)
            #dif = 6000 + 0.2 * int(predicted_l - allmetrics['inlambda'])
            self.log.debug("Positive slope: " + str(slope) + " dif: " + str(dif)
                           + ", the load is increasing. Moving the lambda slice considered 3K up")
        else:
            dif = -6000 + 10 * int(slope)
            #dif = -6000 + 0.2 * int(predicted_l - allmetrics['inlambda'])
            self.log.debug("Negative slope " + str(slope) + " dif: " + str(dif)
                           + ", the load is decreasing. Moving the lambda slice considered 3K down")
        #dif = ((predicted_l - allmetrics['inlambda'])/ allmetrics['inlambda']) * 0.1 * 6000#* allmetrics['inlambda']
        #dif = int((predicted_l / allmetrics['inlambda']) * 6000)
        return predicted_l
    def publish_to_local_ganglia(self, allmetrics):
        """
        Publishes monitoring data to local ganglia agent

        Best-effort only: any failure is silently swallowed.

        NOTE(review): self.monitoring_endpoint is never assigned in
        __init__, so the AttributeError raised here is always caught by
        the bare except below and nothing is published -- confirm where
        monitoring_endpoint is supposed to be set.
        NOTE(review): the gmetric command line is built from unescaped
        values via os.system; fine for trusted internal metrics, but
        worth replacing with subprocess and an argument list.
        :param allmetrics:
        :return:
        """
        self.log.debug( "TAKEDECISION allmetrics: " + str(allmetrics))
        #Publish measurements to ganglia
        try:
            os.system("gmetric -n ycsb_inlambda -v " + str(
                allmetrics['inlambda']) + " -d 15 -t float -u 'reqs/sec' -S " + str(
                self.monitoring_endpoint) + ":[DEBUG] hostname")
            os.system("gmetric -n ycsb_throughput -v " + str(
                allmetrics['throughput']) + " -d 15 -t float -u 'reqs/sec' -S " + str(
                self.monitoring_endpoint) + ":[DEBUG] hostname")
            os.system(
                "gmetric -n ycsb_latency -v " + str(allmetrics['latency']) + " -d 15 -t float -u ms -S " + str(
                    self.monitoring_endpoint) + ":[DEBUG] hostname")
        except:
            pass
def handle_metrics(self, client_metrics, server_metrics):
# read metrics
allmetrics = {'inlambda': 0, 'throughput': 0, 'latency': 0, 'cpu': 0}
if not self.debug:
## Aggreggation of YCSB client metrics
clients = 0
servers = 0
# We used to collect server cpu too, do we need it?
#self.log.debug("TAKEDECISION state: %d, pending action: %s. Collecting metrics" % (self.currentState, str(self.pending_action)))
for host in client_metrics.keys():
metric = client_metrics[host]
if isinstance(metric, dict):
for key in metric.keys():
if key.startswith('ycsb_TARGET'):
allmetrics['inlambda'] += float(metric[key])
elif key.startswith('ycsb_THROUGHPUT'):
allmetrics['throughput'] += float(metric[key])
elif key.startswith('ycsb_READ') or key.startswith('ycsb_UPDATE') or key.startswith(
'ycsb_RMW') or key.startswith('ycsb_INSERT'):
allmetrics['latency'] += float(metric[key])
clients += 1
for host in server_metrics.keys():
metric = server_metrics[host]
if isinstance(metric, dict):
#check if host in active cluster hosts
if not host in self.cluster.get_hosts().keys():
continue
servers += 1
for key in metric.keys():
if key.startswith('cpu_idle'):
allmetrics['cpu'] += float(metric[key])
try:
allmetrics['latency'] = allmetrics['latency'] / clients
except:
allmetrics['latency'] = 0
try:
allmetrics['cpu'] = (allmetrics['cpu'] / servers) # average node cpu usage
except:
allmetrics['cpu'] = 0
else:
self.log.info("Running in DEBUG mode, no metrics retrieved!")
return allmetrics
    # Guard so the "action pending" message is logged once per pending
    # action instead of on every metrics tick.
    pending_action_logged = False

    def take_decision(self, client_metrics, server_metrics):
        '''
        Read the metrics collected by Monitoring.py and decide whether the
        number of participating virtual nodes should change.

        Returns self.decision, a dict with "action" (ADD/REMOVE/PASS) and
        "count" (number of nodes involved).
        '''
        # update prediction current minute counter
        self.predictor.tick_tock()
        if client_metrics is None or server_metrics is None: return
        # first parse all metrics
        allmetrics = self.handle_metrics(client_metrics, server_metrics)
        #self.publish_to_local_ganglia(allmetrics)
        pending_action = not (self.pending_action is None) # true if there IS a pending action
        # 1. Save the current metrics to file and in memory only if there is no pending action.
        self.add_measurement([str(self.currentState), allmetrics['inlambda'], allmetrics['throughput'],
                              allmetrics['latency'], allmetrics['cpu'],
                              datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
                             write_file=True, write_mem=((not pending_action) and bool(env_vars['update_metrics'])))
        # if there is a pending action, don't take a decision
        if pending_action:
            # NOTE(review): pending_action_logged is declared as a CLASS
            # attribute above, but 'global' targets the module namespace --
            # the first read below raises NameError if a pending action
            # occurs before any completed decision pass has assigned the
            # module-level name (line after this block). Verify intent.
            global pending_action_logged
            if not pending_action_logged:
                self.log.debug("Last action " + self.pending_action + " hasn't finished yet, see you later!")
                pending_action_logged = True
            if self.debug:
                # Simulation mode: count down the fake action duration.
                if self.countdown == 0:
                    self.log.debug("Running a simulation, set state from " + str(self.currentState) + " to " +
                                   str(self.nextState))
                    self.currentState = self.nextState
                    self.pending_action = None
                else:
                    self.countdown -= 1
                    self.log.debug("Reducing countdown to " + str(self.countdown))
            # skip decision
            self.decision["action"] = "PASS"
            self.decision["count"] = 0
            return self.decision
        pending_action_logged = False
        # manage the interval counter (waitForIt)
        if self.waitForIt == 0:
            self.waitForIt = env_vars['decision_interval'] / env_vars['metric_fetch_interval']
        else:
            if self.waitForIt == env_vars['decision_interval'] / env_vars['metric_fetch_interval']:
                self.log.debug("New decision in " + str(float(self.waitForIt*env_vars['metric_fetch_interval'])/60) +
                               " mins, see you later!")
            self.waitForIt -= 1
            self.decision["action"] = "PASS"
            self.decision["count"] = 0
            return self.decision
        # Select values close to the current throughput: the lambda slice of interest.
        slice_range=75
        from_inlambda = allmetrics['inlambda'] - slice_range
        to_inlambda = allmetrics['inlambda'] + slice_range
        if self.prediction:
            predicted_l = self.predictor.poly_regression()
            if predicted_l > 0:
                # there are enough data to make a prediction, if not use the actual lambda
                self.log.debug(
                    "Predicted: " + str(predicted_l) + " lambda :" + str(allmetrics['inlambda']))
                from_inlambda = predicted_l - slice_range
                to_inlambda = predicted_l + slice_range
        self.log.debug("TAKEDECISION state %d lambda range: %d - %d" % (self.currentState, from_inlambda, to_inlambda))
        # too low to care, the initial num of nodes can answer 1000 req/sec,
        # so consider it as 0 1000 * len(cluster.size)!!
        if 0.0 < to_inlambda < 1000:
            from_inlambda = 0.0
            self.log.debug("TAKEDECISION state %d current lambda %d changed lambda range to: %d - %d"
                           % (self.currentState, allmetrics['inlambda'], from_inlambda, to_inlambda))
        # The subgraph we are interested in. It contains only the allowed transitions from the current state.
        from_node = max(int(env_vars["min_cluster_size"]), (self.currentState - env_vars["rem_nodes"]))
        to_node = min(self.currentState + int(env_vars["add_nodes"]), int(env_vars["max_cluster_size"]))
        #self.my_logger.debug("TAKEDECISION creating graph from node: "+ str(from_node) +" to node "+ str(to_node))
        #inject the current number of nodes
        allmetrics['current_nodes'] = self.currentState
        states = fset.FuzzySet()
        # Calculate rewards using the values in memory if any, or defaults
        for i in range(from_node, to_node + 1):
            # if this state was never visited, estimate what the throughput would need to be
            # allmetrics['max_throughput'] = float(i) * float(self.utils.serv_throughput)
            allmetrics['num_nodes'] = i
            met = {}
            # NOTE(review): the 'average' branch calls self.getAverages --
            # the method is defined as get_averages; verify the camelCase
            # name resolves before enabling the 'average' policy.
            if self.measurementsPolicy.startswith('average'):
                met = self.getAverages(str(i))
            elif self.measurementsPolicy.startswith('centroid'):
                met = self.doKmeans(str(i), from_inlambda, to_inlambda)
            #format met output
            out_met = {k: int(v) for k,v in met.iteritems()}
            self.log.debug("TAKEDECISION state: " + str(i) + " met: " + str(out_met))
            if met != None and len(met) > 0:
                # Been in this state before, use the measurements
                allmetrics['inlambda'] = met['inlambda']
                allmetrics['throughput'] = met['throughput']
                allmetrics['latency'] = met['latency']
                allmetrics['cpu'] = met['cpu']
                #self.my_logger.debug("TAKEDECISION adding visited state "+ str(i) +" with gain "+ str(self.memory[str(i)]['r']))
            #else:
            # No clue for this state use current measurements...
            #self.my_logger.debug("TAKEDECISION unknown state "+ str(i) +" with gain "+ str(self.memory[str(i)]['r']))
            # The reward expression comes from configuration and is
            # evaluated with allmetrics as its namespace.
            self.memory[str(i)]['r'] = eval(env_vars["gain"], allmetrics)
            # if self.currentState != i:
            #     self.my_logger.debug(
            #         "TAKEDECISION adding state " + str(i) + " with gain " + str(self.memory[str(i)]['r']))
            states.add(fset.FuzzyElement(str(i), self.memory[str(i)]['r']))
            # For the current state, use current measurement
            # if self.currentState == i:
            #     if not self.debug:
            #         cur_gain = eval(env_vars["gain"], allmetrics)
            #         # for debugging purposes I compare the current reward with the one computed using the training set
            #         self.log.debug("TAKEDECISION state %d current reward: %d training set reward: %d"
            #                        % (self.currentState, cur_gain, self.memory[str(i)]['r']))
            #         self.memory[str(i)]['r'] = cur_gain
            #         #self.log.debug("TAKEDECISION adding current state " + str(i) + " with gain " + str(cur_gain))
            #     else:
            #         cur_gain = (self.memory[str(i)]['r'])
            #         self.log.debug("TAKEDECISION state %d current state training set reward: %d"
            #                        % (self.currentState, cur_gain))
            #
            #     states.add(fset.FuzzyElement(str(i), cur_gain))
        # Create the transition graph
        v = []
        for i in states.keys():
            v.append(i)
        v = set(v)
        stategraph = fgraph.FuzzyGraph(viter=v, directed=True)
        for j in range(from_node, to_node + 1):
            if j != self.currentState:
                # Connect nodes with allowed transitions from the current node.connect(tail, head, mu) head--mu-->tail
                stategraph.connect(str(j), str(self.currentState), eval(env_vars["trans_cost"], allmetrics))
                #self.my_logger.debug(
                #    "TAKEDECISION connecting state " + str(self.currentState) + " with state " + str(j))
        # Connect nodes with allowed transitions from node j.
        #for k in range(max(int(env_vars["min_cluster_size"]), j - int(env_vars["rem_nodes"])),
        #               min(j + int(env_vars["add_nodes"]), int(env_vars["max_cluster_size"])+1)):
        #    if k != j:
        #        self.my_logger.debug("TAKEDECISION connecting state "+ str(j) +" with state "+ str(k))
        #        stategraph.connect(str(k), str(j), eval(env_vars["trans_cost"], allmetrics))
        #Calculate the V matrix for available transitions
        V = {}
        for s in range(from_node, to_node + 1):
            # Get allowed transitions from this state.
            if self.memory[str(s)]['r'] != None:
                # For each state s, we need to calculate the transitions allowed.
                #allowed_transitions = stategraph.edges(head=str(s))
                #Vs = []
                # for t in allowed_transitions:
                # t[0] is the tail state of the edge (the next state)
                # No V from last run
                #if self.memory[t[0]]['V'] == None:
                #    self.memory[t[0]]['V'] = self.memory[t[0]]['r']
                # Vs.append(self.memory[t[0]]['r'])
                # self.my_logger.debug("TAKEDECISION tail state: "+ t[0] +" head state: "+
                #                      t[1] +" V("+t[0]+") = "+ str(self.memory[t[0]]['V']))
                # self.my_logger.debug("TAKEDECISION transition cost from state:"+ str(t[1]) +" to state: "+ str(t[0]) +
                #                      " is "+ str(stategraph.mu(t[1],t[0])))
                # The original algo uses previous values of max reward (+ gamma * previous max), we don't
                # if len(Vs) > 0:
                #     V[s] = self.memory[str(s)]['r'] + float(self.utils.gamma) * max(Vs)
                # else:
                #     V[s] = self.memory[str(s)]['r']
                V[s] = self.memory[str(s)]['r']
        self.log.debug("TAKEDECISION Vs="+str(V))
        # Find the max V (the min state with the max value)
        max_gain = max(V.values())
        max_set = [key for key in V if V[key] == max_gain]
        self.log.debug("max set: "+str(max_set))
        self.nextState = min(max_set)
        self.log.debug("max(V): %d (GAIN=%d)" % (self.nextState, V[self.nextState]))
        #self.my_logger.debug("TAKEDECISION next state: "+ str(self.nextState))
        # Remember the V values calculated ???
        #for i in V.keys():
        #    self.memory[str(i)]['V'] = V[i]
        #    self.my_logger.debug("TAKEDECISION V("+ str(i) +") = "+ str(V[i]))
        # vis = fuzz.visualization.VisManager.create_backend(stategraph)
        # (vis_format, data) = vis.visualize()
        #
        # with open("%s.%s" % ("states", vis_format), "wb") as fp:
        #     fp.write(data)
        #     fp.flush()
        #     fp.close()
        if self.nextState != self.currentState:
            self.log.debug("Decided to change state to_next: " + str(self.nextState) + " from_curr: " + str(self.currentState))
            # You've chosen to change state, that means that nextState has a greater reward, therefore d is always > 0
            current_reward = self.memory[str(self.currentState)]['r']
            d = self.memory[str(self.nextState)]['r'] - current_reward
            self.log.debug( "Difference is " + str(d) + " abs thres="+str(env_vars['decision_abs_threshold'])+" gte:"+str(float(d) < env_vars['decision_abs_threshold']))
            # Cancel the move when the gain difference is below both the
            # relative and the absolute thresholds (hysteresis).
            if (current_reward != 0 and (abs(float(d) / current_reward) < env_vars['decision_threshold']))\
                    or float(d) < env_vars['decision_abs_threshold']:
                #false alarm, stay where you are
                self.nextState = self.currentState
                # skip decision
                self.decision["action"] = "PASS"
                self.decision["count"] = 0
                self.log.debug("ups changed my mind...staying at state: " + str(self.currentState) +
                               " cause the gain difference is: " + str(abs(d)) +
                               " which is less than %d%% of the current reward, it's actually %f%%" % (int(100*env_vars['decision_threshold']) ,abs(float(d)*100) / (float(current_reward)+0.001)))
            else:
                self.log.debug("Difference "+ str(d) + " is greater than threshold ("+str(env_vars['decision_threshold'])+"). Keeping decision")
        # If the reward is the same with the state you're in, don't move
        # elif (d == 0):
        #     #false alarm, stay where you are
        #     self.nextState = self.currentState
        #     # skip decision
        #     self.decision["action"] = "PASS"
        #     self.decision["count"] = 0
        #     self.log.debug("ups changed my mind...staying at state: " + str(self.currentState) +
        #                    " cause the gain difference is: " + str(abs(d)) +
        #                    " which is less than 10% of the current reward "
        #                    + str(self.memory[str(self.currentState)]['r']))
        # NOTE(review): when nextState == currentState from the start,
        # decision["action"] keeps its previous value (only "count" is
        # reset to 0 below) -- verify callers treat count==0 as a no-op.
        if self.nextState > self.currentState:
            self.decision["action"] = "ADD"
        elif self.nextState < self.currentState:
            self.decision["action"] = "REMOVE"
        self.decision["count"] = abs(int(self.currentState) - int(self.nextState))
        #self.log.debug("TAKEDECISION: action " + self.decision["action"] + " " + str(self.decision["count"]) +
        #               " nodes.")
        ## Don't perform the action if we're debugging/simulating!!!
        if self.debug:
            if self.pending_action is None and not self.decision["action"].startswith("PASS"):
                self.pending_action = self.decision['action']
                self.countdown = 2 * self.decision['count'] * 60 / env_vars['metric_fetch_interval']
                #self.currentState = str(self.nextState)
                self.log.debug("TAKEDECISION simulation, action will finish in: " + str(self.countdown) + " mins")
            else:
                self.log.debug("TAKEDECISION Waiting for action to finish: " + str(self.pending_action))
        return self.decision
    def simulate(self):
        """Drive the decision maker with a synthetic sinusoidal load.

        NOTE(review): take_decision(client_metrics, server_metrics) takes
        two metric dicts, but this calls it with a single 'values' dict --
        this looks out of date with the current signature; confirm before
        relying on this simulation path.
        """
        self.log.debug("START SIMULATION!!")
        ## creates a sin load simulated for an hour
        # for i in range(0, 3600, 10):
        #for i in range(0, 14400, 60): # 4 hours
        for i in range(0, 900, 1):
            cpu = max(5, 60 * abs(math.sin(0.05 * math.radians(i))) - int(self.currentState))
            # lamdba is the query arrival rate, throughput is the processed queries
            #l = 60000 + 40000 * math.sin(0.01 * i) + random.uniform(-4000, 4000)
            #l = 50000 * math.sin(60 * math.radians(i)/40) + 65000 + random.uniform(-8000, 8000)
            #l = 40000 * math.sin(60 * math.radians(i)/50) + 45000 + random.uniform(-4000, 4000)
            #l = 30000 * math.sin(0.02 * i) + 55000 + random.uniform(-4000, 4000)
            l = 60000 * math.sin(0.04 * i) + 75000 + random.uniform(-6000, 6000)
            # first 10 mins
            # if i < 1200:
            #     l = 20000
            # elif i < 2400:
            #     l = 40000
            # elif i < 4400:
            #     l = 60000
            # elif i < 6000:
            #     l = 40000
            # elif i < 7200:
            #     l = 20000
            maxThroughput = (float(self.currentState) * float(env_vars["serv_throughput"]))
            # latency = 200 # msec
            # if (l > maxThroughput):
            #     latency += (l-maxThroughput)/10 # +100msec for every 1000 reqs queued
            #throughput = min(maxThroughput, l)# max throughput for the current cluster
            throughput = l #(+/- e ??)
            # Quadratic latency model; saturates differently past capacity.
            latency = 0.0000004 * l ** 2 + 200 # msec...
            if l > maxThroughput:
                throughput = maxThroughput - 0.01 * l
                latency = 0.00001 * (l - maxThroughput) ** 2 + (0.0000004 * maxThroughput ** 2 + 200) # msec... ?
            values = {'latency': latency, 'cpu': cpu, 'inlambda': l, 'throughput': throughput,
                      'num_nodes': self.currentState}
            self.log.debug(
                "SIMULATE i: " + str(i) + " state: " + str(self.currentState) + " values:" + str(values)
                + " maxThroughput: " + str(maxThroughput))
            # I think this is not needed because take_decision also runs in debug mode
            #self.addMeasurement([self.currentState, str(l), str(throughput), str(latency), str(i)], True)
            #if self.pending_action[len(self.pending_action)-1] == "done" :
            self.take_decision(values)
            time.sleep(1)
        return
    def simulate_training_set(self):
        """Replay the lambdas recorded for states 9-18 through the decision
        maker in debug mode.

        NOTE(review): take_decision(client_metrics, server_metrics) takes
        two metric dicts, but this calls it with a single 'values' dict --
        confirm this path is in sync with the current signature.
        """
        # run state 12 lambdas
        self.log.debug("START SIMULATION!!")
        self.debug = True
        load = []
        for k in range(9, 19):
            for j in self.memory[str(k)]['arrayMeas']:
                load.append(j[0])
        #for i in range(0, 120, 1): # maybe? 1 hour per half minute
        for i in range(0, 240*12, 1):
            l = load[i]
            # throughput = (800 * self.currentState)
            # if l < (800 * self.currentState):
            #     throughput = l
            values = {'inlambda': l, 'num_nodes': self.currentState}
            self.log.debug(
                "SIMULATE i: " + str(i) + " state: " + str(self.currentState) + " values:" + str(values))
            self.take_decision(values)
if __name__ == '__main__':
    # Manual entry point: replays the recorded training set.
    # NOTE(review): __init__ calls cluster.node_count(), so passing the
    # string "localhost" would fail there -- confirm how this script is
    # meant to be launched (a cluster object seems to be expected).
    fsm = RLDecisionMaker("localhost")
    fsm.simulate_training_set()
| cmantas/tiramola_v3 | new_decision_module.py | Python | apache-2.0 | 36,005 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Race dataset."""
from tensorflow_datasets.text.race.race import Race
| tensorflow/datasets | tensorflow_datasets/text/race/__init__.py | Python | apache-2.0 | 685 |
# -*- coding: utf-8 -*-
'''
Copyright 2014 FreshPlanet (http://freshplanet.com | opensource@freshplanet.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import random
from google.appengine.ext import ndb
import webapp2
from counter.models import Counter
class SampleHandler(webapp2.RequestHandler):
    """Demo handler that bumps a few Counters so the admin view has data."""

    @ndb.toplevel
    def get(self):
        """
        Increments some Counters to play with the feature.
        """
        # Fill datastore with data to show case in admin view
        yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
        previous_slice = yesterday.strftime("%Y-%m-%d")
        for platform in ['iOS', 'Android', 'Windows']:
            counter_name = 'newInstalls_' + platform
            # One bump for the default (current) slice, one for yesterday's.
            Counter.increment(counter_name, random.randint(1, 5))
            Counter.increment(counter_name, random.randint(1, 5), sliceId=previous_slice)
        self.response.write("""
        Counters updated!
        Query for counters <a href="/admin/counters/?prefix=newInstalls">here</a>.
        """)
| freshplanet/AppEngine-Counter | counter/views.py | Python | apache-2.0 | 1,493 |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
import sys
import os
import urllib2
import httplib # For the httplib.BadStatusLine Exception
import json
import logging
import pickle
import time
from coveragelink import CoverageLink
# File the collected coverage links are stored under (pickled as .obj).
LINKS_JSON_FILE = 'links.json'
# Zuul's status export file name and its default public location.
ZUUL_STATUS_FILE = 'status.json'
DEFAULT_ZUUL_STATUS_URL = 'http://zuul.openstack.org/' + ZUUL_STATUS_FILE
# Base URL where job logs (and coverage reports) are published.
DEFAULT_OUTPUT_LOGS = 'http://logs.openstack.org'
# How long (seconds) a failing link is kept before being purged.
PURGE_SECONDS = 60 * 5  # 5 minutes
class CoverageIndex(object):
    """Builds an index of coverage-report links by scanning the Zuul
    status JSON for *-coverage jobs in the post/check pipelines.
    """

    @staticmethod
    def read_from_url(zuul_status_url=DEFAULT_ZUUL_STATUS_URL):
        """Get the provided Zuul status file via provided url.

        Also caches a copy of the raw JSON under /tmp for debugging.

        :raises Exception: when the URL cannot be fetched or the body is
            not valid JSON.
        """
        try:
            res = urllib2.urlopen(zuul_status_url)
            json_contents = res.read()
            # Keep a local copy of the last fetched status.
            with open(os.path.join(os.sep, 'tmp', ZUUL_STATUS_FILE), 'w') as f:
                f.write(json_contents)
        except (urllib2.HTTPError, httplib.BadStatusLine):
            raise Exception('Unable to read Zuul status at ' + zuul_status_url)
        try:
            return json.loads(json_contents)
        except ValueError:
            raise Exception('Unable to parse JSON Zuul status at ' +
                            zuul_status_url)
@staticmethod
def read_from_file(filename=ZUUL_STATUS_FILE):
"""Read the Zuul status from the provided filename"""
try:
with open(filename, 'r') as f:
json_contents = f.read()
except IOError:
raise Exception('Unable to read Zuul status from ' + filename)
try:
return json.loads(json_contents)
except ValueError:
raise Exception('Unable to parse JSON Zuul status from ' +
filename)
def parse_status(self, data):
"""Parse the provided Zuul Status for post/check pipelines
and look for coverage jobs
"""
coverage_links = []
for pipeline in data['pipelines']:
if pipeline['name'] in ['post', 'check']:
links = self.process_pipeline(pipeline['name'],
pipeline['change_queues'])
coverage_links += links
return coverage_links
def process_pipeline(self, type, queues):
"""For the given pipeline queues identify coverage jobs
and generate the url for the project and pipeline type
"""
pipeline_post = 'post'
pipeline_check = 'check'
coverage_suffix = '-coverage'
report_dir = 'cover'
links = []
for queue in queues:
if queue['heads'] and len(queue['heads']) > 0:
for head in queue['heads'][0]:
id = head['id'].split(',', 2)[0]
for job in head['jobs']:
job_name = job['name']
project = job_name[:len(job_name) -
len(coverage_suffix)]
uri = []
# For 'post' pipeline coverage jobs
if job_name.endswith(coverage_suffix) and job['uuid']:
uuid_prefix = job['uuid'][:7]
if type == pipeline_post:
# e.g. http://logs.openstack.org/b8/b88aa ...
# /post/ironic-coverage/53a1364/cover/
uri = [id[:2], id, type, job_name,
uuid_prefix, report_dir]
elif type == pipeline_check:
# e.g. http://logs.openstack.org/27/219727/1
# /check/rally-coverage/3550a36/cover/
patchset = head['id'].split(',', 2)[1]
uri = [id[-2:], id, patchset, type, job_name,
uuid_prefix, report_dir]
if uri:
url = '/'.join(['http://logs.openstack.org'] + uri)
logging.debug(url)
link = CoverageLink(project, url, type)
links.append(link)
logging.info('Captured {} links for {} '.format(len(links), type))
return links
def validate_links(self, new_links):
"""Process the list of coverage urls to confirm they
exist and have a total line
"""
for entry in new_links:
if entry:
try:
entry.validate()
except Exception as e:
logging.warn(str(e))
if int(time.time()) - entry.created > PURGE_SECONDS:
logging.debug("Purging old link " + entry.url)
new_links.remove(entry)
continue
logging.info('URL verified ' + entry.url)
return
def read_existing_links(self, filename=LINKS_JSON_FILE):
"""Read the existing links file to append new validated
coverage links
"""
try:
with open(filename + '.obj', 'rb') as fo:
links = pickle.load(fo)
logging.info('Loaded {} existing links'.format(len(links)))
except IOError:
return []
return links
def trim_duplicates(self, links):
"""Look for older duplicate project entries and
remove them.
"""
new_links = []
projects = []
for entry in reversed(links):
if entry.project not in projects:
projects.append(entry.project)
new_links.append(entry)
else:
logging.warn('Removal of ' + entry.url)
logging.info('Removed {} duplicate project links'.format(
len(links) - len(new_links)))
return new_links
def publish_links(self, links, filename=LINKS_JSON_FILE):
"""Write the current valid links to the specified file"""
# Save the unique list of links either valid or invalid
# for future reprocessing
links = self.trim_duplicates(links)
try:
logging.info('Saving %d links for reuse' % (len(links)))
with open(filename + '.obj', 'wb') as fo:
pickle.dump(links, fo)
except IOError as e:
logging.error('I/O error({}): {}'.format(e.errno, e.strerror))
# Publish the valid links to a JSON file
valid_links = []
for entry in links:
if entry and entry.isValid():
valid_links.append(entry)
valid_links = self.trim_duplicates(valid_links)
json_links = []
for entry in valid_links:
json_links.append(entry.json())
try:
with open(filename, 'w') as f:
json.dump(json_links, f)
except IOError as e:
logging.error('I/O error({}): {}'.format(e.errno, e.strerror))
return
def __init__(self, filename=None):
logging.info('Processing started')
# Determine if to process url or provided file
if filename:
data = self.read_from_file(sys.argv[1])
else:
try:
data = self.read_from_url()
# if there is an error reading url or parsing url, try again
except Exception:
logging.warning(
'First attempt to read from url failed, retrying')
time.sleep(2)
data = self.read_from_url()
new_links = self.parse_status(data)
if len(new_links) == 0: # No new work
return
self.validate_links(new_links)
# TODO Append existing links
existing_links = self.read_existing_links()
if existing_links:
self.validate_links(existing_links)
new_links = existing_links + new_links
self.publish_links(new_links)
if __name__ == '__main__':
logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
CoverageIndex(sys.argv[1] if len(sys.argv) > 1 else None)
| ronaldbradford/os-demo | coverage/coverageindex.py | Python | apache-2.0 | 8,840 |
from abc import abstractmethod
from typing import Callable, TypeVar, Protocol
from typing_extensions import runtime_checkable
TSource = TypeVar('TSource')
TResult = TypeVar('TResult')
@runtime_checkable
class Applicative(Protocol[TSource, TResult]):
    """Applicative.

    Applicative functors are functors with some extra properties.
    Most importantly, they allow you to apply functions inside the
    functor (hence the name).

    To learn more about Applicative functors:

    * http://www.davesquared.net/2012/05/fp-newbie-learns-applicatives.html
    """

    @abstractmethod
    def apply(self, something):
        """Apply wrapped callable.

        Python: apply(self: Applicative, something: Applicative[Callable[[A], B]]) -> Applicative
        Haskell: (<*>) :: f (a -> b) -> f a -> f b.

        Apply (<*>) is a beefed up fmap. It takes a functor value that
        has a function in it and another functor, and extracts that
        function from the first functor and then maps it over the second
        one.
        """
        raise NotImplementedError

    # Kept for reference: infix operator sugar for apply().
    #def __mul__(self, something):
    #    """(<*>) :: f (a -> b) -> f a -> f b.

    #    Provide the * as an infix version of apply() since we cannot
    #    represent the Haskell's <*> operator in Python.
    #    """
    #    return self.apply(something)

    #def lift_a2(self, func, b):
    #    """liftA2 :: (Applicative f) => (a -> b -> c) -> f a -> f b -> f c."""
    #    return func % self * b

    @classmethod
    @abstractmethod
    def pure(cls, fn: Callable[[TSource], TResult]) -> 'Applicative[TSource, TResult]':
        """Applicative functor constructor.

        Use pure if you're dealing with values in an applicative context
        (using them with <*>); otherwise, stick to the default class
        constructor.
        """
        raise NotImplementedError
| dbrattli/OSlash | oslash/typing/applicative.py | Python | apache-2.0 | 1,869 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_attention."""
import numpy as np
import tensorflow as tf
from multiple_user_representations.models import parametric_attention
class ParametricAttentionTest(tf.test.TestCase):
  """Checks output shapes of the SimpleParametricAttention model."""

  def _build_and_run(self, num_representations):
    """Builds a model and feeds it a random integer batch.

    Returns:
      A (model, output) tuple for the given number of representations.
    """
    model = parametric_attention.SimpleParametricAttention(
        output_dimension=2,
        input_embedding_dimension=2,
        vocab_size=10,
        num_representations=num_representations,
        max_sequence_size=20)
    batch = tf.convert_to_tensor(
        np.random.randint(low=0, high=10, size=(10, 20)))
    return model, model(batch)

  def test_parametric_attention_model_with_single_representation(self):
    model, output = self._build_and_run(num_representations=1)
    self.assertIsInstance(model, tf.keras.Model)
    self.assertSequenceEqual(output.numpy().shape, [10, 1, 2])

  def test_parametric_attention_model_with_multiple_representations(self):
    model, output = self._build_and_run(num_representations=3)
    self.assertIsInstance(model, tf.keras.Model)
    self.assertSequenceEqual(output.numpy().shape, [10, 3, 2])


if __name__ == '__main__':
  tf.test.main()
| google-research/google-research | multiple_user_representations/models/parametric_attention_test.py | Python | apache-2.0 | 1,915 |
from model.group import Group
class GroupHelper:
    """Page-object helper encapsulating all Selenium interactions with
    the application's group management pages.
    """

    # Cached result of get_group_list(); reset to None by every
    # mutating operation so the next read re-fetches from the page.
    group_cache = None

    def __init__(self, app):
        # 'app' is the application fixture that owns the webdriver (app.wd).
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/group.php")
                and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("groups").click()

    def change_field_value(self, field_name, text):
        """Replace the content of the input 'field_name' with 'text'.

        A None value means "leave the field unchanged".
        """
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_group_form(self, group):
        """Fill the group creation/edit form from a Group object."""
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)

    def create(self, group):
        """Create a new group and return to the groups page."""
        wd = self.app.wd
        self.open_groups_page()
        # init group creation
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        self.return_to_groups_page()
        self.group_cache = None

    def select_first_group(self):
        """Tick the checkbox of the first group in the list."""
        self.app.wd.find_element_by_name("selected[]").click()

    def select_group_by_index(self, index):
        """Tick the checkbox of the group at the given list position."""
        self.app.wd.find_elements_by_name("selected[]")[index].click()

    def select_group_by_id(self, id):
        """Tick the checkbox of the group with the given database id."""
        self.app.wd.find_element_by_css_selector(
            "input[value='%s']" % id).click()

    def delete_first_group(self):
        """Delete the first group in the list."""
        self.delete_group_by_index(0)

    def delete_group_by_index(self, index):
        """Delete the group at the given list position."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
        self.group_cache = None

    def delete_group_by_id(self, id):
        """Delete the group with the given database id."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_id(id)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
        self.group_cache = None

    def modify_first_group(self, new_group_data):
        """Modify the first group in the list.

        Bug fix: the original accepted and forwarded no group data, so
        every call crashed with a TypeError in modify_group_by_index().
        """
        self.modify_group_by_index(0, new_group_data)

    def modify_group_by_index(self, index, new_group_data):
        """Edit the group at the given list position with new data."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # open modification form
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
        self.group_cache = None

    def modify_group_by_id(self, id, new_group_data):
        """Edit the group with the given database id with new data."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_id(id)
        # open modification form
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
        self.group_cache = None

    def count(self):
        """Return the number of groups currently listed on the page."""
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))

    def return_to_groups_page(self):
        """Follow the confirmation link back to the groups page."""
        self.app.wd.find_element_by_link_text("group page").click()

    def get_group_list(self):
        """Return the groups shown on the page as Group objects (cached)."""
        if self.group_cache is None:
            wd = self.app.wd
            self.open_groups_page()
            self.group_cache = []
            for element in wd.find_elements_by_css_selector("span.group"):
                text = element.text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                self.group_cache.append(Group(name=text, id=id))
        return list(self.group_cache)
| EnigmaCK/Python_training | fixture/group.py | Python | apache-2.0 | 3,962 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Rastislav Szabo <raszabo@cisco.com>, Lukas Macko <lmacko@cisco.com>"
__copyright__ = "Copyright 2016, Cisco Systems, Inc."
__license__ = "Apache 2.0"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# sysrepod must be in PATH
from ConcurrentHelpers import *
import subprocess
import TestModule
import libsysrepoPython3
class SysrepoctlTester(SysrepoTester):
    """Tester step provider that drives the sysrepoctl command line
    tool to install and uninstall YANG modules.
    """

    # Path of the sysrepoctl binary inside the build tree.
    sysrepoctl = "{}/src/sysrepoctl".format(os.path.realpath(os.curdir))

    def installModuleStep(self, yang_file, log_level=sr.SR_LL_INF):
        """Install the given YANG file; the sysrepoctl call must succeed."""
        self.process = subprocess.Popen(
            [self.sysrepoctl, "-i", "--yang={0}".format(yang_file),
             "-L {0}".format(log_level)])
        rc = self.process.wait()
        self.tc.assertEqual(rc, 0)

    def uninstallModuleFailStep(self, module_name, log_level=sr.SR_LL_INF):
        """Attempt to uninstall a module, expecting a non-zero exit
        status (e.g. because the module is still in use).
        """
        self.process = subprocess.Popen(
            [self.sysrepoctl, "--uninstall",
             "--module={0}".format(module_name),
             "-L {0}".format(log_level)])
        rc = self.process.wait()
        # assertNotEqual replaces the deprecated assertNotEquals alias,
        # consistent with assertEqual used by the other steps.
        self.tc.assertNotEqual(rc, 0)

    def uninstallModuleStep(self, module_name, log_level=sr.SR_LL_INF):
        """Uninstall the given module; the sysrepoctl call must succeed."""
        self.process = subprocess.Popen(
            [self.sysrepoctl, "--uninstall",
             "--module={0}".format(module_name),
             "-L {0}".format(log_level)])
        rc = self.process.wait()
        self.tc.assertEqual(rc, 0)
class SchemasManagementTest(unittest.TestCase):
    """Concurrency tests for sysrepo schema (module) management."""

    @classmethod
    def setUpClass(self):
        # NOTE: as a classmethod the first argument is really the class
        # object, despite being named 'self' here.
        TestModule.create_test_module()

    def test_ModuleLoading(self):
        """Schemas are loaded on demand. Try to send multiple requests targeting the same model
        simultaneously. All of them should receive correct data.
        """
        tm = TestManager()
        srd = SysrepodDaemonTester("Srd")
        tester1 = SysrepoTester("First", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)
        tester2 = SysrepoTester("Second", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)
        tester3 = SysrepoTester("Third", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)
        tester4 = SysrepoTester("Fourth", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)
        # Phase 1: start the daemon while all testers wait.
        srd.add_step(srd.startDaemonStep)
        tester1.add_step(tester1.waitStep)
        tester2.add_step(tester2.waitStep)
        tester3.add_step(tester3.waitStep)
        tester4.add_step(tester4.waitStep)
        # Phase 2: all testers reconnect to the running daemon.
        srd.add_step(srd.waitStep)
        tester1.add_step(tester1.restartConnection)
        tester2.add_step(tester2.restartConnection)
        tester3.add_step(tester3.restartConnection)
        tester4.add_step(tester4.restartConnection)
        # Phase 3: concurrent reads of the same (lazily loaded) model.
        srd.add_step(srd.waitStep)
        tester1.add_step(tester1.getItemsStepExpectedCount, "/test-module:main/*", 19)
        tester2.add_step(tester2.getItemsStepExpectedCount, "/test-module:main/*", 19)
        tester3.add_step(tester3.getItemsStepExpectedCount, "/test-module:main/*", 19)
        tester4.add_step(tester4.getItemsStepExpectedCount, "/test-module:main/*", 19)
        srd.add_step(srd.stopDaemonStep)
        tm.add_tester(srd)
        tm.add_tester(tester1)
        tm.add_tester(tester2)
        tm.add_tester(tester3)
        tm.add_tester(tester4)
        tm.run()

    def test_module_uninstall(self):
        """A schema can not be uninstalled while it is used by a session.
        Test simulates the request of sysrepoctl trying to uninstall/install module.
        """
        tmp_file = "/tmp/test-module.yang"  # used to reinstall 'test-module' after uninstall
        dep_file = "/tmp/referenced-data.yang"  # 'test-module' depends on 'referenced-data'
        tm = TestManager()
        srd = SysrepodDaemonTester("Srd")
        tester1 = SysrepoTester("First", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)
        tester2 = SysrepoTester("Second", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)
        tester3 = SysrepoTester("Third", sr.SR_DS_STARTUP, sr.SR_CONN_DAEMON_REQUIRED, False)
        admin = SysrepoctlTester()
        srd.add_step(srd.startDaemonStep)
        tester1.add_step(tester1.waitStep)
        tester2.add_step(tester2.waitStep)
        tester3.add_step(tester3.waitStep)
        admin.add_step(admin.waitStep)
        srd.add_step(srd.waitStep)
        tester1.add_step(tester1.restartConnection)
        tester2.add_step(tester2.restartConnection)
        tester3.add_step(tester3.restartConnection)
        admin.add_step(admin.waitStep)
        # Keep the module busy: a read, a write and a lock.
        srd.add_step(srd.waitStep)
        tester1.add_step(tester1.getItemsStepExpectedCount, "/test-module:main/*", 19)
        tester2.add_step(tester2.setItemStep, "/test-module:main/string", sr.Val("abcd", sr.SR_STRING_T))
        tester3.add_step(tester3.lockModelStep, "test-module")
        admin.add_step(admin.waitStep)
        # unsuccessful try to uninstall (module is in use)
        srd.add_step(srd.waitStep)
        tester1.add_step(tester1.waitStep)
        tester2.add_step(tester2.waitStep)
        tester3.add_step(tester3.waitStep)
        admin.add_step(admin.uninstallModuleFailStep, "test-module")
        # export schema to file before uninstall and release lock
        srd.add_step(srd.waitStep)
        admin.add_step(admin.waitStep)
        tester1.add_step(tester1.getSchemaToFileStep, "test-module", tmp_file)
        tester2.add_step(tester2.getSchemaToFileStep, "referenced-data", dep_file)
        tester3.add_step(tester3.unlockModelStep, "test-module")
        # testers 1,2 close the session, tester 3 releases the lock -> module can be uninstalled
        srd.add_step(srd.waitStep)
        admin.add_step(admin.waitStep)
        tester1.add_step(tester1.stopSession)
        tester2.add_step(tester2.stopSession)
        tester3.add_step(tester3.waitStep)
        # uninstall succeeds now
        srd.add_step(srd.waitStep)
        admin.add_step(admin.uninstallModuleStep, "test-module")
        tester3.add_step(tester3.waitStep)
        # module is uninstalled: writes must fail
        srd.add_step(srd.waitStep)
        admin.add_step(admin.waitStep)
        tester3.add_step(tester3.setItemFailStep, "/test-module:main/string", sr.Val("abcd", sr.SR_STRING_T))
        # install module back
        srd.add_step(srd.waitStep)
        admin.add_step(admin.installModuleStep, tmp_file)
        tester3.add_step(tester3.waitStep)
        # requests work again
        srd.add_step(srd.waitStep)
        tester3.add_step(tester3.setItemStep, "/test-module:main/string", sr.Val("abcd", sr.SR_STRING_T))
        srd.add_step(srd.stopDaemonStep)
        tm.add_tester(srd)
        tm.add_tester(tester1)
        tm.add_tester(tester2)
        tm.add_tester(tester3)
        tm.add_tester(admin)
        tm.run()


if __name__ == '__main__':
    unittest.main()
| morganzhh/sysrepo | swig/python3/tests/SchemasManagementTest.py | Python | apache-2.0 | 7,156 |
import os
from setuptools import setup, find_packages
def read_file(filename):
    """Read *filename* (relative to this file's directory) into a string.

    Returns an empty string when the file cannot be opened, so callers
    can use the result directly without error handling.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(path, filename)
    try:
        # "with" guarantees the handle is closed (the original left the
        # file object to the garbage collector).
        with open(filepath) as f:
            return f.read()
    except IOError:
        return ''
# Use the docstring of the __init__ file to be the description
DESC = " ".join(__import__('cookiesession').__doc__.splitlines()).strip()

setup(
    name = 'django-cookiesession',
    # The package's version string may contain spaces; dashes keep it a
    # valid version identifier.
    version = __import__('cookiesession').get_version().replace(' ', '-'),
    url = 'http://github.com/washingtontimes/django-cookiesession',
    author = 'Justin Quick, Corey Oordt, The Washington Times',
    author_email = 'webdev@washingtontimes.com',
    description = DESC,
    long_description = read_file('README'),
    packages=find_packages(),
    include_package_data = True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
    ],
) | callowayproject/django-cookiesession | setup.py | Python | apache-2.0 | 1,070 |
# -*- coding: utf-8 -*-
__author__ = 'Alex Starov'
try:
from django.utils.simplejson import dumps
# import simplejson as json
except ImportError:
from json import dumps
# import json
from django.http import HttpResponse
def callback_data_send(request, ):
    """AJAX endpoint: accept a "call me back" request, store it and
    send notification mails to the manager and to the client.

    Returns a JSON {'result': 'Ok'|'Bad', ...} payload for AJAX POST
    requests and HTTP 400 for anything else.
    """
    if request.is_ajax():
        if request.method == 'POST':
            # request_cookie = request.session.get(u'cookie', None, )
            # if request_cookie:
            sessionid = request.POST.get(u'sessionid', None, )
            print('CallBack:', )
            print('sessionid: ', sessionid, )
            userid = request.POST.get(u'userid', False, )
            print('userid: ', userid, )
            print('userid type: ', type(userid, ), )
            # The widget posts the literal string 'None' for anonymous
            # visitors; normalize it to False before using it as an id.
            if userid == 'None':
                userid = False
            name = request.POST.get(u'name', None, )
            print('name: ', name.encode('utf8', ), )
            email = request.POST.get(u'email', None, )
            print('email: ', email, )
            phone = request.POST.get(u'phone', None, )
            print('phone: ', phone, )
            from applications.callback.models import CallBack
            try:
                if userid:
                    """ Error: invalid literal for int() with base 10: 'None' """
                    """ Ошибка вылазила из за того, что я пытался подсунуть вместо int() в user_id - None """
                    # (The strings above document a past bug: the string
                    # 'None' must never reach the integer user_id field;
                    # see the normalization above.)
                    print(userid, )
                    callback = CallBack.objects.create(sessionid=sessionid,
                                                       user_id=userid,
                                                       name=name,
                                                       email=email,
                                                       phone=phone, )
                else:
                    callback = CallBack.objects.create(sessionid=sessionid,
                                                       name=name,
                                                       email=email,
                                                       phone=phone, )
            except Exception as e:
                print('Exception: ', e, )
                print('Exception message: ', e.message, )
                response = {'result': 'Bad',
                            'error': e.message, }
                data = dumps(response, )
                mimetype = 'application/javascript'
                return HttpResponse(data, mimetype, )
            else:
                print(callback, )
                # Send the callback order notification to the manager.
                """ Отправка заказа обратного звонка """
                subject = u'Заказ обратного звонка от пользователя: %s на номер: %s. Интернет магазин Кексик.' % (name, phone, )
                from django.template.loader import render_to_string
                html_content = render_to_string('email_request_callback_content.html',
                                                {'name': name,
                                                 'email': email,
                                                 'phone': phone, }, )
                from django.utils.html import strip_tags
                text_content = strip_tags(html_content, )
                from_email = u'Интерент магазин Кексик <site@keksik.com.ua>'
                from django.core.mail import get_connection
                backend = get_connection(backend='django.core.mail.backends.smtp.EmailBackend',
                                         fail_silently=False, )
                from django.core.mail import EmailMultiAlternatives
                from proj.settings import Email_MANAGER
                msg = EmailMultiAlternatives(subject=subject,
                                             body=text_content,
                                             from_email=from_email,
                                             to=[Email_MANAGER, ],
                                             connection=backend, )
                msg.attach_alternative(content=html_content,
                                       mimetype="text/html", )
                msg.content_subtype = "html"
                msg.send(fail_silently=False, )
                # Send a thank-you confirmation to the client.
                """ Отправка благодарности клиенту. """
                subject = u'Ваш заказ обратного звонка с сайта принят. Интернет магазин Кексик.'
                html_content = render_to_string('email_successful_request_callback_content.html', )
                text_content = strip_tags(html_content, )
                # from_email = u'site@keksik.com.ua'
                to_email = email
                msg = EmailMultiAlternatives(subject=subject,
                                             body=text_content,
                                             from_email=from_email,
                                             to=[to_email, ],
                                             connection=backend, )
                msg.attach_alternative(content=html_content,
                                       mimetype="text/html", )
                from smtplib import SMTPSenderRefused, SMTPDataError
                try:
                    msg.send(fail_silently=False, )
                except SMTPSenderRefused as e:
                    # NOTE(review): 'e' is an exception object, not a
                    # string; json serialization of it may fail -- confirm.
                    response = {'result': 'Bad',
                                'error': e, }
                else:
                    response = {'result': 'Ok', }
                data = dumps(response, )
                mimetype = 'application/javascript'
                return HttpResponse(data, mimetype, )
            # else:
            #     response = {'result': 'Bad',
            #                 'error': u'Вы только-что зашли на сайт!!!', }
            #     data = dumps(response, )
            #     mimetype = 'application/javascript'
            #     return HttpResponse(data, mimetype, )
        elif request.method == 'GET':
            return HttpResponse(status=400, )
        else:
            return HttpResponse(status=400, )
    else:
        return HttpResponse(status=400, )
| AlexStarov/Shop | applications/ajax/callback.py | Python | apache-2.0 | 6,232 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType
from boto.ec2.blockdevicemapping import BlockDeviceMapping
import time
import copy
import argparse
import sys
import pprint
import os
import yaml
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(BASE_PATH, '../configs')
def launch_from_config(conn, instance_config_name, config_file_name):
    """Request EC2 spot instances described by one named entry of the
    YAML config file and block until all requests are resolved.

    Returns the instance ids of the fulfilled requests and tags them
    when the config provides a 'tags' section.
    """
    spot_requests_config = get_config(config_file_name)
    config = spot_requests_config[instance_config_name]
    mapping = create_mapping(config)
    print 'Launching %s instances'%(instance_config_name)
    print 'Instance parameters:'
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(config)
    spot_req = conn.request_spot_instances(
        config['price'],
        config['ami_id'],
        count=config['count'],
        type=config['type'],
        key_name=config['key_name'],
        instance_type=config['instance_type'],
        placement_group=config['placement_group'],
        security_group_ids=config['security_groups'],
        subnet_id=config['subnet_id'],
        instance_profile_name=config['instance_profile_name'],
        block_device_map=mapping
    )
    request_ids = [req.id for req in spot_req]
    print 'Waiting for fulfillment'
    # Pass a deep copy as the mutable "pending" list so the original
    # request id list is preserved.
    instance_ids = wait_for_fulfillment(conn, request_ids,
                                        copy.deepcopy(request_ids))
    if 'tags' in config:
        tag_instances(conn, instance_ids, config['tags'])
    return instance_ids
def get_config(config_file_name):
    """Load the spot-request YAML config and return it as a dict."""
    # "with" closes the handle (the original leaked it), and safe_load
    # refuses arbitrary YAML tags that plain yaml.load would execute.
    with open(os.path.join(CONFIG_PATH, config_file_name)) as config_file:
        return yaml.safe_load(config_file.read())
def create_mapping(config):
    """Build a boto BlockDeviceMapping from the optional 'mapping'
    section of the config ({ephemeral_name: device_path}).

    Returns None when no mapping is configured.
    """
    if 'mapping' not in config:
        return None
    mapping = BlockDeviceMapping()
    for ephemeral_name, device_path in config['mapping'].iteritems():
        ephemeral = BlockDeviceType()
        ephemeral.ephemeral_name = ephemeral_name
        mapping[device_path] = ephemeral
    return mapping
def wait_for_fulfillment(conn, request_ids, pending_request_ids):
    """Loop through all pending request ids waiting for them to be fulfilled.
    If a request is fulfilled, remove it from pending_request_ids.
    If there are still pending requests, sleep and check again in 10 seconds.
    Only return when all spot requests have been fulfilled."""
    instance_ids = []
    failed_ids = []
    time.sleep(10)
    # Requests in one of these states may still become fulfilled; any
    # other non-fulfilled state is terminal.
    pending_statuses = set(['pending-evaluation', 'pending-fulfillment'])
    while len(pending_request_ids) > 0:
        results = conn.get_all_spot_instance_requests(
            request_ids=pending_request_ids)
        for result in results:
            if result.status.code == 'fulfilled':
                pending_request_ids.pop(pending_request_ids.index(result.id))
                print '\nspot request %s fulfilled!'%result.id
                instance_ids.append(result.instance_id)
            elif result.status.code not in pending_statuses:
                # Terminal failure: stop waiting for this request.
                pending_request_ids.pop(pending_request_ids.index(result.id))
                print '\nspot request %s could not be fulfilled. ' \
                    'Status code: %s'%(result.id, result.status.code)
                failed_ids.append(result.id)
        if len(pending_request_ids) > 0:
            # Progress indicator while polling.
            sys.stdout.write('.')
            sys.stdout.flush()
            time.sleep(10)
    if len(failed_ids) > 0:
        print 'The following spot requests ' \
            'have failed: %s'%(', '.join(failed_ids))
    else:
        print 'All spot requests fulfilled!'
    return instance_ids
def tag_instances(conn, instance_ids, tags):
    """Apply every key/value pair in 'tags' to each listed instance."""
    instances = conn.get_only_instances(instance_ids=instance_ids)
    for instance in instances:
        for key, value in tags.iteritems():
            instance.add_tag(key=key, value=value)
def main():
    """Command line entry point: parse arguments and launch the
    requested spot instance configuration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('instance', type=str,
                        help='Instance config name to launch')
    parser.add_argument('-r', '--region', type=str, default='us-east-1',
                        help='EC2 region name')
    parser.add_argument('-c', '--config-file', type=str, default='spot_requests.yml',
                        help='Spot requests config file name')
    args = parser.parse_args()
    conn = boto.ec2.connect_to_region(args.region)
    config_file_name = args.config_file
    instance_config_name = args.instance
    launch_from_config(conn, instance_config_name, config_file_name)


if __name__ == '__main__':
    main()
| vianasw/spot_launcher | spot_launcher/spot_launcher.py | Python | apache-2.0 | 4,508 |
# -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2014 - 2016
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
======
Jobs
======
Jobs have been entered into the scheduler once. They may be even finished
already.
"""
if __doc__:  # pragma: no cover
    # Re-decode the escaped non-ASCII characters in the module
    # docstring so the copyright notice displays correctly.
    # pylint: disable = redefined-builtin
    __doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
import collections as _collections
import itertools as _it
from .. import _graph
from .. import interfaces as _interfaces
from .. import _lock
#: Exception raised on cycles, when a todo DAG is resolved
DependencyCycle = _graph.DependencyCycle

#: Job ID sequence; each call yields the next ID, starting at 1
#: (``.next`` is the Python 2 iterator method of ``itertools.count``)
#:
#: :Type: callable
_gen_id = _it.count(1).next
def last_job_id():
    """
    Determine the largest job ID assigned until now

    :Return: The ID. It's ``0``, if no job ID was assigned until now (job IDs
             start with ``1``)
    :Rtype: ``int``
    """
    # this inspects the counter iterable by calling pickling methods and
    # retrieving the next value from there and then subtracting one.
    # __reduce__ returns the factory ('count') and the argument tuple
    # containing the initial value (advanced with each call to next())
    # pylint: disable = no-member
    return _gen_id.__self__.__reduce__()[1][0] - 1
class Job(object):
    """
    Job after it has been scheduled.

    :See: `JobInterface`
    """
    __implements__ = [_interfaces.JobInterface]

    def __init__(self, job_id, desc, group, locks, importance, not_before,
                 extra, predecessors, attempts):
        """
        Initialization

        :Parameters:
          `job_id` : ``int``
            Job ID

          `desc` : `TodoDescription`
            Job description

          `group` : ``str``
            Job Group

          `locks` : iterable
            List of locks that need to be aquired (``(`LockInterface`, ...)``)

          `importance` : ``int``
            Job importance

          `not_before` : various
            execute job not before this time. Special formats are allowed:

            ``int``
              Number of seconds from now (delay)
            ``datetime.datetime``
              a specific point in time (server time). Use UTC if you can. For
              naive date times, UTC is assumed.

            If omitted or ``None``, ``0`` is assumed.

          `extra` : ``dict``
            Extra job data

          `predecessors` : iterable
            List of jobs to be run successfully before this one
            (``(int, ...)``)

          `attempts` : ``list``
            execution attempts (``[ExecutionAttemptInterface, ...]``)
        """
        self.id = job_id
        self.desc = desc
        self.group = group
        self.locks = _lock.validate(locks)
        self.locks_waiting = None
        self.importance = importance
        self.extra = extra
        self.predecessors = set()
        self.predecessors_waiting = None
        self.attempts = attempts
        self.not_before = not_before
        # Route each initial predecessor through depend_on() so it gets
        # the same validation as later additions.
        for item in predecessors or ():
            self.depend_on(item)

    def depend_on(self, job_id):
        """
        Add predecessor job ID

        Duplicates are silently ignored.

        :See: `interfaces.JobInterface.depend_on`
        """
        # Dependencies may only be added before the scheduler has
        # initialized the waiting set.
        assert self.predecessors_waiting is None

        try:
            job_id = int(job_id)
        except TypeError:
            raise ValueError("Invalid job_id: %r" % (job_id,))
        # A predecessor must be a previously assigned (i.e. smaller) ID.
        if job_id < 1 or job_id >= self.id:
            raise ValueError("Invalid job_id: %r" % (job_id,))
        self.predecessors.add(job_id)
def job_from_todo(todo):
    """
    Construct Job from Todo

    The job receives a freshly generated ID, empty extra data, no
    predecessors and no execution attempts.

    :Parameters:
      `todo` : `Todo`
        Todo to construct from

    :Return: New job instance
    :Rtype: `JobInterface`
    """
    return Job(
        _gen_id(), todo.desc, todo.group, todo.locks, todo.importance,
        todo.not_before, {}, set(), []
    )
def joblist_from_todo(todo):
    """
    Construct a list of jobs from Todo graph

    :Parameters:
      `todo` : `Todo`
        todo to be inspected.

    :Return: List of jobs (``[JobInterface, ...]``)
    :Rtype: ``list``
    """
    jobs, todos, virtuals = [], {}, {}
    toinspect = _collections.deque([(todo, None)])
    graph = _graph.DependencyGraph()

    # 1) fill the dependency graph with the todo nodes (detects cycles, too)
    # Graph nodes are (is_virtual, id) tuples: virtual IDs number the
    # todos discovered here, non-virtual IDs are existing job IDs.
    try:
        while toinspect:
            todo, parent = toinspect.pop()
            todo_id = id(todo)
            if todo_id in todos:
                virtual_id, pre, _ = todos[todo_id]
            else:
                pre = []
                virtual_id = len(virtuals)
                todos[todo_id] = virtual_id, pre, todo
                virtuals[virtual_id] = todo_id
                for parent_id in todo.predecessors():
                    graph.add((False, parent_id), (True, virtual_id))
                    pre.append((False, parent_id))
                for succ in todo.successors():
                    toinspect.appendleft((succ, (True, virtual_id)))
            if parent is not None:
                graph.add(parent, (True, virtual_id))
                pre.append(parent)
            else:
                graph.add((False, None), (True, virtual_id))
    except DependencyCycle as e:
        # remap to our input (todos and not some weird virtual IDs)
        raise DependencyCycle([
            todos[virtuals[tup[1]]][2] for tup in e.args[0]
        ])

    # 2) resolve the graph (create topological order)
    id_mapping = {}
    for is_virtual, virtual_id in graph.resolve():
        if is_virtual:
            _, pres, todo = todos[virtuals[virtual_id]]
            job = job_from_todo(todo)
            for is_virtual, pre in pres:
                if is_virtual:
                    # The predecessor was a todo from this batch:
                    # translate its virtual ID to the job ID just
                    # assigned (topological order guarantees it exists).
                    pre = id_mapping[pre]
                job.depend_on(pre)
            id_mapping[virtual_id] = job.id
            jobs.append(job)
    return jobs
| ndparker/wolfe | wolfe/scheduler/_job.py | Python | apache-2.0 | 6,561 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from sumy.models import TfDocumentModel
from sumy.nlp.tokenizers import Tokenizer
def test_no_tokenizer_with_string():
    # A raw string without a tokenizer cannot be split into terms.
    with pytest.raises(ValueError):
        TfDocumentModel("text without tokenizer")
def test_pretokenized_words():
    """Terms of a pre-tokenized model are lower-cased and de-duplicated."""
    doc_model = TfDocumentModel(("wA", "WB", "wB", "WA"))
    assert tuple(sorted(doc_model.terms)) == ("wa", "wb")
def test_pretokenized_words_frequencies():
    """Counting is case-insensitive; unknown terms report zero."""
    model = TfDocumentModel(("wC", "wC", "WC", "wA", "WB", "wB"))
    expected_counts = {"wa": 1, "wb": 2, "wc": 3, "wd": 0}
    for term, count in expected_counts.items():
        assert model.term_frequency(term) == count
    assert model.most_frequent_terms() == ("wc", "wb", "wa")
def test_magnitude():
    """Four distinct unit-frequency terms give magnitude sqrt(4) == 2."""
    model = TfDocumentModel("wA wB wC wD", Tokenizer("english"))
    assert model.magnitude == pytest.approx(2.0)
def test_terms():
    """Every distinct lower-cased word appears exactly once in ``terms``."""
    model = TfDocumentModel("wA wB wC wD wB wD wE", Tokenizer("english"))
    assert tuple(sorted(model.terms)) == ("wa", "wb", "wc", "wd", "we")
def test_term_frequency():
    """Frequencies are exact counts; absent terms report zero."""
    model = TfDocumentModel("wA wB wC wA wA wC wD wCwB", Tokenizer("english"))
    expected_counts = {
        "wa": 3,
        "wb": 1,
        "wc": 2,
        "wd": 1,
        "wcwb": 1,
        "we": 0,
        "missing": 0,
    }
    for word, count in expected_counts.items():
        assert model.term_frequency(word) == count
def test_most_frequent_terms():
    """``most_frequent_terms`` honours both the limit and frequency order."""
    text = "wE wD wC wB wA wE WD wC wB wE wD WE wC wD wE"
    model = TfDocumentModel(text, Tokenizer("english"))
    ranking = ("we", "wd", "wc", "wb", "wa")
    for limit in range(1, len(ranking) + 1):
        assert model.most_frequent_terms(limit) == ranking[:limit]
    # No limit means "all terms", still ordered by frequency.
    assert model.most_frequent_terms() == ranking
def test_most_frequent_terms_empty():
    """An empty document yields no frequent terms, with or without a limit."""
    empty_model = TfDocumentModel("", Tokenizer("english"))
    assert empty_model.most_frequent_terms() == ()
    assert empty_model.most_frequent_terms(10) == ()
def test_most_frequent_terms_negative_count():
    """A negative limit is invalid and must raise ValueError."""
    model = TfDocumentModel("text", Tokenizer("english"))
    with pytest.raises(ValueError):
        model.most_frequent_terms(-1)
def test_normalized_words_frequencies():
    """Normalized frequency divides each count by the maximum count (5)."""
    model = TfDocumentModel(tuple("a b c d e c b d c e e d e d e".split()))
    # Terms "a".."e" occur 1..5 times respectively.
    for count, term in enumerate("abcde", start=1):
        assert model.normalized_term_frequency(term) == pytest.approx(count / 5)
    assert model.normalized_term_frequency("z") == pytest.approx(0.0)
    assert model.most_frequent_terms() == ("e", "d", "c", "b", "a")
def test_normalized_words_frequencies_with_smoothing_term():
    """With smoothing 0.5 the value is ``0.5 + 0.5 * tf / max_tf``."""
    model = TfDocumentModel(tuple("a b c d e c b d c e e d e d e".split()))
    # Terms "a".."e" occur 1..5 times; max frequency is 5.
    for count, term in enumerate("abcde", start=1):
        expected = 0.5 + count / 10
        assert model.normalized_term_frequency(term, 0.5) == pytest.approx(expected)
    # Unknown terms collapse to the smoothing term alone.
    assert model.normalized_term_frequency("z", 0.5) == pytest.approx(0.5)
    assert model.most_frequent_terms() == ("e", "d", "c", "b", "a")
| miso-belica/sumy | tests/test_models/test_tf.py | Python | apache-2.0 | 4,023 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual bases classes for uploading media via Google APIs.
Supported here are:
* simple (media) uploads
* multipart uploads that contain both metadata and a small file as payload
* resumable uploads (with metadata as well)
"""
import http.client
import json
import os
import random
import sys
from google import _async_resumable_media
from google._async_resumable_media import _helpers
from google.resumable_media import _helpers as sync_helpers
from google.resumable_media import _upload as sync_upload
from google.resumable_media import common
from google.resumable_media._upload import (
_CONTENT_TYPE_HEADER,
_CONTENT_RANGE_TEMPLATE,
_RANGE_UNKNOWN_TEMPLATE,
_EMPTY_RANGE_TEMPLATE,
_BOUNDARY_FORMAT,
_MULTIPART_SEP,
_CRLF,
_MULTIPART_BEGIN,
_RELATED_HEADER,
_BYTES_RANGE_RE,
_STREAM_ERROR_TEMPLATE,
_POST,
_PUT,
_UPLOAD_CHECKSUM_MISMATCH_MESSAGE,
_UPLOAD_METADATA_NO_APPROPRIATE_CHECKSUM_MESSAGE,
)
class UploadBase(object):
    """Shared plumbing for the upload helper classes.
    Tracks the target URL, the extra request headers and whether the
    upload has completed, and declares the transport-specific hooks
    (status code / header / body accessors) that subclasses provide.
    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """
    def __init__(self, upload_url, headers=None):
        self.upload_url = upload_url
        # Keep a reference (not a copy): subclasses mutate this mapping
        # when preparing requests.
        self._headers = {} if headers is None else headers
        self._finished = False
        self._retry_strategy = common.RetryStrategy()
    @property
    def finished(self):
        """bool: Flag indicating if the upload has completed."""
        return self._finished
    def _process_response(self, response):
        """Check a completed request's response and tombstone the upload.
        Sans-I/O post-processing: no network traffic happens here.
        Args:
            response (object): The HTTP response object.
        Raises:
            ~google.resumable_media.common.InvalidResponse: If the status
                code is not 200.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        # Mark finished *before* validating, so the upload cannot be
        # reused after either success or failure.
        self._finished = True
        _helpers.require_status_code(response, (http.client.OK,), self._get_status_code)
    @staticmethod
    def _get_status_code(response):
        """Access the status code from an HTTP response.
        Args:
            response (object): The HTTP response object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
    @staticmethod
    def _get_headers(response):
        """Access the headers from an HTTP response.
        Args:
            response (object): The HTTP response object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
    @staticmethod
    def _get_body(response):
        """Access the response body from an HTTP response.
        Args:
            response (object): The HTTP response object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
class SimpleUpload(UploadBase):
    """Upload a resource to a Google API in a single request.
    A **simple** media upload carries no metadata; the payload is the
    whole request body.
    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """
    def _prepare_request(self, data, content_type):
        """Assemble the pieces of the upload HTTP request (sans-I/O).
        .. note:
            Used at most once per instance; the stored headers are
            mutated by adding the content-type key.
        Args:
            data (bytes): The resource content to be uploaded.
            content_type (str): The content type for the request.
        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: Quadruple of
            HTTP verb (always POST), request URL, request body and
            request headers.
        Raises:
            ValueError: If the current upload has already finished.
            TypeError: If ``data`` isn't bytes.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("An upload can only be used once.")
        if not isinstance(data, bytes):
            raise TypeError("`data` must be bytes, received", type(data))
        self._headers[_CONTENT_TYPE_HEADER] = content_type
        return _POST, self.upload_url, data, self._headers
    def transmit(self, transport, data, content_type, timeout=None):
        """Transmit the resource to be uploaded.
        Args:
            transport (object): An object which can make authenticated
                requests.
            data (bytes): The resource content to be uploaded.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
class MultipartUpload(UploadBase):
    """Upload a resource together with its metadata to a Google API.
    A **multipart** upload sends metadata and content in one
    (multipart) request.
    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. The request metadata will be amended
            to include the computed value. Using this option will override a
            manually-set checksum value. Supported values are "md5", "crc32c"
            and None. The default is None.
    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """
    def __init__(self, upload_url, headers=None, checksum=None):
        super(MultipartUpload, self).__init__(upload_url, headers=headers)
        self._checksum_type = checksum
    def _prepare_request(self, data, metadata, content_type):
        """Assemble the pieces of the multipart HTTP request (sans-I/O).
        .. note:
            Used at most once per instance; the stored headers are
            mutated by adding the content-type key.
        Args:
            data (bytes): The resource content to be uploaded.
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: Quadruple of
            HTTP verb (always POST), request URL, request body and
            request headers.
        Raises:
            ValueError: If the current upload has already finished.
            TypeError: If ``data`` isn't bytes.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("An upload can only be used once.")
        if not isinstance(data, bytes):
            raise TypeError("`data` must be bytes, received", type(data))
        # When a checksum type was requested, compute the digest over the
        # payload and inject it into the metadata part.
        digester = sync_helpers._get_checksum_object(self._checksum_type)
        if digester is not None:
            digester.update(data)
            metadata_key = sync_helpers._get_metadata_key(self._checksum_type)
            metadata[metadata_key] = sync_helpers.prepare_checksum_digest(
                digester.digest()
            )
        payload, boundary = construct_multipart_request(
            data, metadata, content_type
        )
        # _RELATED_HEADER ends with 'boundary="'; close the quote here.
        self._headers[_CONTENT_TYPE_HEADER] = _RELATED_HEADER + boundary + b'"'
        return _POST, self.upload_url, payload, self._headers
    def transmit(self, transport, data, metadata, content_type, timeout=None):
        """Transmit the resource to be uploaded.
        Args:
            transport (object): An object which can make authenticated
                requests.
            data (bytes): The resource content to be uploaded.
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
class ResumableUpload(UploadBase, sync_upload.ResumableUpload):
    """Initiate and fulfill a resumable upload to a Google API.
    A **resumable** upload sends an initial request with the resource metadata
    and then gets assigned an upload ID / upload URL to send bytes to.
    Using the upload URL, the upload is then done in chunks (determined by
    the user) until all bytes have been uploaded.
    Args:
        upload_url (str): The URL where the resumable upload will be initiated.
        chunk_size (int): The size of each chunk used to upload the resource.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the :meth:`initiate` request, e.g. headers for
            encrypted data. These **will not** be sent with
            :meth:`transmit_next_chunk` or :meth:`recover` requests.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. After the upload is complete, the
            server-computed checksum of the resulting object will be read
            and google.resumable_media.common.DataCorruption will be raised on
            a mismatch. The corrupted file will not be deleted from the remote
            host automatically. Supported values are "md5", "crc32c" and None.
            The default is None.
    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    Raises:
        ValueError: If ``chunk_size`` is not a multiple of
            :data:`.UPLOAD_CHUNK_SIZE`.
    """
    def __init__(self, upload_url, chunk_size, checksum=None, headers=None):
        super(ResumableUpload, self).__init__(upload_url, headers=headers)
        # The API only accepts chunks in multiples of UPLOAD_CHUNK_SIZE
        # (except for the final chunk), so reject anything else up front.
        if chunk_size % _async_resumable_media.UPLOAD_CHUNK_SIZE != 0:
            raise ValueError(
                "{} KB must divide chunk size".format(
                    _async_resumable_media.UPLOAD_CHUNK_SIZE / 1024
                )
            )
        self._chunk_size = chunk_size
        # Mutable upload state, populated by initiate() and advanced by
        # each transmitted chunk.
        self._stream = None            # source stream set in initiate()
        self._content_type = None      # content type set in initiate()
        self._bytes_uploaded = 0       # confirmed-by-server byte count
        self._bytes_checksummed = 0    # bytes already fed to the digest
        self._checksum_type = checksum
        self._checksum_object = None   # lazily created digest object
        self._total_bytes = None       # None until known (see initiate())
        self._resumable_url = None     # session URL from the initiate response
        self._invalid = False          # set when a chunk request fails
    @property
    def invalid(self):
        """bool: Indicates if the upload is in an invalid state.
        This will occur if a call to :meth:`transmit_next_chunk` fails.
        To recover from such a failure, call :meth:`recover`.
        """
        return self._invalid
    @property
    def chunk_size(self):
        """int: The size of each chunk used to upload the resource."""
        return self._chunk_size
    @property
    def resumable_url(self):
        """Optional[str]: The URL of the in-progress resumable upload."""
        return self._resumable_url
    @property
    def bytes_uploaded(self):
        """int: Number of bytes that have been uploaded."""
        return self._bytes_uploaded
    @property
    def total_bytes(self):
        """Optional[int]: The total number of bytes to be uploaded.
        If this upload is initiated (via :meth:`initiate`) with
        ``stream_final=True``, this value will be populated based on the size
        of the ``stream`` being uploaded. (By default ``stream_final=True``.)
        If this upload is initiated with ``stream_final=False``,
        :attr:`total_bytes` will be :data:`None` since it cannot be
        determined from the stream.
        """
        return self._total_bytes
    def _prepare_initiate_request(
        self, stream, metadata, content_type, total_bytes=None, stream_final=True
    ):
        """Prepare the contents of HTTP request to initiate upload.
        This is everything that must be done before a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.
        Args:
            stream (IO[bytes]): The stream (i.e. file-like object) that will
                be uploaded. The stream **must** be at the beginning (i.e.
                ``stream.tell() == 0``).
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            total_bytes (Optional[int]): The total number of bytes to be
                uploaded. If specified, the upload size **will not** be
                determined from the stream (even if ``stream_final=True``).
            stream_final (Optional[bool]): Indicates if the ``stream`` is
                "final" (i.e. no more bytes will be added to it). In this case
                we determine the upload size from the size of the stream. If
                ``total_bytes`` is passed, this argument will be ignored.
        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple
              * HTTP verb for the request (always POST)
              * the URL for the request
              * the body of the request
              * headers for the request
        Raises:
            ValueError: If the current upload has already been initiated.
            ValueError: If ``stream`` is not at the beginning.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.resumable_url is not None:
            raise ValueError("This upload has already been initiated.")
        if stream.tell() != 0:
            raise ValueError("Stream must be at beginning.")
        self._stream = stream
        self._content_type = content_type
        # The initiate request body is JSON metadata; the eventual media
        # content type is communicated via the x-upload-content-type header.
        headers = {
            _CONTENT_TYPE_HEADER: "application/json; charset=UTF-8",
            "x-upload-content-type": content_type,
        }
        # Set the total bytes if possible.
        if total_bytes is not None:
            self._total_bytes = total_bytes
        elif stream_final:
            self._total_bytes = get_total_bytes(stream)
        # Add the total bytes to the headers if set.
        if self._total_bytes is not None:
            content_length = "{:d}".format(self._total_bytes)
            headers["x-upload-content-length"] = content_length
        # Caller-supplied headers win over the computed defaults above.
        headers.update(self._headers)
        payload = json.dumps(metadata).encode("utf-8")
        return _POST, self.upload_url, payload, headers
    def _process_initiate_response(self, response):
        """Process the response from an HTTP request that initiated upload.
        This is everything that must be done after a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.
        This method takes the URL from the ``Location`` header and stores it
        for future use. Within that URL, we assume the ``upload_id`` query
        parameter has been included, but we do not check.
        Args:
            response (object): The HTTP response object (need headers).
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        _helpers.require_status_code(
            response,
            (http.client.OK,),
            self._get_status_code,
            callback=self._make_invalid,
        )
        # The Location header carries the session URI all chunks are PUT to.
        self._resumable_url = _helpers.header_required(
            response, "location", self._get_headers
        )
    def initiate(
        self,
        transport,
        stream,
        metadata,
        content_type,
        total_bytes=None,
        stream_final=True,
        timeout=None,
    ):
        """Initiate a resumable upload.
        By default, this method assumes your ``stream`` is in a "final"
        state ready to transmit. However, ``stream_final=False`` can be used
        to indicate that the size of the resource is not known. This can happen
        if bytes are being dynamically fed into ``stream``, e.g. if the stream
        is attached to application logs.
        If ``stream_final=False`` is used, :attr:`chunk_size` bytes will be
        read from the stream every time :meth:`transmit_next_chunk` is called.
        If one of those reads produces strictly fewer bytes than the chunk
        size, the upload will be concluded.
        Args:
            transport (object): An object which can make authenticated
                requests.
            stream (IO[bytes]): The stream (i.e. file-like object) that will
                be uploaded. The stream **must** be at the beginning (i.e.
                ``stream.tell() == 0``).
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            total_bytes (Optional[int]): The total number of bytes to be
                uploaded. If specified, the upload size **will not** be
                determined from the stream (even if ``stream_final=True``).
            stream_final (Optional[bool]): Indicates if the ``stream`` is
                "final" (i.e. no more bytes will be added to it). In this case
                we determine the upload size from the size of the stream. If
                ``total_bytes`` is passed, this argument will be ignored.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
    def _prepare_request(self):
        """Prepare the contents of HTTP request to upload a chunk.
        This is everything that must be done before a request that doesn't
        require network I/O. This is based on the `sans-I/O`_ philosophy.
        For the time being, this **does require** some form of I/O to read
        a chunk from ``stream`` (via :func:`get_next_chunk`). However, this
        will (almost) certainly not be network I/O.
        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple
              * HTTP verb for the request (always PUT)
              * the URL for the request
              * the body of the request
              * headers for the request
            The headers **do not** incorporate the ``_headers`` on the
            current instance.
        Raises:
            ValueError: If the current upload has finished.
            ValueError: If the current upload is in an invalid state.
            ValueError: If the current upload has not been initiated.
            ValueError: If the location in the stream (i.e. ``stream.tell()``)
                does not agree with ``bytes_uploaded``.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("Upload has finished.")
        if self.invalid:
            raise ValueError(
                "Upload is in an invalid state. To recover call `recover()`."
            )
        if self.resumable_url is None:
            raise ValueError(
                "This upload has not been initiated. Please call "
                "initiate() before beginning to transmit chunks."
            )
        start_byte, payload, content_range = get_next_chunk(
            self._stream, self._chunk_size, self._total_bytes
        )
        # The stream position must agree with what the server has already
        # confirmed, otherwise bytes would be skipped or duplicated.
        if start_byte != self.bytes_uploaded:
            msg = _STREAM_ERROR_TEMPLATE.format(start_byte, self.bytes_uploaded)
            raise ValueError(msg)
        # Fold the chunk into the running checksum (inherited helper keeps
        # track via _bytes_checksummed so retried chunks are not re-digested).
        self._update_checksum(start_byte, payload)
        headers = {
            _CONTENT_TYPE_HEADER: self._content_type,
            _helpers.CONTENT_RANGE_HEADER: content_range,
        }
        return _PUT, self.resumable_url, payload, headers
    def _make_invalid(self):
        """Simple setter for ``invalid``.
        This is intended to be passed along as a callback to helpers that
        raise an exception so they can mark this instance as invalid before
        raising.
        """
        self._invalid = True
    async def _process_resumable_response(self, response, bytes_sent):
        """Process the response from an HTTP request.
        This is everything that must be done after a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.
        Args:
            response (object): The HTTP response object.
            bytes_sent (int): The number of bytes sent in the request that
                ``response`` was returned for.
        Raises:
            ~google.resumable_media.common.InvalidResponse: If the status
                code is 308 and the ``range`` header is not of the form
                ``bytes 0-{end}``.
            ~google.resumable_media.common.InvalidResponse: If the status
                code is not 200 or 308.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        status_code = _helpers.require_status_code(
            response,
            (http.client.OK, http.client.PERMANENT_REDIRECT),
            self._get_status_code,
            callback=self._make_invalid,
        )
        if status_code == http.client.OK:
            # NOTE: We use the "local" information of ``bytes_sent`` to update
            #       ``bytes_uploaded``, but do not verify this against other
            #       state. However, there may be some other information:
            #
            #       * a ``size`` key in JSON response body
            #       * the ``total_bytes`` attribute (if set)
            #       * ``stream.tell()`` (relying on fact that ``initiate()``
            #         requires stream to be at the beginning)
            self._bytes_uploaded = self._bytes_uploaded + bytes_sent
            # Tombstone the current upload so it cannot be used again.
            self._finished = True
            # Validate the checksum. This can raise an exception on failure.
            await self._validate_checksum(response)
        else:
            # 308: the server acknowledges a prefix of the upload; its
            # ``range`` header is authoritative for bytes_uploaded.
            bytes_range = _helpers.header_required(
                response,
                _helpers.RANGE_HEADER,
                self._get_headers,
                callback=self._make_invalid,
            )
            match = _BYTES_RANGE_RE.match(bytes_range)
            if match is None:
                self._make_invalid()
                raise common.InvalidResponse(
                    response,
                    'Unexpected "range" header',
                    bytes_range,
                    'Expected to be of the form "bytes=0-{end}"',
                )
            self._bytes_uploaded = int(match.group("end_byte")) + 1
    async def _validate_checksum(self, response):
        """Check the computed checksum, if any, against the response headers.
        Args:
            response (object): The HTTP response object.
        Raises:
            ~google.resumable_media.common.DataCorruption: If the checksum
            computed locally and the checksum reported by the remote host do
            not match.
        """
        # No checksum type requested: nothing to validate.
        if self._checksum_type is None:
            return
        metadata_key = sync_helpers._get_metadata_key(self._checksum_type)
        metadata = await response.json()
        remote_checksum = metadata.get(metadata_key)
        if remote_checksum is None:
            raise common.InvalidResponse(
                response,
                _UPLOAD_METADATA_NO_APPROPRIATE_CHECKSUM_MESSAGE.format(metadata_key),
                self._get_headers(response),
            )
        local_checksum = sync_helpers.prepare_checksum_digest(
            self._checksum_object.digest()
        )
        if local_checksum != remote_checksum:
            raise common.DataCorruption(
                response,
                _UPLOAD_CHECKSUM_MISMATCH_MESSAGE.format(
                    self._checksum_type.upper(), local_checksum, remote_checksum
                ),
            )
    def transmit_next_chunk(self, transport, timeout=None):
        """Transmit the next chunk of the resource to be uploaded.
        If the current upload was initiated with ``stream_final=False``,
        this method will dynamically determine if the upload has completed.
        The upload will be considered complete if the stream produces
        fewer than :attr:`chunk_size` bytes when a chunk is read from it.
        Args:
            transport (object): An object which can make authenticated
                requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
    def _prepare_recover_request(self):
        """Prepare the contents of HTTP request to recover from failure.
        This is everything that must be done before a request that doesn't
        require network I/O. This is based on the `sans-I/O`_ philosophy.
        We assume that the :attr:`resumable_url` is set (i.e. the only way
        the upload can end up :attr:`invalid` is if it has been initiated).
        Returns:
            Tuple[str, str, NoneType, Mapping[str, str]]: The quadruple
              * HTTP verb for the request (always PUT)
              * the URL for the request
              * the body of the request (always :data:`None`)
              * headers for the request
            The headers **do not** incorporate the ``_headers`` on the
            current instance.
        Raises:
            ValueError: If the current upload is not in an invalid state.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if not self.invalid:
            raise ValueError("Upload is not in invalid state, no need to recover.")
        # "bytes */*" asks the server to report how much it has received.
        headers = {_helpers.CONTENT_RANGE_HEADER: "bytes */*"}
        return _PUT, self.resumable_url, None, headers
    def _process_recover_response(self, response):
        """Process the response from an HTTP request to recover from failure.
        This is everything that must be done after a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.
        Args:
            response (object): The HTTP response object.
        Raises:
            ~google.resumable_media.common.InvalidResponse: If the status
                code is not 308.
            ~google.resumable_media.common.InvalidResponse: If the status
                code is 308 and the ``range`` header is not of the form
                ``bytes 0-{end}``.
        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        _helpers.require_status_code(
            response,
            (http.client.PERMANENT_REDIRECT,),
            self._get_status_code,
        )
        headers = self._get_headers(response)
        if _helpers.RANGE_HEADER in headers:
            bytes_range = headers[_helpers.RANGE_HEADER]
            match = _BYTES_RANGE_RE.match(bytes_range)
            if match is None:
                raise common.InvalidResponse(
                    response,
                    'Unexpected "range" header',
                    bytes_range,
                    'Expected to be of the form "bytes=0-{end}"',
                )
            self._bytes_uploaded = int(match.group("end_byte")) + 1
        else:
            # In this case, the upload has not "begun".
            self._bytes_uploaded = 0
        # Rewind the stream to the first unconfirmed byte and clear the
        # invalid flag so transmit_next_chunk() may be used again.
        self._stream.seek(self._bytes_uploaded)
        self._invalid = False
    def recover(self, transport):
        """Recover from a failure.
        This method should be used when a :class:`ResumableUpload` is in an
        :attr:`~ResumableUpload.invalid` state due to a request failure.
        This will verify the progress with the server and make sure the
        current upload is in a valid state before :meth:`transmit_next_chunk`
        can be used again.
        Args:
            transport (object): An object which can make authenticated
                requests.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
def get_boundary():
    """Get a random boundary for a multipart request.
    Returns:
        bytes: The boundary used to separate parts of a multipart request.
    """
    token = random.randrange(sys.maxsize)
    # NOTE: The template is a unicode string because byte-string formatting
    #       was unavailable on older Pythons; encode the rendered result.
    return _BOUNDARY_FORMAT.format(token).encode("utf-8")
def construct_multipart_request(data, metadata, content_type):
    """Construct a multipart request body.
    Args:
        data (bytes): The resource content (UTF-8 encoded as bytes)
            to be uploaded.
        metadata (Mapping[str, str]): The resource metadata, such as an
            ACL list.
        content_type (str): The content type of the resource, e.g. a JPEG
            image has content type ``image/jpeg``.
    Returns:
        Tuple[bytes, bytes]: The multipart request body and the boundary used
        between each part.
    """
    boundary = get_boundary()
    separator = _MULTIPART_SEP + boundary
    metadata_bytes = json.dumps(metadata).encode("utf-8")
    # Assemble the two parts (JSON metadata, then media) around the
    # boundary separators; join once at the end instead of chaining ``+``.
    pieces = (
        separator,
        _MULTIPART_BEGIN,
        metadata_bytes,
        _CRLF,
        separator,
        _CRLF,
        b"content-type: ",
        content_type.encode("utf-8"),
        _CRLF,
        _CRLF,  # empty line between part headers and body
        data,
        _CRLF,
        separator,
        _MULTIPART_SEP,  # trailing "--" closes the multipart payload
    )
    return b"".join(pieces), boundary
def get_total_bytes(stream):
    """Determine the total number of bytes in a stream.
    The stream's position is restored before returning.
    Args:
        stream (IO[bytes]): The stream (i.e. file-like object).
    Returns:
        int: The number of bytes.
    """
    marker = stream.tell()
    # NOTE: ``.seek()`` **should** return the new offset, but Python 2
    #       ``file`` objects do not, so ``.tell()`` is used instead.
    stream.seek(0, os.SEEK_END)
    size = stream.tell()
    # Restore the caller's position.
    stream.seek(marker)
    return size
def get_next_chunk(stream, chunk_size, total_bytes):
    """Get a chunk from an I/O stream.
    The ``stream`` may have fewer bytes remaining than ``chunk_size``
    so it may not always be the case that
    ``end_byte == start_byte + chunk_size - 1``.
    Args:
        stream (IO[bytes]): The stream (i.e. file-like object).
        chunk_size (int): The size of the chunk to be read from the ``stream``.
        total_bytes (Optional[int]): The (expected) total number of bytes
            in the ``stream``.
    Returns:
        Tuple[int, bytes, str]: Triple of the start byte index, the content
        in between the start and end bytes (inclusive) and the content range
        header for the chunk (slice) that has been read.
    Raises:
        ValueError: If ``total_bytes == 0`` but ``stream.read()`` yields
            non-empty content.
        ValueError: If there is no data left to consume. This corresponds
            exactly to the case ``end_byte < start_byte``, which can only
            occur if ``end_byte == start_byte - 1``.
    """
    start_byte = stream.tell()
    # When the overall size is known and positive, never read past it.
    if total_bytes is not None and 0 < total_bytes <= start_byte + chunk_size:
        payload = stream.read(total_bytes - start_byte)
    else:
        payload = stream.read(chunk_size)
    end_byte = stream.tell() - 1
    bytes_read = len(payload)
    if total_bytes is None:
        # A short read means the stream is exhausted, so its size is
        # now known exactly.
        if bytes_read < chunk_size:
            total_bytes = end_byte + 1
    elif total_bytes == 0:
        # NOTE: ``start_byte == 0`` is expected here but not checked,
        #       since ``_prepare_initiate_request()`` requires the
        #       stream to be at the beginning.
        if bytes_read != 0:
            raise ValueError(
                "Stream specified as empty, but produced non-empty content."
            )
    elif bytes_read == 0:
        raise ValueError(
            "Stream is already exhausted. There is no content remaining."
        )
    return start_byte, payload, get_content_range(start_byte, end_byte, total_bytes)
def get_content_range(start_byte, end_byte, total_bytes):
    """Build a ``Content-Range`` header value from start/end/total.

    When ``total_bytes`` is unknown, produces "bytes {start}-{end}/*".
    When the range is empty (``end_byte < start_byte``), produces
    "bytes */{total}".  This function **ASSUMES** callers never combine
    an unknown size with an empty range.

    Args:
        start_byte (int): The start (inclusive) of the byte range.
        end_byte (int): The end (inclusive) of the byte range.
        total_bytes (Optional[int]): The number of bytes in the byte
            range (if known).

    Returns:
        str: The content range header.
    """
    if total_bytes is None:
        return _RANGE_UNKNOWN_TEMPLATE.format(start_byte, end_byte)
    if end_byte < start_byte:
        return _EMPTY_RANGE_TEMPLATE.format(total_bytes)
    return _CONTENT_RANGE_TEMPLATE.format(start_byte, end_byte, total_bytes)
| googleapis/google-resumable-media-python | google/_async_resumable_media/_upload.py | Python | apache-2.0 | 37,337 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators.base import DeprecatedMixin
from tensorflow.python.ops import nn
class DNNClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
    """A classifier for TensorFlow DNN models.

    Example:
    ```
    installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
    impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
    installed_emb = embedding_column(installed_app_id, dimension=16,
                                     combiner="sum")
    impression_emb = embedding_column(impression_app_id, dimension=16,
                                      combiner="sum")
    estimator = DNNClassifier(
        feature_columns=[installed_emb, impression_emb],
        hidden_units=[1024, 512, 256])

    # Input builders
    def input_fn_train():  # returns x, Y
      pass
    estimator.fit(input_fn=input_fn_train)

    def input_fn_eval():  # returns x, Y
      pass
    estimator.evaluate(input_fn=input_fn_eval)
    estimator.predict(x=x)
    ```

    Input of `fit` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:
      if `weight_column_name` is not `None`, a feature with
        `key=weight_column_name` whose value is a `Tensor`.
      for each `column` in `feature_columns`:
      - if `column` is a `SparseColumn`, a feature with `key=column.name`
        whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
        whose `value` is a `Tensor`.
      - if `feature_columns` is None, then `input` must contain only real
        valued `Tensor`.

    Parameters:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. [64, 32] means first layer has 64 nodes and second
        one has 32.
      feature_columns: An iterable containing all the feature columns used
        by the model. All items in the set should be instances of classes
        derived from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc.
      n_classes: number of target classes. Default is binary classification.
        It must be greater than 1.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training.
        It will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out a given
        coordinate.
    """
    def __init__(self,
                 hidden_units,
                 feature_columns=None,
                 model_dir=None,
                 n_classes=2,
                 weight_column_name=None,
                 optimizer=None,
                 activation_fn=nn.relu,
                 dropout=None,
                 config=None):
        # A pure-DNN model is a DNNLinearCombined model with no linear part:
        # only the dnn_* arguments of the parent are populated.
        super(DNNClassifier, self).__init__(model_dir=model_dir,
                                            n_classes=n_classes,
                                            weight_column_name=weight_column_name,
                                            dnn_feature_columns=feature_columns,
                                            dnn_optimizer=optimizer,
                                            dnn_hidden_units=hidden_units,
                                            dnn_activation_fn=activation_fn,
                                            dnn_dropout=dropout,
                                            config=config)
    def _get_train_ops(self, features, targets):
        """See base class."""
        # No columns were supplied at construction: infer real-valued
        # columns from the training features on first use.
        if self._dnn_feature_columns is None:
            self._dnn_feature_columns = layers.infer_real_valued_columns(features)
        return super(DNNClassifier, self)._get_train_ops(features, targets)
    @property
    def weights_(self):
        # Expose the DNN part's weights under the sklearn-style name.
        return self.dnn_weights_
    @property
    def bias_(self):
        # Expose the DNN part's biases under the sklearn-style name.
        return self.dnn_bias_
class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
    """A regressor for TensorFlow DNN models.

    Example:
    ```
    installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
    impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
    installed_emb = embedding_column(installed_app_id, dimension=16,
                                     combiner="sum")
    impression_emb = embedding_column(impression_app_id, dimension=16,
                                      combiner="sum")
    estimator = DNNRegressor(
        feature_columns=[installed_emb, impression_emb],
        hidden_units=[1024, 512, 256])

    # Input builders
    def input_fn_train():  # returns x, Y
      pass
    estimator.fit(input_fn=input_fn_train)

    def input_fn_eval():  # returns x, Y
      pass
    estimator.evaluate(input_fn=input_fn_eval)
    estimator.predict(x=x)
    ```

    Input of `fit` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:
      if `weight_column_name` is not `None`, a feature with
        `key=weight_column_name` whose value is a `Tensor`.
      for each `column` in `feature_columns`:
      - if `column` is a `SparseColumn`, a feature with `key=column.name`
        whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
        whose `value` is a `Tensor`.
      - if `feature_columns` is None, then `input` must contain only real
        valued `Tensor`.

    Parameters:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. [64, 32] means first layer has 64 nodes and second
        one has 32.
      feature_columns: An iterable containing all the feature columns used
        by the model. All items in the set should be instances of classes
        derived from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training.
        It will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out a given
        coordinate.
    """
    def __init__(self,
                 hidden_units,
                 feature_columns=None,
                 model_dir=None,
                 weight_column_name=None,
                 optimizer=None,
                 activation_fn=nn.relu,
                 dropout=None,
                 config=None):
        # A pure-DNN model is a DNNLinearCombined model with no linear part:
        # only the dnn_* arguments of the parent are populated.
        super(DNNRegressor, self).__init__(model_dir=model_dir,
                                           weight_column_name=weight_column_name,
                                           dnn_feature_columns=feature_columns,
                                           dnn_optimizer=optimizer,
                                           dnn_hidden_units=hidden_units,
                                           dnn_activation_fn=activation_fn,
                                           dnn_dropout=dropout,
                                           config=config)
    def _get_train_ops(self, features, targets):
        """See base class."""
        # No columns were supplied at construction: infer real-valued
        # columns from the training features on first use.
        if self._dnn_feature_columns is None:
            self._dnn_feature_columns = layers.infer_real_valued_columns(features)
        return super(DNNRegressor, self)._get_train_ops(features, targets)
    @property
    def weights_(self):
        # Expose the DNN part's weights under the sklearn-style name.
        return self.dnn_weights_
    @property
    def bias_(self):
        # Expose the DNN part's biases under the sklearn-style name.
        return self.dnn_bias_
# TensorFlowDNNClassifier and TensorFlowDNNRegressor are deprecated.
class TensorFlowDNNClassifier(DeprecatedMixin, DNNClassifier,
                              _sklearn.ClassifierMixin):
    """Deprecated. Use `DNNClassifier` instead (kept for backward compatibility)."""
    pass
class TensorFlowDNNRegressor(DeprecatedMixin, DNNRegressor,
                             _sklearn.RegressorMixin):
    """Deprecated. Use `DNNRegressor` instead (kept for backward compatibility)."""
    pass
| TakayukiSakai/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn.py | Python | apache-2.0 | 9,116 |
# Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deprecated import deprecated
from ..utilities import Dispatcher
conditional = Dispatcher("conditional")
# Keep a handle on the genuine Dispatcher.register so GPflow-internal code
# can still register conditional() implementations without triggering the
# deprecation warning attached to the public ``register`` below.
conditional._gpflow_internal_register = conditional.register
# type-ignore below is because mypy doesn't like it when we assign to a function.
conditional.register = deprecated( # type: ignore
    reason="Registering new implementations of conditional() is deprecated. "
    "Instead, create your own subclass of gpflow.posteriors.AbstractPosterior "
    "and register an implementation of gpflow.posteriors.get_posterior_class "
    "that returns your class."
)(conditional._gpflow_internal_register)
sample_conditional = Dispatcher("sample_conditional")
| GPflow/GPflow | gpflow/conditionals/dispatch.py | Python | apache-2.0 | 1,281 |
from mock import patch
from unittest import TestCase
from whitefly.models.workspace import Workspace
def always_true(instance):
    """Mock ``side_effect`` helper: report success for any argument."""
    return True
def always_false(instance):
    """Mock ``side_effect`` helper: report failure for any argument."""
    return False
class TestWorkspace(TestCase):
    """Workspace filesystem checks, with the ``os.path`` calls mocked out."""
    @patch('os.path.isdir')
    def test_workspace_data_dir_exists_should_return_true_when_data_dir_exists(self, path_isdir_mock):
        path_isdir_mock.side_effect = always_true
        workspace = Workspace("db", "oracle")
        ret = workspace.workspace_data_dir_exists()
        self.assertEqual(True, ret)
    @patch('os.path.isdir')
    def test_workspace_data_dir_exists_should_return_false_when_data_dir_not_exists(self, path_isdir_mock):
        path_isdir_mock.side_effect = always_false
        workspace = Workspace("db", "oracle")
        ret = workspace.workspace_data_dir_exists()
        self.assertEqual(False, ret)
    @patch('os.path.exists')
    def test_workspace_config_exists_should_return_true_when_config_exists(self, path_exists_mock):
        path_exists_mock.side_effect = always_true
        workspace = Workspace("db", "oracle")
        ret = workspace.workspace_config_exists()
        self.assertEqual(True, ret)
    @patch('os.path.exists')
    def test_workspace_config_exists_should_return_false_when_config_not_exists(self, path_exists_mock):
        path_exists_mock.side_effect = always_false
        workspace = Workspace("db", "oracle")
        ret = workspace.workspace_config_exists()
        self.assertEqual(False, ret)
    # NOTE: stacked ``@patch`` decorators inject mocks bottom-up, so the
    # decorator closest to the function ('os.path.isdir') supplies the FIRST
    # mock argument.  The original signatures named the two mocks in the
    # wrong order; the parameter names below now match what each argument
    # actually is (behavior was unaffected only because both mocks received
    # the same side_effect).
    @patch('os.path.exists')
    @patch('os.path.isdir')
    def test_validate_should_return_true_when_data_dir_and_config_exists(self, path_isdir_mock, path_exists_mock):
        path_exists_mock.side_effect = always_true
        path_isdir_mock.side_effect = always_true
        workspace = Workspace("db", "oracle")
        ret = workspace.validate()
        self.assertEqual(True, ret)
    @patch('os.path.exists')
    @patch('os.path.isdir')
    def test_validate_should_return_false_when_data_dir_and_config_not_exists(self, path_isdir_mock, path_exists_mock):
        path_exists_mock.side_effect = always_false
        path_isdir_mock.side_effect = always_false
        workspace = Workspace("db", "oracle")
        ret = workspace.validate()
        self.assertEqual(False, ret)
| kloiasoft/whitefly | test/models/test_workspace.py | Python | apache-2.0 | 2,294 |
from test.fixture import *
from hypothesis import given
from hypothesis.strategies import text
from astropy.io import fits
import utils.dave_reader as DaveReader
from utils.dave_reader import save_to_intermediate_file, load_dataset_from_intermediate_file
import utils.file_utils as FileUtils
from stingray.events import EventList
from stingray import Lightcurve, Powerspectrum, AveragedCrossspectrum
from hendrics.io import HEN_FILE_EXTENSION
import numpy as np
class TestStingrayTypes():
    """Round-trips Stingray objects through a HENDRICS intermediate file."""
    @classmethod
    def setup_class(cls):
        # Scratch file name shared by every round-trip test in this class.
        cls.dum = 'bubu' + HEN_FILE_EXTENSION
    def test_load_and_save_events(self):
        # Save an EventList (with energies) and check it loads back as a dataset.
        events = EventList([0, 2, 3.], pi=[1, 2, 3], mjdref=54385.3254923845,
                           gti = np.longdouble([[-0.5, 3.5]]))
        events.energy = np.array([3., 4., 5.])
        save_to_intermediate_file(events, self.dum)
        ds = load_dataset_from_intermediate_file(self.dum)
        assert ds
    def test_load_and_save_lcurve(self):
        # Save a Lightcurve with Poisson counts and check it loads back.
        lcurve = Lightcurve(np.linspace(0, 10, 15), np.random.poisson(30, 15),
                            mjdref=54385.3254923845,
                            gti = np.longdouble([[-0.5, 3.5]]))
        save_to_intermediate_file(lcurve, self.dum)
        ds = load_dataset_from_intermediate_file(self.dum)
        assert ds
@given(text())
def test_get_txt_dataset(s):
    """ASCII-table import: both tables present, all columns and rows read."""
    fixture = FileUtils.get_destination(TEST_RESOURCES, "Test_Input_1.txt")
    table_id = "EVENTS"
    columns = ["TIME", "PHA", "Color1", "Color2"]
    ds = DaveReader.get_txt_dataset(fixture, table_id, columns)
    expected_rows = 10
    assert ds
    assert len(ds.tables) == 2
    assert table_id in ds.tables
    table = ds.tables[table_id]
    assert len(table.columns) == len(columns)
    assert len(table.columns[columns[0]].values) == expected_rows
@given(text())
def test_get_fits_dataset_lc(s):
    """FITS light-curve import: the RATE table appears with four columns."""
    fixture = FileUtils.get_destination(TEST_RESOURCES, "Test_Input_2.lc")
    hdulist = fits.open(fixture)
    ds = DaveReader.get_fits_dataset(hdulist, "fits_table",
                                     ["Primary", "RATE", "STDGTI"])
    assert ds
    assert len(ds.tables) == 2
    assert "RATE" in ds.tables
    assert len(ds.tables["RATE"].columns) == 4
@given(text())
def test_get_fits_table_column_names(s):
    """The EVENTS extension of the test event file exposes two columns."""
    fixture = FileUtils.get_destination(TEST_RESOURCES, "test.evt")
    hdulist = fits.open(fixture)
    names = DaveReader.get_fits_table_column_names(hdulist, "EVENTS")
    assert len(names) == 2
@given(text())
def test_get_fits_dataset_evt(s):
    """FITS event import: the EVENTS table appears with two columns."""
    fixture = FileUtils.get_destination(TEST_RESOURCES, "test.evt")
    hdulist = fits.open(fixture)
    ds = DaveReader.get_fits_dataset(hdulist, "fits_table",
                                     ["Primary", "EVENTS", "GTI"])
    assert ds
    assert len(ds.tables) == 2
    assert "EVENTS" in ds.tables
    assert len(ds.tables["EVENTS"].columns) == 2
@given(text())
def test_get_events_fits_dataset_with_stingray(s):
    """Event import through Stingray yields an EVENTS table with two columns."""
    fixture = FileUtils.get_destination(TEST_RESOURCES, "test.evt")
    hdulist = fits.open(fixture)
    ds = DaveReader.get_events_fits_dataset_with_stingray(fixture, hdulist)
    assert ds
    assert len(ds.tables) == 2
    assert "EVENTS" in ds.tables
    assert len(ds.tables["EVENTS"].columns) == 2
@given(text())
def test_get_lightcurve_fits_dataset_with_stingray(s):
    """Light-curve import through Stingray succeeds on the PN fixture."""
    fixture = FileUtils.get_destination(TEST_RESOURCES,
                                        "PN_source_lightcurve_raw.lc")
    hdulist = fits.open(fixture)
    ds = DaveReader.get_lightcurve_fits_dataset_with_stingray(
        fixture, hdulist, hduname='RATE', column='TIME',
        gtistring='GTI,STDGTI')
    assert ds
@given(text())
def test_get_file_dataset(s):
    """Reading the .lc fixture again produces the same two-table structure."""
    fixture = FileUtils.get_destination(TEST_RESOURCES, "Test_Input_2.lc")
    hdulist = fits.open(fixture)
    ds = DaveReader.get_fits_dataset(hdulist, "fits_table",
                                     ["Primary", "RATE", "STDGTI"])
    assert ds
    assert len(ds.tables) == 2
    assert "RATE" in ds.tables
| StingraySoftware/dave | src/test/python/test/utils/test_dave_reader.py | Python | apache-2.0 | 4,412 |
import unittest
from rx.observable import Observable
from rx.testing import TestScheduler, ReactiveTest
from rx.disposables import Disposable, SerialDisposable
# Short module-level aliases for the ReactiveTest notification and
# subscription factory helpers used throughout the marble tests below.
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestWindowWithCount(unittest.TestCase):
    """Marble tests for ``Observable.window_with_count(3, 2)``."""
    def test_window_with_count_basic(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
        def create():
            # Tag each element with the index of the window it arrived in.
            def proj(w, i):
                return w.map(lambda x: str(i) + ' ' + str(x))
            return xs.window_with_count(3, 2).map(proj).merge_observable()
        results = scheduler.start(create)
        # Windows hold 3 elements and a new one opens every 2, so elements
        # 4, 6 and 8 each appear in two overlapping windows.
        results.messages.assert_equal(on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "0 4"), on_next(280, "1 4"), on_next(320, "1 5"), on_next(350, "1 6"), on_next(350, "2 6"), on_next(380, "2 7"), on_next(420, "2 8"), on_next(420, "3 8"), on_next(470, "3 9"), on_completed(600))
        xs.subscriptions.assert_equal(subscribe(200, 600))
    def test_window_with_count_disposed(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
        def create():
            def proj(w, i):
                return w.map(lambda x: str(i) + ' ' + str(x))
            return xs.window_with_count(3, 2).map(proj).merge_observable()
        # Dispose the subscription at 370: no notifications after that time.
        results = scheduler.start(create, disposed=370)
        results.messages.assert_equal(on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "0 4"), on_next(280, "1 4"), on_next(320, "1 5"), on_next(350, "1 6"), on_next(350, "2 6"))
        xs.subscriptions.assert_equal(subscribe(200, 370))
    def test_window_with_count_error(self):
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8), on_next(470, 9), on_error(600, ex))
        def create():
            def selector(w, i):
                def mapping(x):
                    return "%s %s" % (i, x)
                return w.map(mapping)
            return xs.window_with_count(3, 2).map(selector).merge_observable()
        results = scheduler.start(create)
        # The source error is propagated through the merged windows.
        results.messages.assert_equal(on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "0 4"), on_next(280, "1 4"), on_next(320, "1 5"), on_next(350, "1 6"), on_next(350, "2 6"), on_next(380, "2 7"), on_next(420, "2 8"), on_next(420, "3 8"), on_next(470, "3 9"), on_error(600, ex))
        xs.subscriptions.assert_equal(subscribe(200, 600))
| dbrattli/RxPY | tests/test_observable/test_windowwithcount.py | Python | apache-2.0 | 3,069 |
"""
Time Execution decorator
"""
import socket
import time
from fqn_decorators import Decorator
from fqn_decorators.asynchronous import AsyncDecorator
from pkgsettings import Settings
# Hostname tag attached to every emitted metric.
SHORT_HOSTNAME = socket.gethostname()
# Global, mutable configuration: no backends or hooks are active until the
# host application calls ``settings.configure(...)``; the measured duration
# is stored under the "value" key by default.
settings = Settings()
settings.configure(backends=[], hooks=[], duration_field="value")
def write_metric(name, **metric):
    """Forward one metric (name plus keyword fields) to every configured backend."""
    for sink in settings.backends:
        sink.write(name, **metric)
def _apply_hooks(hooks, response, exception, metric, func, func_args, func_kwargs):
metadata = dict()
for hook in hooks:
hook_result = hook(
response=response,
exception=exception,
metric=metric,
func=func,
func_args=func_args,
func_kwargs=func_kwargs,
)
if hook_result:
metadata.update(hook_result)
return metadata
class time_execution(Decorator):
    """Decorator that times the wrapped callable and emits one metric per call."""
    def __init__(self, func=None, **params):
        # Timestamp captured in before(); None until the wrapped call starts.
        self.start_time = None
        super(time_execution, self).__init__(func, **params)
    def before(self):
        # Called by the Decorator base right before the wrapped function runs.
        self.start_time = time.time()
    def after(self):
        # Duration in milliseconds (seconds rounded to 3 decimals, then scaled).
        duration = round(time.time() - self.start_time, 3) * 1000
        metric = {"name": self.fqn, settings.duration_field: duration, "hostname": SHORT_HOSTNAME}
        origin = getattr(settings, "origin", None)
        if origin:
            metric["origin"] = origin
        # Per-decorator hooks can be added via extra_hooks; the globally
        # configured hooks are prepended unless explicitly disabled.
        hooks = self.params.get("extra_hooks", [])
        disable_default_hooks = self.params.get("disable_default_hooks", False)
        if not disable_default_hooks:
            hooks = settings.hooks + hooks
        # Apply the registered hooks, and collect the metadata they might
        # return to be stored with the metrics
        metadata = _apply_hooks(
            hooks=hooks,
            response=self.result,
            exception=self.get_exception(),
            metric=metric,
            func=self.func,
            func_args=self.args,
            func_kwargs=self.kwargs,
        )
        metric.update(metadata)
        write_metric(**metric)
    def get_exception(self):
        """Retrieve the exception"""
        # exc_info is the (type, value, traceback) triple stored by the
        # Decorator base when the wrapped call raised; None on success.
        if self.exc_info is None:
            return
        exc_type, exc_value, exc_tb = self.exc_info
        if exc_value is None:
            # Only the type was recorded: instantiate it so hooks always
            # receive a real exception object.
            exc_value = exc_type()
        if exc_value.__traceback__ is not exc_tb:
            return exc_value.with_traceback(exc_tb)
        return exc_value
class time_execution_async(AsyncDecorator, time_execution):
    # Coroutine-aware variant: AsyncDecorator supplies the async wrapping,
    # time_execution supplies the timing/metric logic.
    pass
| kpn-digital/py-timeexecution | time_execution/decorator.py | Python | apache-2.0 | 2,501 |
from pykintone.result import Result
from pykintone.comment import RecordComment, Mention
class CreateCommentResult(Result):
    """Result of a comment-creation call; exposes the new comment's id."""
    def __init__(self, response):
        super(CreateCommentResult, self).__init__(response)
        # -1 means "no id returned" (failed request or missing field).
        self.comment_id = -1
        if self.ok:
            body = response.json()
            if "id" in body:
                self.comment_id = int(body["id"])
class SelectCommentResult(Result):
    """Result of a comment-listing call; wraps the raw comment payloads."""
    def __init__(self, response):
        super(SelectCommentResult, self).__init__(response)
        self.raw_comments = []
        self.older = False
        self.newer = False
        if self.ok:
            body = response.json()
            if "comments" in body:
                self.raw_comments = body["comments"]
                self.older = body["older"]
                self.newer = body["newer"]
    def comments(self):
        """Deserialize the raw payloads into RecordComment objects."""
        parsed = []
        for raw in self.raw_comments:
            comment = RecordComment.deserialize(raw)
            comment.mentions = [Mention.deserialize(m) for m in comment.mentions]
            parsed.append(comment)
        return parsed
| icoxfog417/pykintone | pykintone/comment_result.py | Python | apache-2.0 | 1,085 |
#!/usr/bin/python3
import sys
from fabric.api import env, task, local
__author__ = 'flat'
__version__ = '1.0'
# All tasks shell out to git on the local machine only.
env.hosts = ['localhost']
env.colorize_errors = 'True'
env.disable_known_hosts = 'True'
args = sys.argv[1:]
if not args:
    # No task given: show usage and exit with an error status.
    # (Fixed typo in the usage line: "commmit" -> "commit".)
    print("Usage: fab { git | add | commit | push | pull | status} \n")
    print("Description:")
    print("* git: add + commit + push")
    print("* add: track all files")
    print("* commit: commit files tracked and write a commment on VI")
    print("* push: push the code on the GIT server")
    print("* pull: pull new code from the GIT server")
    print("* status: check the status of the local repository \n")
    sys.exit(1)
@task
def add():
    """Stage every change in the working tree (``git add -A .``)."""
    local('/usr/bin/git add -A .')
@task
def commit():
    """Commit the staged files; git opens the editor for the message."""
    local('/usr/bin/git commit')
@task
def push():
    """Push local commits to the remote GIT server."""
    local('/usr/bin/git push')
@task
def pull():
    """Pull new code from the remote GIT server."""
    local('/usr/bin/git pull')
@task
def status():
    """Show the status of the local repository."""
    local('/usr/bin/git status')
@task
def git():
    """Convenience task: add + commit + push in sequence."""
    add()
    commit()
    push()
| checco/yate | fabfile.py | Python | apache-2.0 | 985 |
from django.contrib import admin
from .models import personalBinge
from .models import Bingether
from .models import Comment
# Register your models here.
# Makes the binge-tracking models manageable through the Django admin site.
admin.site.register(personalBinge)
admin.site.register(Bingether)
admin.site.register(Comment)
| SavenR/Bingether | app/admin.py | Python | artistic-2.0 | 250 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from __future__ import absolute_import, division, print_function
import logging
import sys
import click
import canmatrix.convert
import canmatrix.log
# Module-level logger (not referenced in the code below; kept for
# parity with the other canmatrix CLI modules).
logger = logging.getLogger(__name__)
def get_formats():
    """Collect the supported I/O formats for the CLI help text.

    Returns:
        tuple(str, str): newline-separated names of (readable, writable)
        formats, taken from ``canmatrix.formats.supportedFormats``.
    """
    # NOTE(review): ``canmatrix.formats`` is only reachable here because it is
    # imported transitively (via ``canmatrix.convert``) — confirm before
    # trimming imports.  Locals renamed so the builtin ``input`` is no longer
    # shadowed.
    readable = ""
    writable = ""
    for name, features in canmatrix.formats.supportedFormats.items():
        if 'load' in features:
            readable += name + "\n"
        if 'dump' in features:
            writable += name + "\n"
    return (readable, writable)
@click.command()
# global switches
@click.option('-v', '--verbose', 'verbosity', count=True, default=1)
@click.option('-s', '--silent/--no-silent', is_flag=True, default=False, help="don't print status messages to stdout. (only errors)")
@click.option('-f', '--force_output', help="enforce output format, ignoring output file extension (e.g., -f csv).\nSupported formats for writing:\n" + get_formats()[1])
@click.option('-i', '--input_format', 'import_type', help="give hint for input format\nSupported formats for reading:\n" + get_formats()[0])
@click.option('--ignoreEncodingErrors/--no-ignoreEncodingErrors', 'ignoreEncodingErrors', default=False, help="ignore character encoding errors during export (dbc,dbf,sym)")
# manipulation and filter switches
@click.option('--deleteObsoleteDefines/--no-deleteObsoleteDefines', 'deleteObsoleteDefines', default=False, help="delete defines from all ECUs, frames and Signals\nExample --deleteObsoleteDefines")
@click.option('--deleteEcu', 'deleteEcu', help="delete Ecu form databases. (comma separated list)\nSyntax: --deleteEcu=myEcu,mySecondEcu")
@click.option('--renameEcu', 'renameEcu', help="rename Ecu form databases. (comma separated list)\nSyntax: --renameEcu=myOldEcu:myNewEcu,mySecondEcu:mySecondNewEcu")
@click.option('--deleteSignal', 'deleteSignal', help="delete Signal form databases. (comma separated list)\nSyntax: --deleteSignal=mySignal1,mySecondSignal")
@click.option('--renameSignal', 'renameSignal', help="rename Signal form databases. (comma separated list)\nSyntax: --renameSignal=myOldSignal:myNewSignal,mySecondSignal:mySecondNewSignal")
@click.option('--deleteZeroSignals/--no-deleteZeroSignals', 'deleteZeroSignals', default=False, help="delete zero length signals (signals with 0 bit length) from matrix\ndefault False")
@click.option('--deleteSignalAttributes', 'deleteSignalAttributes', help="delete attributes from all signals\nExample --deleteSignalAttributes GenMsgSomeVar,CycleTime")
@click.option('--deleteFrame', 'deleteFrame', help="delete Frame form databases. (comma separated list)\nSyntax: --deleteFrame=myFrame1,mySecondFrame")
@click.option('--renameFrame', 'renameFrame', help="increment each frame.id in database by increment\nSyntax: --frameIdIncrement=increment")
@click.option('--addFrameReceiver', 'addFrameReceiver', help="add receiver Ecu to frame(s) (comma separated list)\nSyntax: --addFrameReceiver=framename:myNewEcu,mySecondEcu:myNEWEcu")
@click.option('--changeFrameId', 'changeFrameId', help="change frame.id in database\nSyntax: --changeFrameId=oldId:newId")
@click.option('--setFrameFd', 'setFrameFd', help="set Frame from database to canfd. (comma separated list)\nSyntax: --setFrameFd=myFrame1,mySecondFrame")
@click.option('--unsetFrameFd', 'unsetFrameFd', help="set Frame from database to normal (not FD). (comma separated list)\nSyntax: --unsetFrameFd=myFrame1,mySecondFrame")
@click.option('--recalcDLC', 'recalcDLC', help="recalculate dlc; max: use maximum of stored and calculated dlc; force: force new calculated dlc")
@click.option('--skipLongDlc', 'skipLongDlc', help="skip all Frames with dlc bigger than given threshold")
@click.option('--cutLongFrames', 'cutLongFrames', help="cut all signals out of Frames with dlc bigger than given threshold")
@click.option('--deleteFrameAttributes', 'deleteFrameAttributes', help="delete attributes from all frames\nExample --deleteFrameAttributes GenMsgSomeVar,CycleTime")
@click.option('--ecus', help="Copy only given ECUs (comma separated list) to target matrix; suffix 'rx' or 'tx' for selection: Example: --ecus FirstEcu:rx,SecondEcu:tx,ThirdEcu")
@click.option('--frames', help="Copy only given Frames (comma separated list) to target matrix")
@click.option('--signals', help="Copy only given Signals (comma separated list) to target matrix just as 'free' signals without containing frame")
@click.option('--merge', help="merge additional can databases.\nSyntax: --merge filename[:ecu=SOMEECU][:frame=FRAME1][:frame=FRAME2],filename2")
# arxml switches
@click.option('--arxmlIgnoreClusterInfo/--no-arxmlIgnoreClusterInfo', 'arxmlIgnoreClusterInfo', default=False, help="Ignore any can cluster info from arxml; Import all frames in one matrix\ndefault False")
@click.option('--arxmlUseXpath(--no-arxmlUseXpath', 'arxmlUseXpath', default=False, help="Use experimental Xpath-Implementation for resolving AR-Paths; \ndefault False")
@click.option('--arxmlExportVersion', 'arVersion', default="3.2.3", help="Set output AUTOSAR version\ncurrently only 3.2.3 and 4.1.0 are supported\ndefault 3.2.3")
# dbc switches
@click.option('--dbcImportEncoding', 'dbcImportEncoding', default="iso-8859-1", help="Import charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcImportCommentEncoding', 'dbcImportCommentEncoding', default="iso-8859-1", help="Import charset of Comments in dbc\ndefault iso-8859-1")
@click.option('--dbcExportEncoding', 'dbcExportEncoding', default="iso-8859-1", help="Export charset of dbc (relevant for units), maybe utf-8\ndefault iso-8859-1")
@click.option('--dbcExportCommentEncoding', 'dbcExportCommentEncoding', default="iso-8859-1", help="Export charset of comments in dbc\ndefault iso-8859-1")
@click.option('--dbcUniqueSignalNames/--no-dbcUniqueSignalNames', 'dbcUniqueSignalNames', default=True, help="Check if signal names are unique per frame")
# dbf switches
@click.option('--dbfImportEncoding', 'dbfImportEncoding', default="iso-8859-1", help="Import charset of dbf, maybe utf-8\ndefault iso-8859-1")
@click.option('--dbfExportEncoding', 'dbfExportEncoding', default="iso-8859-1", help="Export charset of dbf, maybe utf-8\ndefault iso-8859-1")
# sym switches
@click.option('--symImportEncoding', 'symImportEncoding', default="iso-8859-1", help="Import charset of sym format, maybe utf-8\ndefault iso-8859-1")
@click.option('--symExportEncoding', 'symExportEncoding', default="iso-8859-1", help="Export charset of sym format, maybe utf-8\ndefault iso-8859-1")
# xls/csv switches
@click.option('--xlsMotorolaBitFormat', 'xlsMotorolaBitFormat', default="msbreverse", help="Excel format for startbit of motorola codescharset signals\nValid values: msb, lsb, msbreverse\n default msbreverse")
@click.option('--additionalFrameAttributes', 'additionalFrameAttributes', default = "", help="append columns to csv/xls(x), example: is_fd")
@click.option('--additionalSignalAttributes', 'additionalSignalAttributes', default = "", help="append columns to csv/xls(x), example: is_signed,attributes[\"GenSigStartValue\"]")
@click.option('--xlsValuesInSeperateLines/--no-xlsValuesInSeperateLines', 'xlsValuesInSeperateLines', default = False, help="Excel format: create seperate line for each value of signal value table\tdefault: False")
# json switches
@click.option('--jsonExportCanard/--no-jsonExportCanard', 'jsonExportCanard', default=False, help="Export Canard compatible json format")
@click.option('--jsonExportAll/--no-jsonExportAll', 'jsonExportAll', default=False, help="Export more data to json format")
@click.option('--jsonMotorolaBitFormat', 'jsonMotorolaBitFormat', default="lsb", help="Json format: startbit of motorola signals\nValid values: msb, lsb, msbreverse\n default lsb")
@click.option('--jsonNativeTypes/--no-jsonNativeTypes', 'jsonNativeTypes', default=False, help="Uses native json representation for decimals instead of string.")
#sym switches
@click.option('--symExportEncoding', 'symExportEncoding', default="iso-8859-1", help="Export charset of sym format, maybe utf-8\ndefault iso-8859-1")
# in and out file
@click.argument('infile', required=True)
@click.argument('outfile', required=True)
#
def cli_convert(infile, outfile, silent, verbosity, **options):
    """
    canmatrix.cli.convert [options] import-file export-file
    import-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym
    export-file: *.dbc|*.dbf|*.kcd|*.arxml|*.json|*.xls(x)|*.sym|*.py
    \n"""
    # NOTE(review): this docstring doubles as the click help text, so it is
    # deliberately left unchanged.  Also note the decorator chain above
    # declares '--symExportEncoding' twice; the later declaration wins.
    root_logger = canmatrix.log.setup_logger()
    if silent is True:
        # only print error messages, ignore verbosity flag
        verbosity = -1
        options["silent"] = True
    canmatrix.log.set_log_level(root_logger, verbosity)
    # Map the boolean CLI flag onto the string value the backend expects
    # (presumably a codecs error-handler name: 'ignore' or '' for strict
    # decoding — TODO confirm against canmatrix.convert).
    if options["ignoreEncodingErrors"]:
        options["ignoreEncodingErrors"] = "ignore"
    else:
        options["ignoreEncodingErrors"] = ""
    canmatrix.convert.convert(infile, outfile, **options)
    return 0
if __name__ == '__main__':
sys.exit(cli_convert())
| altendky/canmatrix | src/canmatrix/cli/convert.py | Python | bsd-2-clause | 10,309 |
#:coding=utf-8:
from django.test import TestCase as DjangoTestCase
from django.forms import Form
from beproud.django.commons.forms import EmailField
# Public names re-exported via `from ... import *`.  Only tests actually
# defined in this module may be listed: the stale entries JSONFormFieldTest
# and JSONWidgetTest (not defined here) made star imports of this module
# fail with an ImportError.
__all__ = (
    'EmailFieldTest',
)
class EmailTestForm(Form):
    """Minimal form wrapping the custom EmailField under test."""
    email = EmailField(label="email")
class EmailFieldTest(DjangoTestCase):
    """Validation behaviour of the custom EmailField."""

    def _is_valid(self, address):
        # Bind a fresh form with the given address and report validity.
        return EmailTestForm({"email": address}).is_valid()

    def test_basic_email(self):
        """A plain address is accepted."""
        self.assertTrue(self._is_valid("spam@eggs.com"))

    def test_keitai_email(self):
        """Japanese mobile-carrier addresses with unusual local parts pass."""
        self.assertTrue(self._is_valid("-spam..eggs-@softbank.ne.jp"))
        self.assertTrue(self._is_valid(".*&$.-spam..!!eggs!!-.*.@ezweb.ne.jp"))

    def test_plus_email(self):
        """Plus-tagged local parts are accepted."""
        self.assertTrue(self._is_valid("spam+extra@eggs.com"))

    def test_multi_email(self):
        """Several whitespace-separated addresses are rejected."""
        self.assertFalse(self._is_valid("aaa spam+extra@eggs.com email@email.com"))

    def test_longtld(self):
        """Long top-level domains are accepted."""
        self.assertTrue(self._is_valid("spam@eggs.engineer"))

    def test_punycode(self):
        """Punycode (IDN) top-level domains are accepted."""
        self.assertTrue(self._is_valid("spam@eggs.xn--i1b6b1a6a2e"))
| beproud/bpcommons | tests/test_forms_field.py | Python | bsd-2-clause | 1,310 |
# pylint: disable=W0401
from django.db.backends.oracle.compiler import *
| JohnPapps/django-oracle-drcp | django-oracle-drcp/compiler.py | Python | bsd-2-clause | 73 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit
class TestControlFlow(unittest.TestCase):
    """Check that flypy's @jit compiles common control-flow shapes correctly.

    Each test compiles a small function and compares the jitted result
    against an explicit value or the pure-Python reference (f.py_func).
    """
    def test_loop_carried_dep_promotion(self):
        # `sum` starts as int 0 but accumulates floats: the loop-carried
        # variable must be promoted to float by the compiler.
        @jit
        def f(n):
            sum = 0
            for i in range(n):
                sum += float(i)
            return sum
        self.assertEqual(f(10), 45.0)
    def test_nested_rectangular(self):
        # Three rectangular (non-early-exiting) nested loops.
        @jit
        def f(n):
            sum = 0
            for i in range(n):
                for j in range(n):
                    for k in range(n):
                        sum += i * j
            return sum
        self.assertEqual(f(3), f.py_func(3))
    def test_for_continue(self):
        @jit
        def f(n):
            sum = 0
            for i in range(n):
                if i > n - 4:
                    continue
                sum += i
            return sum
        self.assertEqual(f(10), f.py_func(10))
    def test_for_break(self):
        @jit
        def f(n):
            sum = 0
            for i in range(n):
                if i > n - 4:
                    break
                sum += i
            return sum
        self.assertEqual(f(10), f.py_func(10))
    def test_while_continue(self):
        @jit
        def f(n):
            i = sum = 0
            while i < n:
                i += 1
                if i > n - 4:
                    continue
                sum += i
            return sum
        self.assertEqual(f(10), f.py_func(10))
    def test_while_break(self):
        @jit
        def f(n):
            i = sum = 0
            while i < n:
                if i > n - 4:
                    break
                sum += i
                i += 1
            return sum
        self.assertEqual(f(10), f.py_func(10))
    def test_moderately_complicated(self):
        # A conditional while-loop nested inside a for-loop; the inner loop
        # rebinds the outer induction variable `i`.
        @jit
        def f(n):
            i = 0
            sum = 0
            for i in range(n):
                if i % 4 > 2:
                    while i > 0:
                        sum += i
                        i -= 1
            return sum
        self.assertEqual(f(10), f.py_func(10))
    def test_complicated(self):
        # Deep nesting plus break/continue routed through a for-else clause.
        @jit
        def f(n):
            sum = 0
            for i in range(n):
                if i % 4 > 2:
                    while i > 0:
                        for j in range(n):
                            i -= 1
                            for k in range(n):
                                while k != 0:
                                    sum += i * j
                                    break
                            else:
                                continue
                            break
            return sum
        self.assertEqual(f(3), f.py_func(3))
if __name__ == '__main__':
#TestControlFlow('test_reduction').debug()
unittest.main(verbosity=3) | flypy/flypy | flypy/tests/test_control_flow.py | Python | bsd-2-clause | 2,894 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
# Docstring constants applied to the TT_Postscript wrapper; the variable
# names follow the "<Type>_<attribute>" convention visible throughout.
TT_Postscript__init__ = """
A TrueType PostScript table.
"""
TT_Postscript_format_type = """
Format of this table.
"""
TT_Postscript_italic_angle = """
Italic angle in degrees.
"""
TT_Postscript_underline_position = """
Underline position.
"""
TT_Postscript_underline_thickness = """
Underline thickness.
"""
TT_Postscript_is_fixed_pitch = """
If `True`, the font is monospaced.
"""
TT_Postscript_min_mem_type42 = """
Minimum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_max_mem_type42 = """
Maximum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_min_mem_type1 = """
Minimum memory usage when the font is downloaded as a Type 1 font.
"""
TT_Postscript_max_mem_type1 = """
Maximum memory usage when the font is downloaded as a Type 1 font.
"""
| mdboom/freetypy | docstrings/tt_postscript.py | Python | bsd-2-clause | 2,490 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from teacher import *
from Views.teacher_view import get_teacher_view, get_overview, get_year_overview, get_class_overview, get_assignment_overview
from Views.set_exercise import get_set_exercise_page, send_exercise_to_class, get_view_spec_form
from Views.authenticate import authenticate_student, authenticate_teacher, check_user_name_exists
from Views.login import student_login, teacher_login
from Views.register import register_student, register_teacher
from Views.student_view import get_student_view
from Views.submit_code import submit_student_code, run_self_test
from Views.single_exercise_code_view import single_exercise_view
from Views.student_grades import student_grades_view
from Views.home import home_page
from Views.logout import logout_user
from Views.view_spec import view_spec, get_exercise_details
from Views.settings import teacher_account_settings, delete_teaching_class, student_account_settings, class_settings, change_password, change_email, get_registered_students_in_course, add_new_class, update_class_name, update_course_students, get_student_submission
from Views.add_new_exercise import add_new_exercise, create_exercise
from Views.view_submissions import view_student_submissions, view_submissions_teacher, get_student_feedback, submit_student_feedback
admin.autodiscover()
# Route table for the whole site.  Patterns are tried top-down; specific
# teacher/student endpoints precede the page views and the catch-all home
# page (r'^$') at the bottom.
# NOTE(review): viewSubmissionMark is pulled in by the star import of
# `teacher` above — confirm it is the intended handler for '^teacher/$'.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^teacher/$', viewSubmissionMark),
    url(r'^selectable/', include('selectable.urls')),
    url(r'^teacher/class-settings/manage-class/$', update_course_students),
    url(r'^teacher/class-settings/delete-class/$', delete_teaching_class),
    url(r'^teacher/get-overview/', get_overview),
    url(r'^teacher/get-year-overview/', get_year_overview),
    url(r'^teacher/get-class-overview/', get_class_overview),
    url(r'^teacher/get-assignment-overview/', get_assignment_overview),
    url(r'^student/changepassword/$', change_password),
    url(r'^student/view-submissions/get-feedback/(\d+)/$', get_student_feedback),
    url(r'^teacher/changepassword/$', change_password),
    url(r'^teacher/get-students-in-class/$', get_registered_students_in_course),
    url(r'^teacher/add-new-class/$', add_new_class),
    url(r'^teacher/get-exercise/$', get_exercise_details),
    url(r'^teacher/update-class-name/', update_class_name),
    url(r'^teacher/submit-exercise/', send_exercise_to_class),
    url(r'^student/changeemail/$', change_email),
    url(r'^teacher/changeemail/$', change_email),
    url(r'^teacher/account-settings/', teacher_account_settings),
    url(r'^student/account-settings/', student_account_settings),
    url(r'^class-settings/', class_settings),
    url(r'^teacher-view/$', get_teacher_view),
    url(r'^set-exercise/$', get_set_exercise_page),
    url(r'^authenticate_student/$', authenticate_student),
    url(r'^authenticate_teacher/$', authenticate_teacher),
    url(r'^student-login/$', student_login),
    url(r'^teacher-login/$', teacher_login),
    url(r'^register-student/$', register_student),
    url(r'^register-teacher/$', register_teacher),
    url(r'^student-view/$', get_student_view),
    url(r'^submit-code/(\d+)/$', submit_student_code),
    url(r'^code-single-exercise/(\d+)/$', single_exercise_view),
    url(r'^student-grades/$', student_grades_view),
    url(r'^logout/$', logout_user),
    url(r'^view-spec/$', view_spec),
    url(r'^check-username/$', check_user_name_exists),
    url(r'^student/test/self-defined/$', run_self_test),
    url(r'^teacher/add-new-exercise/$',add_new_exercise),
    url(r'^teacher/add-new-exercise/submit-exercise/$',create_exercise),
    url(r'^student/view-submissions/$', view_student_submissions),
    url(r'^teacher/view-submissions/$', view_submissions_teacher),
    url(r'^teacher/view-submissions/send-feedback/$', submit_student_feedback),
    url(r'^teacher/get-student-submission/$', get_student_submission),
    url(r'^teacher/set-exercise/view-spec-form/(\d+)/$', get_view_spec_form),
    url(r'^$', home_page)
)
| varun-verma11/CodeDrill | djangoSRV/djangoSRV/urls.py | Python | bsd-2-clause | 4,085 |
import nose
import angr
import logging
l = logging.getLogger("angr_tests.path_groups")
import os
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
# Maps architecture name -> an address of interest in that arch's fauxware
# binary.  The tests below iterate only the keys; the values appear unused
# in this file and serve as documentation.
addresses_fauxware = {
    'armel': 0x8524,
    'armhf': 0x104c9, # addr+1 to force thumb
    #'i386': 0x8048524, # commenting out because of the freaking stack check
    'mips': 0x400710,
    'mipsel': 0x4006d0,
    'ppc': 0x1000054c,
    'ppc64': 0x10000698,
    'x86_64': 0x400664
}
def run_fauxware(arch, threads):
    """Walk angr's path-group API over one arch's fauxware binary.

    Steps to the authentication split, separates the backdoor path from the
    normal ones, rejoins them at main, merges, then exercises selector
    functions and the special ALL/DROP stashes.  `threads` is forwarded to
    path_group() (None = single-threaded).
    """
    p = angr.Project(location + '/' + arch + '/fauxware', load_options={'auto_load_libs': False})
    pg = p.factory.path_group(threads=threads)
    nose.tools.assert_equal(len(pg.active), 1)
    nose.tools.assert_equal(pg.active[0].length, 0)
    # step until the backdoor split occurs
    pg2 = pg.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune())
    nose.tools.assert_equal(len(pg2.active), 2)
    # exactly one of the two paths must have read the backdoor password
    nose.tools.assert_true(any("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
    nose.tools.assert_false(all("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
    # separate out the backdoor and normal paths
    pg3 = pg2.stash(lambda path: "SOSNEAKY" in path.state.posix.dumps(0), to_stash="backdoor").stash_all(to_stash="auth")
    nose.tools.assert_equal(len(pg3.active), 0)
    nose.tools.assert_equal(len(pg3.backdoor), 1)
    nose.tools.assert_equal(len(pg3.auth), 1)
    # step the backdoor path until it returns to main
    pg4 = pg3.step(until=lambda lpg: lpg.backdoor[0].jumpkinds[-1] == 'Ijk_Ret', stash='backdoor')
    main_addr = pg4.backdoor[0].addr
    nose.tools.assert_equal(len(pg4.active), 0)
    nose.tools.assert_equal(len(pg4.backdoor), 1)
    nose.tools.assert_equal(len(pg4.auth), 1)
    # now step the real path until the real authentication paths return to the same place
    pg5 = pg4.explore(find=main_addr, num_find=2, stash='auth').unstash_all(from_stash='found', to_stash='auth')
    nose.tools.assert_equal(len(pg5.active), 0)
    nose.tools.assert_equal(len(pg5.backdoor), 1)
    nose.tools.assert_equal(len(pg5.auth), 2)
    # now unstash everything
    pg6 = pg5.unstash_all(from_stash='backdoor').unstash_all(from_stash='auth')
    nose.tools.assert_equal(len(pg6.active), 3)
    nose.tools.assert_equal(len(pg6.backdoor), 0)
    nose.tools.assert_equal(len(pg6.auth), 0)
    # all three paths sit at the same address, so they are mergeable
    nose.tools.assert_equal(len(set(pg6.mp_active.addr.mp_items)), 1)
    # now merge them!
    pg7 = pg6.merge()
    nose.tools.assert_equal(len(pg7.active), 1)
    nose.tools.assert_equal(len(pg7.backdoor), 0)
    nose.tools.assert_equal(len(pg7.auth), 0)
    #import ipdb; ipdb.set_trace()
    #print pg2.mp_active.addr.mp_map(hex).mp_items
    # test selecting paths to step
    pg_a = p.factory.path_group(immutable=True)
    pg_b = pg_a.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
    pg_c = pg_b.step(selector_func=lambda p: p is pg_b.active[0], step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
    nose.tools.assert_is(pg_b.active[1], pg_c.active[0])
    nose.tools.assert_is_not(pg_b.active[0], pg_c.active[1])
    total_active = len(pg_c.active)
    # test special stashes
    nose.tools.assert_equals(len(pg_c.stashed), 0)
    pg_d = pg_c.stash(filter_func=lambda p: p is pg_c.active[1], to_stash='asdf')
    nose.tools.assert_equals(len(pg_d.stashed), 0)
    nose.tools.assert_equals(len(pg_d.asdf), 1)
    nose.tools.assert_equals(len(pg_d.active), total_active-1)
    pg_e = pg_d.stash(from_stash=pg_d.ALL, to_stash='fdsa')
    nose.tools.assert_equals(len(pg_e.asdf), 0)
    nose.tools.assert_equals(len(pg_e.active), 0)
    nose.tools.assert_equals(len(pg_e.fdsa), total_active)
    pg_f = pg_e.stash(from_stash=pg_e.ALL, to_stash=pg_e.DROP)
    nose.tools.assert_true(all(len(s) == 0 for s in pg_f.stashes.values()))
def test_fauxware():
    # Nose test generator: one (runner, arch, threads) case per sample
    # binary — first single-threaded, then with two worker threads.
    for arch in addresses_fauxware:
        for threads in (None, 2):
            yield run_fauxware, arch, threads
if __name__ == "__main__":
    # Manual runner mirroring the nose generator above (Python 2 syntax).
    for func, march, threads in test_fauxware():
        print 'testing ' + march
        func(march, threads)
| haylesr/angr | tests/test_path_groups.py | Python | bsd-2-clause | 4,184 |
import numpy as np
import sys
from trw_utils import *
from heterogenous_crf import inference_gco
from pyqpbo import binary_general_graph
from scipy.optimize import fmin_l_bfgs_b
def trw(node_weights, edges, edge_weights, y,
        max_iter=100, verbose=0, tol=1e-3,
        get_energy=None):
    """Subgradient dual decomposition over per-label binary subproblems.

    Assumes shapes: node_weights (n_nodes, n_states), edges (n_edges, 2),
    edge_weights (n_edges, n_states, n_states) — inferred from the
    indexing below, TODO confirm against callers.  Each outer iteration
    solves one QPBO problem per state (with an extra L-BFGS-B variable x
    coupling them) plus a kappa subproblem, then updates the dual
    variables mu by a decaying subgradient step.

    Returns (lambda_sum, y_hat_kappa, info) where lambda_sum holds the
    row-normalized per-state indicators and info the dual/primal history.
    """
    n_nodes, n_states = node_weights.shape
    n_edges = edges.shape[0]
    y_hat = []
    lambdas = np.zeros(n_nodes)  # NOTE(review): never used afterwards
    mu = np.zeros((n_nodes, n_states))
    learning_rate = 0.1
    energy_history = []
    primal_history = []
    pairwise = []
    # one binary pairwise potential per state, built from the diagonal
    # of the full edge weight matrices
    for k in xrange(n_states):
        y_hat.append(np.zeros(n_states))
        _pairwise = np.zeros((n_edges, 2, 2))
        for i in xrange(n_edges):
            _pairwise[i,1,0] = _pairwise[i,0,1] = -0.5 * edge_weights[i,k,k]
        pairwise.append(_pairwise)
    # fold half of each diagonal edge weight into the incident unaries
    # (mutates the caller's node_weights in place)
    for i in xrange(n_edges):
        e1, e2 = edges[i]
        node_weights[e1,:] += 0.5 * np.diag(edge_weights[i,:,:])
        node_weights[e2,:] += 0.5 * np.diag(edge_weights[i,:,:])
    for iteration in xrange(max_iter):
        dmu = np.zeros((n_nodes, n_states))
        unaries = node_weights + mu
        # inner minimization over the coupling variable x (see f below)
        x, f_val, d = fmin_l_bfgs_b(f, np.zeros(n_nodes),
                                    args=(unaries, pairwise, edges),
                                    maxiter=50,
                                    pgtol=1e-5)
        E = np.sum(x)
        for k in xrange(n_states):
            new_unaries = np.zeros((n_nodes, 2))
            new_unaries[:,1] = unaries[:,k] + x
            y_hat[k], energy = binary_general_graph(edges, new_unaries, pairwise[k])
            E -= 0.5*energy
            dmu[:,k] -= y_hat[k]
        y_hat_kappa, energy = optimize_kappa(y, mu, 1, n_nodes, n_states)
        E += energy
        dmu[np.ogrid[:dmu.shape[0]], y_hat_kappa] += 1
        mu -= learning_rate * dmu
        energy_history.append(E)
        lambda_sum = np.zeros((n_nodes, n_states))
        for k in xrange(n_states):
            lambda_sum[:,k] = y_hat[k]
        # normalize rows so each node's indicators sum to one
        lambda_sum = lambda_sum / np.sum(lambda_sum, axis=1, keepdims=True)
        if get_energy is not None:
            primal = get_energy(get_labelling(lambda_sum))
            primal_history.append(primal)
        else:
            primal = 0
        # 1/sqrt(t) step-size schedule after the first iteration
        if iteration:
            learning_rate = 1. / np.sqrt(iteration)
        if verbose:
            print 'Iteration {}: energy={}, primal={}'.format(iteration, E, primal)
        if iteration > 0 and np.abs(E - energy_history[-2]) < tol:
            if verbose:
                print 'Converged'
            break
    info = {'primal': primal_history,
            'dual': energy_history,
            'iteration': iteration}
    return lambda_sum, y_hat_kappa, info
def f(x, node_weights, pairwise, edges):
    """Objective for fmin_l_bfgs_b inside trw().

    For the coupling variable x, solves one QPBO subproblem per state and
    returns the NEGATED dual value and its gradient w.r.t. x (negated
    because fmin_l_bfgs_b minimizes while trw maximizes the dual).
    """
    n_nodes, n_states = node_weights.shape
    dual = 0
    dlambda = np.zeros(n_nodes)
    for k in xrange(n_states):
        new_unaries = np.zeros((n_nodes, 2))
        new_unaries[:,1] = node_weights[:,k] + x
        y_hat, energy = binary_general_graph(edges, new_unaries, pairwise[k])
        dual += 0.5 * energy
        dlambda += y_hat
    # gradient: sum of indicators minus the constraint constant 1
    dlambda -= 1
    dual -= np.sum(x)
    #print dual
    return -dual, -dlambda
| kondra/latent_ssvm | smd.py | Python | bsd-2-clause | 3,199 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from app import create_app, celery
# Module-level Flask app created via the factory; presumably a Celery
# worker targets this module (`celery` is imported above for that
# purpose) — confirm against the worker start command.
app = create_app()
| taogeT/flask-celery | example/celery_run.py | Python | bsd-2-clause | 101 |
#!/usr/bin/env python
#
# Original filename: config.py
#
# Author: Tim Brandt
# Email: tbrandt@astro.princeton.edu
# Date: August 2011
#
# Summary: Set configuration parameters to sensible values.
#
import re
from subprocess import *
import multiprocessing
import numpy as np
def config(nframes, framesize):
    """Probe the host (OS, RAM, CPUs) and derive pipeline parameters.

    :param nframes: number of frames in the dataset
    :param framesize: per-frame element count (dataset bytes are computed
        as framesize * nframes * 4, i.e. 4-byte values)
    :return: (mem, ncpus, storeall) — total physical RAM in bytes, thread
        count, and whether the dataset (< 20% of RAM) fits in memory so
        intermediate files need not be written.
    """
    ###################################################################
    # Fetch the total amount of physical system memory in bytes.
    # This is the second entry on the second line of the standard
    # output of the 'free' command.
    ###################################################################
    print "\nGetting system parameters, setting pipeline execution parameters..."
    osver = Popen(["uname", "-a"], stdout=PIPE).stdout.read()
    if osver.startswith("Linux"):
        print "You are running Linux."
    elif osver.startswith("Darwin"):
        print "You are running Mac OS-X."
    else:
        print "Your operating system is not recognized."
    if osver.startswith("Linux"):
        mem = Popen(["free", "-b"], stdout=PIPE).stdout.read()
        mem = int(mem.split('\n')[1].split()[1])
    elif osver.startswith("Darwin"):
        # On OS-X, sum the relevant page counts from vm_stat, scaled by
        # the page size reported on its first line.
        mem = Popen(["vm_stat"], stdout=PIPE).stdout.read().split('\n')
        blocksize = re.search('.*size of ([0-9]+) bytes.*', mem[0]).group(1)
        totmem = 0.
        for line in mem:
            if np.any(["Pages free:" in line, "Pages active:" in line,
                       "Pages inactive:" in line, "Pages speculative:" in line,
                       "Pages wired down:" in line]):
                totmem += float(line.split(':')[1]) * float(blocksize)
        mem = int(totmem)
    ncpus = multiprocessing.cpu_count()
    hostname = Popen("hostname", stdout=PIPE).stdout.read().split()[0]
    print "\n  You are running on " + hostname + "."
    print "  You have " + str(mem / 2**20) + " megabytes of memory and " + \
        str(ncpus) + " threads available."
    datasize = framesize * nframes * 4
    print "  The dataset consists of " + str(nframes) + " frames, " + \
        str(datasize * 100 / mem) + "% of your physical RAM."
    storeall = False
    if datasize * 100 / mem < 20:
        storeall = True
        print "  --> You have enough RAM to store all data."
        print "      The pipeline will not need to write all intermediate files."
    else:
        print "  --> You do not have enough RAM to store all data."
        print "      The pipeline will need to write all intermediate files"
        print "      and do the reduction in pieces."
    return mem, ncpus, storeall
| t-brandt/acorns-adi | utils/config.py | Python | bsd-2-clause | 2,628 |
"""
Example showing running Flexx' event loop in another thread.
This is not a recommended use in general.
Most parts of Flexx are not thread-save. E.g. setting properties
should generally only be done from a single thread. Event handlers
are *always* called from the same thread that runs the event loop
(unless manually called).
The app.create_server() is used to (re)create the server object. It is
important that the used IOLoop is local to the thread. This can be
accomplished by calling create_server() and start() from the same
thread, or using ``new_loop=True`` (as is done here).
"""
import time
import threading
from flexx import app, event
class MyModel1(event.HasEvents):
    """Tiny model with one int property, used to demo cross-thread events."""
    @event.prop
    def foo(self, v=0):
        return v
    @event.connect('foo')
    def on_foo(self, *events):
        # Runs on whichever thread drives the event loop (see module docstring).
        for ev in events:
            print('foo changed to', ev.new_value)
# Create model in main thread
model = MyModel1()
# Start server in its own thread
# (new_loop=True gives the server an IOLoop local to that thread,
# as required per the module docstring)
app.create_server(new_loop=True)
t = threading.Thread(target=app.start)
t.start()
# Manipulate model from main thread (the model's on_foo() gets called from other thread)
for i in range(5, 9):
    time.sleep(1)
    model.foo = i
# Stop event loop (this is thread-safe) and wait for thread to end
app.stop()
t.join()
| JohnLunzer/flexx | flexx/app/examples/flexx_in_thread.py | Python | bsd-2-clause | 1,287 |
#!/usr/bin/env python
"""
a class to access the REST API of the website www.factuursturen.nl
"""
import collections
import ConfigParser
from datetime import datetime, date
import re
import requests
from os.path import expanduser
import copy
import urllib
__author__ = 'Reinoud van Leeuwen'
__copyright__ = "Copyright 2013, Reinoud van Leeuwen"
__license__ = "BSD"
__maintainer__ = "Reinoud van Leeuwen"
__email__ = "reinoud.v@n.leeuwen.net"
CONVERTABLEFIELDS = {
'clients' : {'clientnr': 'int',
'showcontact': 'bool',
'tax_shifted': 'bool',
'lastinvoice': 'date',
'top': 'int',
'stddiscount': 'float',
'notes_on_invoice': 'bool',
'active': 'bool',
'default_email': 'int',
'timestamp': 'date'},
'products': {'id': 'int',
'price': 'float',
'taxes': 'int'},
'invoices': {'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'collection': 'bool',
'tax': 'float',
'totalintax': 'float',
'sent': 'date',
'uncollectible': 'date',
'lastreminder': 'date',
'open': 'float',
'paiddate': 'date',
'duedate': 'date',
'overwrite_if_exist': 'bool',
'initialdate': 'date',
'finalsenddate': 'date'},
'invoices_payment': {'date': 'date'},
'invoices_saved': {'id': 'int',
'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'totaldiscount': 'float',
'totalintax': 'float',
'clientnr': 'int'},
'invoices_repeated': {'id': 'int',
'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'datesaved': 'date',
'totalintax': 'float',
'initialdate': 'date',
'nextsenddate': 'date',
'finalsenddate': 'date',
'clientnr': 'int'},
'profiles': {'id': 'int'},
'countrylist' : {'id': 'int'},
'taxes': {'percentage': 'int',
'default': 'bool'}
}
API = {'getters' : ['clients',
'products',
'invoices',
'invoices_saved',
'invoices_repeated',
'profiles',
'balance',
'countrylist',
'taxes'],
'single_getters' : ['invoices_pdf'],
'posters' : ['clients',
'products',
'invoices'],
'putters' : ['clients',
'products',
'invoices_payment'],
'deleters' : ['clients',
'products',
'invoices',
'invoices_saved',
'invoices_repeated']}
class FactuursturenError(Exception):
    """Base class for all errors raised by this module."""

    def __init__(self, value=''):
        # Stash the payload (message or raw server response) for callers.
        self.value = value

    def __str__(self):
        return repr(self.value)
# Fine-grained error types so callers can catch specific failure modes;
# all inherit the payload/__str__ behaviour of FactuursturenError.
class FactuursturenGetError(FactuursturenError):
    pass
class FactuursturenPostError(FactuursturenError):
    pass
class FactuursturenWrongPostvalue(FactuursturenError):
    pass
class FactuursturenWrongPutvalue(FactuursturenError):
    pass
class FactuursturenEmptyResult(FactuursturenError):
    pass
class FactuursturenNoAuth(FactuursturenError):
    pass
class FactuursturenConversionError(FactuursturenError):
    pass
class FactuursturenWrongCall(FactuursturenError):
    pass
class FactuursturenNotFound(FactuursturenError):
    pass
class FactuursturenNoMoreApiCalls(FactuursturenError):
    pass
class Client:
"""
client class to access www.factuursturen.nl though REST API
"""
def __init__(self,
apikey='',
username='',
configsection='default',
host='www.factuursturen.nl',
protocol='https',
apipath='/api',
version='v0'):
"""
initialize object
When apikey and username are not present, look for INI-style file .factuursturen_rc
in current directory and homedirectory to find those values there.
when only username is present, try to find apikey in configfilesection where it is defined
:param apikey: APIkey (string) as generated online on the website http://www.factuursturen.nl
:param username: accountname for the website
:param configsection: section in file ~/.factuursturen_rc where apikey and username should be present
"""
self._url = protocol + '://' + host + apipath + '/' + version + '/'
# try to read auth details from file when not passed
config = ConfigParser.RawConfigParser()
config.read(['.factuursturen_rc', expanduser('~/.factuursturen_rc')])
if (not apikey) and (not username):
try:
self._apikey = config.get(configsection, 'apikey')
self._username = config.get(configsection, 'username')
except ConfigParser.NoSectionError:
raise FactuursturenNoAuth ('key and username not given, nor found in .factuursturen_rc or ~/.factuursturen_rc')
except ConfigParser.NoOptionError:
raise FactuursturenNoAuth ('no complete auth found')
elif username and (not apikey):
self._username = username
for section in config.sections():
if config.get(section, 'username') == username:
self._apikey = config.get(section, 'apikey')
if not self._apikey:
raise FactuursturenNoAuth ('no apikey found for username {}'.format(username))
else:
if not (apikey and username):
raise FactuursturenNoAuth ('no complete auth passed to factuursturen.Client')
self._apikey = apikey
self._username = username
# remaining allowed calls to API
self._remaining = None
self._lastresponse = None
self._headers = {'content-type': 'application/json',
'accept': 'application/json'}
# keep a list of which functions can be used to convert the fields
# from and to a string
self._convertfunctions = {'fromstring': {'int': self._string2int,
'bool': self._string2bool,
'float': self._string2float,
'date': self._string2date},
'tostring': {'int': self._int2string,
'bool': self._bool2string,
'float': self._float2string,
'date': self._date2string}}
# single value conversionfunctions
def _string2int(self, string):
try:
return int(string)
except ValueError:
raise FactuursturenConversionError('cannot convert {} to int'.format(string))
def _string2bool(self, string):
return string.lower() in ("yes", "true", "t", "1")
def _string2float(self, string):
try:
return float(string)
except ValueError:
raise FactuursturenConversionError('cannot convert {} to float'.format(string))
def _string2date(self, string):
if string == '':
return None
try:
return datetime.strptime(string, '%Y-%m-%d')
except ValueError:
raise FactuursturenConversionError('cannot convert {} to date'.format(string))
def _int2string(self, number):
if not isinstance(number, int):
raise FactuursturenConversionError('number {} should be of type int'.format(number))
return str(number)
def _bool2string(self, booleanvalue):
if not isinstance(booleanvalue, int):
raise FactuursturenConversionError('booleanvalue should be of type bool')
return str(booleanvalue).lower()
def _float2string(self, number):
if not (isinstance(number, float) or (isinstance(number, int))):
raise FactuursturenConversionError('number {} should be of type float'.format(number))
return str(number)
def _date2string(self, date):
if not isinstance(date, datetime):
raise FactuursturenConversionError('date should be of type datetime')
return date.strftime("%Y-%m-%d")
def _convertstringfields_in_dict(self, adict, function, direction):
"""convert fields of a single dict either from or to strings
fieldnames to convert are read from CONVERTIBLEFIELDS dict, which
is in essence a datadictionary for this API
:param adict: dictionary to convert
:param function: callable function in the API ('clients', 'products' etc)
:param direction: either 'tostring' or 'fromstring'
"""
if direction not in self._convertfunctions:
raise FactuursturenWrongCall ('_convertstringfields_in_dict called with {}'.format(direction))
if function in CONVERTABLEFIELDS:
for key, value in adict.iteritems():
if key in CONVERTABLEFIELDS[function]:
# note: target is something like 'int'. Depending
# on conversion direction, this is the source or the target
target = CONVERTABLEFIELDS[function][key]
conversion_function = self._convertfunctions[direction][target]
try:
adict[key] = conversion_function(value)
except FactuursturenConversionError:
print "key = {}, value = {}, direction = {}, target = {}".format(key, value, direction, target)
raise BaseException
return adict
def _convertstringfields_in_list_of_dicts(self, alist, function, direction):
"""convert each dict in the list
Basically, a loop over the function _convertstringfields_in_dict
:param alist: a list of dicts
:param function: callable function in the API ('clients', 'products' etc)
:param direction: either 'tostring' or 'fromstring'
"""
if direction not in self._convertfunctions:
raise FactuursturenWrongCall ('_convertstringfields_in_list_of_dicts called with {}'.format(direction))
for index, entry in enumerate(alist):
alist[index] = self._convertstringfields_in_dict(alist[index], function, direction)
return alist
def _flatten(self, adict, parent_key=''):
"""flatten a nested dict
The API expects nested dicts to be flattened when posting
{'lines': {'line1': {'amount': 1,
'tax': 21},
'line2': {'amount': 2,
'tax': 21}
}
}
to
{'lines[line1][amount]': 1,
'lines[line1][tax]': 21,
'lines[line2][amount]': 2,
'lines[line2][tax]': 21
}
:param adict: a nested dict
:param parent_key: should be empty, used for recursion
"""
items = []
for k, v in adict.items():
new_key = parent_key + '[' + k + ']' if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
def _fixkeynames(self, adict):
"""replace keynames in dict
replace keys like 'lines[line0][amount_desc]'
with 'lines[0][amount_desc]'
(keeping the same value)
:param adict: dictionary to be changed
"""
for key, val in adict.items():
fields = re.split('\]\[', key)
if len(fields) > 1:
leftfields = re.split('\[', fields[0])
middlefield = re.sub("[^0-9]", "", leftfields[1])
newfield = leftfields[0] + '[' + middlefield + '][' + fields[1]
adict[newfield] = val
del adict[key]
return adict
def _prepare_for_send(self, adict, function):
"""fix dict so it can be posted
:param adict: dictionary to be posted
:param function: callable function from the API ('clients', 'products', etc)
"""
adict = self._convertstringfields_in_dict(adict, function, 'tostring')
adict = self._flatten(adict)
adict = self._fixkeynames(adict)
return adict
    def _escape_characters(self, string):
        """escape unsafe webcharacters to use in API call

        by default urllib considers '/' as safe; override that by
        considering nothing safe, so object ids embedded in URLs
        are fully percent-encoded.

        NOTE(review): uses the Python 2 location `urllib.quote`; under
        Python 3 this function lives at urllib.parse.quote.
        """
        return urllib.quote(str(string), safe='')
    @property
    def remaining(self):
        """Remaining allowed API calls this hour (None until the first call)."""
        return self._remaining
    @property
    def ok(self):
        """Whether the most recent API call succeeded (None until the first call)."""
        return self._lastresponse
    def post(self, function, objData):
        """Generic wrapper for all POSTable functions

        errors from server during post (like wrong values) are propagated
        to the exceptionclass

        :param function: callable function from the API ('clients', 'products', etc)
        :param objData: data to be posted
        :return: the raw response content from the server
        :raises FactuursturenPostError: when *function* is not POSTable
        :raises FactuursturenWrongPostvalue: when the server rejects the data
        """
        fullUrl = self._url + function
        # work on a copy: _prepare_for_send mutates its argument
        objData_local = copy.deepcopy(objData)
        if function not in API['posters']:
            raise FactuursturenPostError("{function} not in available POSTable functions".format(function=function))
        if isinstance(objData_local, dict):
            objData_local = self._prepare_for_send(objData_local, function)
        response = requests.post(fullUrl,
                                 data=objData_local,
                                 auth=(self._username, self._apikey))
        self._lastresponse = response.ok
        if response.ok:
            self._remaining = int(response.headers['x-ratelimit-remaining'])
            return response.content
        else:
            raise FactuursturenWrongPostvalue(response.content)
def put(self, function, objId, objData):
"""Generic wrapper for all PUTable functions
errors from server during post (like wrong values) are propagated to the exceptionclass
:param function: callabe function from the API ('clients', 'products', etc)
:param objId: id of object to be put (usually retrieved from the API)
:param objData: data to be posted. All required fields should be present, or the API will not accept the changes
"""
fullUrl = self._url + function + '/{objId}'.format(objId=self._escape_characters(objId))
if function not in API['putters']:
raise FactuursturenPostError("{function} not in available PUTable functions".format(function=function))
if isinstance(objData, dict):
objData = self._prepare_for_send(objData, function)
response = requests.put(fullUrl,
data=objData,
auth=(self._username, self._apikey))
self._lastresponse = response.ok
if response.ok:
self._remaining = int(response.headers['x-ratelimit-remaining'])
return
else:
raise FactuursturenWrongPutvalue(response.content)
def delete(self, function, objId):
"""Generic wrapper for all DELETEable functions
errors from server during post (like wrong values) are propagated to the exceptionclass
:param function: callabe function from the API ('clients', 'products', etc)
:param objId: id of object to be put (usually retrieved from the API)
"""
fullUrl = self._url + function + '/{objId}'.format(objId=self._escape_characters(objId))
if function not in API['deleters']:
raise FactuursturenPostError("{function} not in available DELETEable functions".format(function=function))
response = requests.delete(fullUrl,
auth=(self._username, self._apikey))
self._lastresponse = response.ok
if response.ok:
self._remaining = int(response.headers['x-ratelimit-remaining'])
else:
raise FactuursturenError(response.content)
    def get(self, function, objId=None):
        """Generic wrapper for all GETtable functions.

        when no objId is passed, retrieve all objects (in a list of dicts)
        when objId is passed, only retrieve a single object (in a single dict)

        :param function: callable function from the API ('clients', 'products', etc)
        :param objId: id of a single object to retrieve (usually retrieved from the API)
        :raises FactuursturenGetError: when `function` is not GETtable
        :raises FactuursturenNotFound: when the server answers 404
        :raises FactuursturenNoMoreApiCalls: when the hourly quota is exhausted
        :raises FactuursturenEmptyResult: for any other non-ok response
        """
        # TODO: some errorchecking:
        # - on function
        # - on return
        # - on network error
        # - on password
        # - on remaining allowed requests
        fullUrl = self._url + function
        # check function against self.getters and self.singleGetters
        if function not in API['getters'] + API['single_getters']:
            raise FactuursturenGetError("{function} not in available GETtable functions".format(function=function))
        if objId:
            fullUrl += '/{objId}'.format(objId=self._escape_characters(objId))
        response = requests.get(fullUrl,
                                auth=(self._username, self._apikey),
                                headers=self._headers)
        self._lastresponse = response.ok
        # When a single record is requested ('clients/<id>'), the payload is
        # a dict with one key named after the singular form ('client'), so
        # strip the trailing 's' from the function name to index into it.
        singlefunction = function[:-1]
        # Update the quota counter before inspecting the status, so the
        # "no more API calls" check in the error branch below sees the
        # freshly returned value.
        self._remaining = int(response.headers['x-ratelimit-remaining'])
        if response.ok:
            if function == 'invoices_pdf':
                # PDF download: hand back the raw bytes untouched
                return response.content
            try:
                raw_structure = response.json()
                if objId is None:
                    retval = self._convertstringfields_in_list_of_dicts(raw_structure, function, 'fromstring')
                else:
                    retval = self._convertstringfields_in_dict(raw_structure[singlefunction], function, 'fromstring')
            except FactuursturenError as error:
                # Conversion failed: report it and fall back to the raw body
                print error
                retval = response.content
            return retval
        else:
            # TODO: more checking
            if response.status_code == 404:
                raise FactuursturenNotFound (response.content)
            elif self._remaining == 0:
                raise FactuursturenNoMoreApiCalls ('limit of API calls reached.')
            else:
                raise FactuursturenEmptyResult (response.content)
| reinoud/factuursturen | factuursturen/__init__.py | Python | bsd-2-clause | 19,420 |
#===========================================================================
#
# Port to use for the web server.  Configure the Eagle to use this
# port as its 'cloud provider' using http://host:PORT
#
#===========================================================================
httpPort = 22042
#===========================================================================
#
# MQTT topic names
#
#===========================================================================
# Meter reading topic (reports current meter reading in kWh)
mqttEnergy = 'power/elec/Home/energy'
# Instantaneous power usage topic (reports power usage in W)
mqttPower = 'power/elec/Home/power'
#===========================================================================
#
# Logging configuration. Env variables are allowed in the file name.
#
#===========================================================================
logFile = '/var/log/tHome/eagle.log'
# Numeric level from the stdlib 'logging' module (40 == logging.ERROR).
logLevel = 40
| TD22057/T-Home | conf/eagle.py | Python | bsd-2-clause | 953 |
#!/usr/bin/env python
from setuptools import setup, find_packages

__doc__ = """
Falsify data
"""

version = '0.0.1'

# Package metadata for the 'perjury' fake-data library; the module
# docstring doubles as both the short and the long description.
setup(
    name='perjury',
    version=version,
    description=__doc__,
    long_description=__doc__,
    author='Aaron Merriam',
    author_email='aaronmerriam@gmail.com',
    url='https://github.com/aaronmerriam/foundry',
    keywords='content',
    packages=find_packages(),
    platforms='any',
    license='BSD',
    test_suite='tests',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Natural Language :: English',
    ],
)
| pipermerriam/perjury | setup.py | Python | bsd-2-clause | 565 |
"""
This module provides vCard parameters that are defined by the vCard 4.0
RFC.
"""
from vcard4.parameters import BaseParameter
class Language(BaseParameter):
    """
    A LANGUAGE parameter, e.g.:

        ROLE;LANGUAGE=tr:hoca
    """

    def __init__(self, language):
        # The parameter name is fixed; only the value varies.
        super(Language, self).__init__('LANGUAGE', language)

    def __repr__(self):
        return 'Language({0!r})'.format(self.value)
| Crosse/vcard4 | vcard4/parameters/RFCParameters.py | Python | bsd-2-clause | 413 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'rich_string03.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'

        self.got_filename = '{0}_test_{1}'.format(test_dir, filename)
        self.exp_filename = '{0}xlsx_files/{1}'.format(test_dir, filename)

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Write plain and rich strings and compare against the Excel file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        fmt_bold = workbook.add_format({'bold': 1})
        fmt_italic = workbook.add_format({'italic': 1})

        worksheet.write('A1', 'Foo', fmt_bold)
        worksheet.write('A2', 'Bar', fmt_italic)
        worksheet.write_rich_string('A3', fmt_bold, 'abc', 'defg')

        workbook.close()

        self.assertExcelEqual()
| jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_rich_string03.py | Python | bsd-2-clause | 1,229 |
# coding: utf8
# web2py Italian (it) translation strings.
# Fixed typos in user-facing values: 'Oggetto', 'per favore reimmetti',
# 'Chiave di Registrazione', 'Invia'.
{
'!=': '!=',
'!langcode!': 'it',
'!langname!': 'Italiano',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". Non si può fare "update" o "delete" dei risultati di un JOIN ',
'%(nrows)s records found': '%(nrows)s record trovati',
'%d seconds ago': '%d secondi fa',
'%s %%{row} deleted': '%s righe ("record") cancellate',
'%s %%{row} updated': '%s righe ("record") modificate',
'%s selected': '%s selezionato',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'@markmin\x01Number of entries: **%s**': 'Numero di entità: **%s**',
'About': 'About',
'Access Control': 'Controllo Accessi',
'Add': 'Aggiungi',
'Administrative Interface': 'Interfaccia Amministrativa',
'Administrative interface': 'Interfaccia amministrativa',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please %s the page': "E' stato rilevato un errore, prego %s la pagina",
'And': 'E',
'appadmin is disabled because insecure channel': 'Amministrazione (appadmin) disabilitata: comunicazione non sicura',
'Are you sure you want to delete this object?': 'Sicuro di voler cancellare questo oggetto ?',
'Available Databases and Tables': 'Database e tabelle disponibili',
'Back': 'Indietro',
'Buy this book': 'Compra questo libro',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Non può essere vuoto',
'Change password': 'Cambia Password',
'change password': 'Cambia password',
'Check to delete': 'Seleziona per cancellare',
'Clear': 'Resetta',
'Clear CACHE?': 'Resetta CACHE?',
'Clear DISK': 'Resetta DISK',
'Clear RAM': 'Resetta RAM',
'Client IP': 'Client IP',
'Close': 'Chiudi',
'Cognome': 'Cognome',
'Community': 'Community',
'Components and Plugins': 'Componenti and Plugin',
'contains': 'contiene',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Creato Da',
'Created On': 'Creato Il',
'CSV': 'CSV',
'CSV (hidden cols)': 'CSV (hidden cols)',
'Current request': 'Richiesta (request) corrente',
'Current response': 'Risposta (response) corrente',
'Current session': 'Sessione (session) corrente',
'customize me!': 'Personalizzami!',
'data uploaded': 'dati caricati',
'Database': 'Database',
'Database %s select': 'Database %s select',
'db': 'db',
'DB Model': 'Modello di DB',
'Delete': 'Cancella',
'Delete:': 'Cancella:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Descrizione',
'design': 'progetta',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentazione',
"Don't know what to do?": 'Non sai cosa fare?',
'done!': 'fatto!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Modifica',
'Edit current record': 'Modifica record corrente',
'edit profile': 'modifica profilo',
'Edit This App': 'Modifica questa applicazione',
'Email and SMS': 'Email e SMS',
'Email non valida': 'Email non valida',
'enter an integer between %(min)g and %(max)g': 'inserisci un intero tra %(min)g e %(max)g',
'Errors': 'Errori',
'Errors in form, please check it out.': 'Errori nel form, ricontrollalo',
'export as csv file': 'esporta come file CSV',
'Export:': 'Esporta:',
'FAQ': 'FAQ',
'First name': 'Nome',
'Forgot username?': 'Dimenticato lo username?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'ID Gruppo',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Groups': 'Groups',
'hello': 'hello',
'hello world': 'salve mondo',
'Hello World': 'Salve Mondo',
'Hello World in a flash!': 'Salve Mondo in un flash!',
'Home': 'Home',
'How did you get here?': 'Come sei arrivato qui?',
'HTML': 'HTML',
'import': 'importa',
'Import/Export': 'Importa/Esporta',
'Index': 'Indice',
'insert new': 'inserisci nuovo',
'insert new %s': 'inserisci nuovo %s',
'Internal State': 'Stato interno',
'Introduction': 'Introduzione',
'Invalid email': 'Email non valida',
'Invalid login': 'Login non valido',
'Invalid Query': 'Richiesta (query) non valida',
'invalid request': 'richiesta non valida',
'Is Active': "E' attivo",
'Key': 'Chiave',
'Last name': 'Cognome',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Loggato',
'Logged out': 'Disconnesso',
'login': 'accesso',
'Login': 'Login',
'logout': 'uscita',
'Logout': 'Logout',
'Lost Password': 'Password Smarrita',
'Lost password?': 'Password smarrita?',
'lost password?': 'dimenticato la password?',
'Main Menu': 'Menu principale',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu Modelli',
'Modified By': 'Modificato da',
'Modified On': 'Modificato il',
'My Sites': 'My Sites',
'Name': 'Nome',
'New': 'Nuovo',
'New password': 'Nuova password',
'New Record': 'Nuovo elemento (record)',
'new record inserted': 'nuovo record inserito',
'next 100 rows': 'prossime 100 righe',
'No databases in this application': 'Nessun database presente in questa applicazione',
'No records found': 'Nessun record trovato',
'Nome': 'Nome',
'Non può essere vuoto': 'Non può essere vuoto',
'not authorized': 'non autorizzato',
'Object or table name': 'Oggetto o nome tabella',
'Old password': 'Vecchia password',
'Online examples': 'Vedere gli esempi',
'Or': 'O',
'or import from csv file': 'oppure importa da file CSV',
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
"Password fields don't match": 'I campi password non sono uguali',
'please input your password again': 'per favore reimmetti la tua password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': '100 righe precedenti',
'Profile': 'Profilo',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Richiesta (query):',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'Record',
'record does not exist': 'il record non esiste',
'Record ID': 'Record ID',
'Record id': 'Record id',
'Register': 'Registrati',
'register': 'registrazione',
'Registration identifier': 'Registration identifier',
'Registration key': 'Chiave di Registrazione',
'Registration successful': 'Registrazione avvenuta',
'reload': 'reload',
'Remember me (for 30 days)': 'Ricordami (per 30 giorni)',
'Request reset password': 'Richiedi il reset della password',
'Reset Password key': 'Resetta chiave Password ',
'Role': 'Ruolo',
'Rows in Table': 'Righe nella tabella',
'Rows selected': 'Righe selezionate',
'Save model as...': 'Salva modello come...',
'Save profile': 'Salva profilo',
'Search': 'Ricerca',
'Semantic': 'Semantic',
'Services': 'Servizi',
'Size of cache:': 'Size of cache:',
'starts with': 'comincia con',
'state': 'stato',
'Statistics': 'Statistics',
'Stylesheet': 'Foglio di stile (stylesheet)',
'submit': 'Invia',
'Submit': 'Invia',
'Support': 'Support',
'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?',
'Table': 'tabella',
'Table name': 'Nome tabella',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'L\'output del file è un "dictionary" che è stato visualizzato dalla vista %s',
'The Views': 'The Views',
'This App': 'This App',
'This is a copy of the scaffolding application': "Questa è una copia dell'applicazione di base (scaffold)",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Ora (timestamp)',
'too short': 'troppo corto',
'Traceback': 'Traceback',
'TSV (Excel compatible)': 'TSV (Excel compatibile)',
'TSV (Excel compatible, hidden cols)': 'TSV (Excel compatibile, hidden cols)',
'Twitter': 'Twitter',
'unable to parse csv file': 'non riesco a decodificare questo file CSV',
'Update': 'Aggiorna',
'Update:': 'Aggiorna:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'ID Utente',
'value already in database or empty': 'valore già presente nel database o vuoto',
'Verify Password': 'Verifica Password',
'Videos': 'Videos',
'View': 'Vista',
'Welcome': 'Welcome',
'Welcome %s': 'Benvenuto %s',
'Welcome to web2py': 'Benvenuto su web2py',
'Welcome to web2py!': 'Benvenuto in web2py!',
'Which called the function %s located in the file %s': 'che ha chiamato la funzione %s presente nel file %s',
'XML': 'XML',
'You are successfully running web2py': 'Stai eseguendo web2py con successo',
'You can modify this application and adapt it to your needs': 'Puoi modificare questa applicazione adattandola alle tue necessità',
'You visited the url %s': "Hai visitato l'URL %s",
}
| OpenTreeOfLife/opentree | curator/languages/it.py | Python | bsd-2-clause | 9,673 |
#!/usr/bin/python
import fileinput
import string
import sys
import os
# Build tools and flags used to compile the generated Fortran/C sources.
ar = 'ar'
fortran_compiler = 'ftn'
fortran_opt_flags = '-O3'
fortran_link_flags = '-O1'
c_compiler = 'cc'
c_opt_flags = '-O3'
# Directory layout for generated sources, objects and executables.
src_dir = './src/'
obj_dir = './obj/'
exe_dir = './exe/'
lib_name = 'tce_sort_f77_basic.a'
# Benchmark parameters: timing-loop repetition count and the extent of
# each of the four tensor dimensions (kept as strings because they are
# spliced into generated Fortran source).
count = '100'
rank = '30'
ranks = [rank,rank,rank,rank]
# Total number of elements in the 4-index tensor.
size = int(ranks[0])*int(ranks[1])*int(ranks[2])*int(ranks[3])
sizechar = str(size)
def perm(l):
    """Return all permutations of list *l* as a list of lists.

    The head element is inserted at every position of every permutation
    of the tail, which fixes the output ordering relied on elsewhere.
    Uses range() instead of the Python-2-only xrange() (identical
    semantics here) so the helper also runs under Python 3.

    :param l: list to permute
    :returns: list of len(l)! lists
    """
    sz = len(l)
    if sz <= 1:
        return [l]
    return [p[:i]+[l[0]]+p[i:] for i in range(sz) for p in perm(l[1:])]
# The four tensor indices; every permutation of these is both a
# transpose to test and a candidate loop order.
indices = ['4','3','2','1']
#all_permutations = [indices]
#transpose_list = [indices]
#loop_list = [indices]
all_permutations = perm(indices)
transpose_list = perm(indices)
loop_list = perm(indices)
# Compile the reference implementations (Hirata Fortran sorter, the
# correctness predicate, and the Kevin-Glass C kernels) and archive the
# objects into lib_name so every generated driver can link against them.
print fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F'
os.system(fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F')
os.system('ar -r '+lib_name+' tce_sort_hirata.o')
print fortran_compiler+' '+fortran_opt_flags+' -c glass_correct.F'
os.system(fortran_compiler+' '+fortran_opt_flags+' -c glass_correct.F')
os.system('ar -r '+lib_name+' glass_correct.o')
print c_compiler+' '+c_opt_flags+' -c tce_sort_4kg.c'
os.system(c_compiler+' '+c_opt_flags+' -c tce_sort_4kg.c')
os.system('ar -r '+lib_name+' tce_sort_4kg.o')
print c_compiler+' '+c_opt_flags+' -c tce_sort_4kg_4321.c'
os.system(c_compiler+' '+c_opt_flags+' -c tce_sort_4kg_4321.c')
os.system('ar -r '+lib_name+' tce_sort_4kg_4321.o')
# For every transpose permutation ABCD: emit a Fortran driver that times
# the Hirata reference, the Glass C kernels (when applicable) and every
# generated loop-order variant, verifies each result element-wise
# against the reference, then compiles and links the driver.
for transpose_order in transpose_list:
    dummy = 0
    A = transpose_order[0]
    B = transpose_order[1]
    C = transpose_order[2]
    D = transpose_order[3]
    driver_name = 'transpose_'+A+B+C+D
    print driver_name
    source_name = driver_name+'_driver.F'
    lst_name = driver_name+'_driver.lst'
    source_file = open(source_name,'w')
    # --- program header, test arrays and timing variables ---
    source_file.write('      PROGRAM ARRAYTEST\n')
    source_file.write('#include "mpif.h"\n')
    source_file.write('      REAL*8 before('+ranks[0]+','+ranks[0]+','+ranks[0]+','+ranks[0]+')\n')
    source_file.write('      REAL*8 after_jeff('+sizechar+')\n')
    source_file.write('      REAL*8 after_hirata('+sizechar+')\n')
    source_file.write('      REAL*8 after_glass('+sizechar+')\n')
    source_file.write('      REAL*8 factor\n')
    source_file.write('      REAL*8 Tstart,Tfinish,Thirata,Tglass,Tjeff\n')
    source_file.write('      REAL*8 Tspeedup,Tbest\n')
    source_file.write('      INTEGER*4 i,j,k,l\n')
    source_file.write('      INTEGER*4 aSize(4)\n')
    source_file.write('      INTEGER*4 perm(4)\n')
    source_file.write('      INTEGER*4 fastest(4)\n')
    source_file.write('      INTEGER ierror\n')
    source_file.write('      LOGICAL glass_correct\n')
    source_file.write('      EXTERNAL glass_correct\n')
    source_file.write('      call mpi_init(ierror)\n')
    source_file.write('      aSize(1) = '+ranks[0]+'\n')
    source_file.write('      aSize(2) = '+ranks[1]+'\n')
    source_file.write('      aSize(3) = '+ranks[2]+'\n')
    source_file.write('      aSize(4) = '+ranks[3]+'\n')
    source_file.write('      perm(1) = '+A+'\n')
    source_file.write('      perm(2) = '+B+'\n')
    source_file.write('      perm(3) = '+C+'\n')
    source_file.write('      perm(4) = '+D+'\n')
    # --- fill the input tensor with position-encoding values ---
    source_file.write('      DO 70 i = 1, '+ranks[0]+'\n')
    source_file.write('        DO 60 j = 1, '+ranks[1]+'\n')
    source_file.write('          DO 50 k = 1, '+ranks[2]+'\n')
    source_file.write('            DO 40 l = 1, '+ranks[3]+'\n')
    source_file.write('              before(i,j,k,l) = l + k*10 + j*100 + i*1000\n')
    source_file.write('40          CONTINUE\n')
    source_file.write('50        CONTINUE\n')
    source_file.write('60      CONTINUE\n')
    source_file.write('70    CONTINUE\n')
    source_file.write('      factor = 1.0\n')
    source_file.write('      Tbest=999999.0\n')
    # --- time the Hirata reference implementation ---
    source_file.write('      Tstart=0.0\n')
    source_file.write('      Tfinish=0.0\n')
    source_file.write('      CALL CPU_TIME(Tstart)\n')
    source_file.write('      DO 30 i = 1, '+count+'\n')
    source_file.write('        CALL tce_sort_4(before, after_hirata,\n')
    source_file.write('     &  aSize(1), aSize(2), aSize(3), aSize(4),\n')
    source_file.write('     &  perm(1), perm(2), perm(3), perm(4), factor)\n')
    source_file.write('30    CONTINUE\n')
    source_file.write('      CALL CPU_TIME(Tfinish)\n')
    source_file.write('      Thirata=(Tfinish-Tstart)\n')
    # --- time the Glass kernels (dedicated 4321 kernel when applicable) ---
    source_file.write('      Tstart=0.0\n')
    source_file.write('      Tfinish=0.0\n')
    source_file.write('      Tstart=rtc()\n')
    source_file.write('      IF( ((perm(1).eq.4).and.(perm(2).eq.3)).and.\n')
    source_file.write('     &    ((perm(3).eq.2).and.(perm(4).eq.1)) ) THEN\n')
    source_file.write('      CALL CPU_TIME(Tstart)\n')
    source_file.write('      DO 31 i = 1, '+count+'\n')
    source_file.write('        CALL tce_sort_4kg_4321_(before, after_glass,\n')
    source_file.write('     &  aSize(1), aSize(2), aSize(3), aSize(4),\n')
    source_file.write('     &  factor)\n')
    source_file.write('31    CONTINUE\n')
    source_file.write('      CALL CPU_TIME(Tfinish)\n')
    source_file.write('      ELSEIF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
    source_file.write('      CALL CPU_TIME(Tstart)\n')
    source_file.write('      DO 32 i = 1, '+count+'\n')
    source_file.write('        CALL tce_sort_4kg_(before, after_glass,\n')
    source_file.write('     &  aSize(1), aSize(2), aSize(3), aSize(4),\n')
    source_file.write('     &  perm(1), perm(2), perm(3), perm(4), factor)\n')
    source_file.write('32    CONTINUE\n')
    source_file.write('      CALL CPU_TIME(Tfinish)\n')
    source_file.write('      ENDIF\n')
    #source_file.write('      Tfinish=rtc()\n')
    source_file.write('      Tglass=(Tfinish-Tstart)\n')
    # --- verify Glass output against the Hirata reference ---
    source_file.write('      IF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
    #source_file.write('      PRINT*,"      i       after_glass(i)\n')
    #source_file.write('     & after_hirata(i)"\n')
    source_file.write('      DO 33 i = 1, '+sizechar+'\n')
    source_file.write('      IF (after_glass(i).ne.after_hirata(i)) THEN\n')
    source_file.write('      PRINT*,"glass error ",i,after_glass(i),after_hirata(i)\n')
    source_file.write('      ENDIF\n')
    source_file.write('33    CONTINUE\n')
    source_file.write('      ENDIF\n')
    source_file.write('      write(6,*) "TESTING TRANPOSE TYPE '+A+B+C+D+'"\n')
    source_file.write('      write(6,*) "==================="\n')
    source_file.write('      write(6,*) "The compilation flags were:"\n')
    for option in range(0,len(fortran_opt_flags.split())):
        source_file.write('      write(6,*) "'+fortran_opt_flags.split()[option]+'"\n')
    source_file.write('      write(6,*) "==================="\n')
    source_file.write('      write(6,*) "Hirata Reference = ",Thirata,"seconds"\n')
    source_file.write('      IF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
    source_file.write('      write(6,*) "KGlass Reference = ",Tglass,"seconds"\n')
    source_file.write('      ENDIF\n')
    source_file.write('      write(6,1001) "Algorithm","Jeff","Speedup","Best","Best Speedup"\n')
    # --- time and verify every generated loop-order variant ---
    for loop_order in loop_list:
        dummy = dummy+1
        a = loop_order[0]
        b = loop_order[1]
        c = loop_order[2]
        d = loop_order[3]
        subroutine_name = 'trans_'+A+B+C+D+'_loop_'+a+b+c+d+'_'
        source_file.write('      Tstart=0.0\n')
        source_file.write('      Tfinish=0.0\n')
        source_file.write('      CALL CPU_TIME(Tstart)\n')
        source_file.write('      DO '+str(100+dummy)+' i = 1, '+count+'\n')
        source_file.write('        CALL '+subroutine_name+'(before, after_jeff,\n')
        source_file.write('     &  aSize(1), aSize(2), aSize(3), aSize(4),\n')
        source_file.write('     &  factor)\n')
        source_file.write(str(100+dummy)+'    CONTINUE\n')
        source_file.write('      CALL CPU_TIME(Tfinish)\n')
        source_file.write('      Tjeff=(Tfinish-Tstart)\n')
        source_file.write('      Tspeedup=Thirata/Tjeff\n')
        source_file.write('      Tbest=min(Tjeff,Tbest)\n')
        source_file.write('      if(Tjeff.eq.Tbest) then \n')
        source_file.write('        fastest(1)='+a+'\n')
        source_file.write('        fastest(2)='+b+'\n')
        source_file.write('        fastest(3)='+c+'\n')
        source_file.write('        fastest(4)='+d+'\n')
        source_file.write('      endif\n')
#        source_file.write('      goto 911\n') ########################
        # pad the variant counter so the report columns line up
        if 0 < dummy < 10:
            nice_dummy='  '+str(dummy)
        if 9 < dummy < 100:
            nice_dummy=' '+str(dummy)
        if 99 < dummy < 999:
            nice_dummy=''+str(dummy)
        #source_file.write('      PRINT*,"Loop '+a+b+c+d+' ",Tjeff,Tspeedup\n')
        source_file.write('      write(6,1100) "'+nice_dummy+' Loop '+a+b+c+d+' ",\n')
        source_file.write('     & Tjeff,Tspeedup,Tbest,Thirata/Tbest\n')
        #source_file.write('      DO '+str(500+dummy)+' i = 1, '+sizechar+'\n')
        #source_file.write('      IF (after_jeff(i).ne.after_hirata(i)) THEN\n')
        #source_file.write('      write(6,*),"transpose is wrong for element = ",i\n')
        #source_file.write('      ENDIF\n')
        #source_file.write(str(500+dummy)+'    CONTINUE\n')
        #source_file.write('      PRINT*,"  i,  after_jeff(i),after_hirata(i)"\n')
        source_file.write('      DO '+str(500+dummy)+' i = 1, '+sizechar+'\n')
        source_file.write('      IF (after_jeff(i).ne.after_hirata(i)) THEN\n')
        source_file.write('      PRINT*,"jeff error ",i,after_jeff(i),after_hirata(i)\n')
        source_file.write('      ENDIF\n')
        source_file.write(str(500+dummy)+'    CONTINUE\n')
    # --- final report and program footer ---
    source_file.write('      write(6,1020) "The best loop order for '+A+B+C+D+' is:",\n')
    source_file.write('     & fastest(1),fastest(2),fastest(3),fastest(4)\n')
    source_file.write('      write(6,1030) "The best time is:",Tbest\n')
    source_file.write('      write(6,1030) "The best speedup is:",Thirata/Tbest\n')
    source_file.write('      call mpi_finalize(ierror)\n')
    source_file.write('      STOP\n')
    source_file.write(' 1001 format(1x,a13,a12,a15,a9,a18)\n')
    source_file.write(' 1020 format(1x,a30,8x,4i1)\n')
    source_file.write(' 1030 format(1x,a30,1f12.5)\n')
    source_file.write(' 1100 format(1x,a16,4f12.5)\n')
    source_file.write(' 911  continue\n')
    source_file.write('      END\n')
    source_file.close()
    # compile and link the generated driver, then archive the source
    print fortran_compiler+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' '+' -o '+exe_dir+driver_name+'.x'
    os.system(fortran_compiler+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' -o '+exe_dir+driver_name+'.x')
    os.system('mv '+source_name+' '+src_dir)
#!/usr/bin/python
import os, sys
from AnnotationLib import *
from optparse import OptionParser
import copy
import math
# BASED ON WIKIPEDIA VERSION
# n - number of nodes
# C - capacity matrix
# F - flow matrix
# s - source
# t - sink
# sumC - sum over rows of C (too speed up computation)
def edmonds_karp(n, C, s, t, sumC):
    """Compute a maximum flow from s to t (Edmonds-Karp: BFS augmenting paths).

    :param n: number of nodes in the graph
    :param C: n x n capacity matrix
    :param s: index of the source node
    :param t: index of the sink node
    :param sumC: per-node row sums of C; nodes whose entry is 0 are not
        expanded during the BFS (prunes inactive detection nodes)
    :returns: n x n flow matrix F; residual capacity u->v is C[u][v] - F[u][v]

    Uses range() instead of the Python-2-only xrange() (identical
    semantics here) so the routine also runs under Python 3.
    """
    # Residual capacity from u to v is C[u][v] - F[u][v]
    F = [[0] * n for i in range(n)]
    while True:
        P = [-1] * n # Parent table
        P[s] = s
        M = [0] * n # Capacity of path to node
        M[s] = float('infinity')
        Q = [s] # BFS queue
        while Q:
            u = Q.pop(0)
            for v in range(n):
                # There is available capacity,
                # and v is not seen before in search
                if C[u][v] - F[u][v] > 0 and P[v] == -1:
                    P[v] = u
                    M[v] = min(M[u], C[u][v] - F[u][v])
                    if v != t:
                        # NOTE(review): the pruning test reads sumC[u], not
                        # sumC[v]; kept as-is to preserve behaviour.
                        if(sumC[u] > 0):
                            Q.append(v)
                    else:
                        # Backtrack search, and write flow
                        while P[v] != v:
                            u = P[v]
                            F[u][v] += M[t]
                            F[v][u] -= M[t]
                            v = u
                        Q = None
                        break
        if P[t] == -1: # We did not find a path to t
            return (F)
class AnnoGraph:
    """Bipartite matching of detections against ground-truth annotations.

    The matching for one image is solved as a max-flow problem on the
    graph  source -> detection nodes -> ground-truth nodes -> sink, all
    edges with capacity 1.  Detections are activated one at a time (in
    descending score order) and the flow is recomputed with
    edmonds_karp(); the resulting flow yields true positives, false
    positives and missed ground truth.
    """
    def __init__(self, anno, det, ignore, style, minCover, minOverlap, maxDistance, ignoreOverlap):
        """Build the flow network for one image.

        :param anno: ground-truth annotation (rects to be recalled)
        :param det: detections for the same image; sorted here by descending score
        :param ignore: annotation whose rects mark GT regions to ignore
        :param style: 0 = isMatchingStd (cover/overlap/distance test),
            1 = Pascal overlap test (see match())
        :param minCover: min cover threshold for style 0
        :param minOverlap: overlap threshold (both styles)
        :param maxDistance: max center distance for style 0
        :param ignoreOverlap: pascal overlap above which a GT rect is ignored
        """
        # setting rects
        #print anno.imageName
        self.anno = anno
        self.det = det
        self.det.sortByScore("descending")
        # generate initial graph
        self.n = len(det.rects)
        self.m = len(anno.rects)
        # Number of nodes = number of detections + number of GT + source + sink
        self.a = self.n + self.m + 2
        # Flow matrix
        self.F = [[0] * self.a for i in xrange(self.a)]
        # Capacity matrix
        self.C = [[0] * self.a for i in xrange(self.a)]
        # Connect source (node 0) to all detections (nodes 1..n)
        for i in range(1, self.n + 1):
            self.C[0][i] = 1
            self.C[i][0] = 1
        # Connect sink (last node) to all GT (nodes n+1..n+m)
        for i in range(self.n + 1, self.a - 1):
            self.C[i][self.a - 1] = 1
            self.C[self.a - 1][i] = 1
        # Overall flow
        self.full_flow = 0
        self.ignore_flow = 0
        # match rects / Adjacency matrix: M[i] lists GT node ids matching detection i
        self.M = [[] for i in xrange(self.n)]
        self.match(style, minCover, minOverlap, maxDistance)
        # Index of the next (not yet activated) detection, in score order
        self.nextN = 0
        # Deactivate All Non Matching detections
        # Save row sums for capacity matrix
        self.sumC = []
        self.sumC.append(self.n)
        for q in [len(self.M[j]) for j in xrange(len(self.M))]:
            self.sumC.append(q)
        for q in [1] * self.m:
            self.sumC.append(q)
        # Initially no links are active (detection rows start at 0)
        self.sumC_active = []
        self.sumC_active.append(self.n)
        for q in [len(self.M[j]) for j in xrange(len(self.M))]:
            self.sumC_active.append(0)
        for q in [1] * self.m:
            self.sumC_active.append(q)
        # Mark GT rects that overlap an ignore region; flow through these
        # is excluded from the evaluation counts
        self.ignore = [ 0 ] * self.m
        for ig in ignore.rects:
            for i, r in enumerate(anno.rects):
                if(ig.overlap_pascal(r) > ignoreOverlap):
                    self.ignore[i] = 1
    def match(self, style, minCover, minOverlap, maxDistance):
        """Fill self.M with detection->GT edges that pass the match test."""
        for i in xrange(self.n):
            detRect = self.det.rects[i]
            for j in xrange(self.m):
                annoRect = self.anno.rects[j]
                # Bastian Leibe's matching style
                if(style == 0):
                    if detRect.isMatchingStd(annoRect, minCover, minOverlap, maxDistance):
                        self.M[i].append(self.n + 1 + j)
                # Pascal Matching style
                if(style == 1):
                    if (detRect.isMatchingPascal(annoRect, minOverlap)):
                        self.M[i].append(self.n + 1 + j)
    def decreaseScore(self, score):
        """Activate all detections with score >= score and update the flow.

        Returns True when at least one newly activated detection has a
        potential GT match (i.e. capacities changed).
        """
        capacity_change = False
        for i in xrange(self.nextN, self.n):
            if (self.det.rects[i].score >= score):
                capacity_change = self.insertIntoC(i + 1) or capacity_change
                self.nextN += 1
            else:
                break
        if capacity_change:
            self.F = edmonds_karp(self.a, self.C, 0, self.a - 1, self.sumC_active)
            self.full_flow = sum([self.F[0][i] for i in xrange(self.a)])
            self.ignore_flow = sum([self.F[i][self.a - 1] * self.ignore[i - 1 - self.n] for i in range(1 + self.n, 1 + self.n + self.m )])
        return capacity_change
    def addBB(self, rect):
        """Activate a single detection (by rect.boxIndex) and update the flow.

        Returns True when the capacities changed.
        """
        self.nextN += 1
        capacity_change = self.insertIntoC(rect.boxIndex + 1)
        if capacity_change:
            self.F = edmonds_karp(self.a, self.C, 0, self.a - 1, self.sumC_active)
            self.full_flow = sum([self.F[0][i] for i in xrange(self.a)])
            self.ignore_flow = sum([self.F[i][self.a - 1] * self.ignore[i - 1 - self.n] for i in range(1 + self.n, 1 + self.n + self.m )])
        return capacity_change
    def insertIntoC(self, i):
        """Open capacity edges between detection node i (1-based) and its
        matching GT nodes; returns True when i has any potential match."""
        #print "Inserting node", i, self.det.rects[i-1].score, "of image", self.anno.imageName
        for match in self.M[i - 1]:
            #print "  match: ", match
            self.C[i][match] = 1
            self.C[match][i] = 1
        self.sumC_active[i] = self.sumC[i]
        return self.sumC[i] > 0
    def maxflow(self):
        """Number of matched detections, excluding flow through ignored GT."""
        return self.full_flow - self.ignore_flow
    def consideredDets(self):
        """Number of activated detections, excluding those matched to ignored GT."""
        return self.nextN - self.ignore_flow
    def ignoredFlow(self):
        """Flow passing through GT rects marked as ignored."""
        return self.ignore_flow
    def getTruePositives(self):
        """Return an annotation with every detection matched to a non-ignored GT rect."""
        ret = copy.copy(self.anno)
        ret.rects = []
        #iterate over GT
        for i in xrange(self.n + 1, self.a - 1):
            #Flow to sink > 0
            if(self.F[i][self.a - 1] > 0 and self.ignore[i - self.n - 1] == 0):
                #Find associated det
                for j in xrange(1, self.n + 1):
                    if(self.F[j][i] > 0):
                        ret.rects.append(self.det[j - 1])
                        break
        return ret
    def getIgnoredTruePositives(self):
        """Return an annotation with every detection matched to an ignored GT rect."""
        ret = copy.copy(self.anno)
        ret.rects = []
        #iterate over GT
        for i in xrange(self.n + 1, self.a - 1):
            #Flow to sink > 0
            if(self.F[i][self.a - 1] > 0 and self.ignore[i - self.n - 1] == 1):
                #Find associated det
                for j in xrange(1, self.n + 1):
                    if(self.F[j][i] > 0):
                        ret.rects.append(self.det[j - 1])
                        break
        return ret
    def getMissingRecall(self):
        """Return an annotation with every non-ignored GT rect that received no flow."""
        ret = copy.copy(self.anno)
        ret.rects = []
        for i in xrange(self.n + 1, self.a - 1):
            if(self.F[i][self.a - 1] == 0 and self.ignore[i - self.n - 1] == 0):
                ret.rects.append(self.anno.rects[i - self.n - 1])
        return ret
    def getFalsePositives(self):
        """Return an annotation with every activated detection that carries no flow."""
        ret = copy.copy(self.det)
        ret.rects = []
        for i in xrange(1, self.n + 1):
            if(self.F[0][i] == 0):
                ret.rects.append(self.det[i - 1])
        return ret
def asort(idlGT, idlDet, minWidth, minHeight, style, minCover, minOverlap, maxDistance, maxWidth=float('inf'), maxHeight=float('inf')):
    """Discard out-of-size-range rects from ground truth and detections, in place.

    Pass 1: drop GT rects outside [minWidth,maxWidth] x [minHeight,maxHeight];
    detections that matched only such a dropped GT rect are deleted as well,
    so they are not later counted as false positives.
    Pass 2: drop out-of-range detections unless they match a surviving GT rect.

    ``style`` selects the matching criterion: 0 = Leibe/Seemann
    (isMatchingStd with cover/overlap/distance), 1 = Pascal overlap.
    Both ``idlGT`` and ``idlDet`` are mutated in place; nothing is returned.
    """
    #Asort too small object in ground truth
    for x,anno in enumerate(idlGT):
        # Find the detection annotation for the same image/frame.
        imageFound = False
        filterIndex = -1
        for i,filterAnno in enumerate(idlDet):
            if (suffixMatch(anno.imageName, filterAnno.imageName) and anno.frameNr == filterAnno.frameNr):
                filterIndex = i
                imageFound = True
                break
        if(not imageFound):
            continue
        validGTRects = []
        for j in anno.rects:
            if (j.width() >= minWidth) and (j.height() >= minHeight) and (j.width() <= maxWidth) and (j.height() <= maxHeight):
                validGTRects.append(j)
            else:
                # Sort out detections that would have matched
                matchingIndexes = []
                for m,frect in enumerate(idlDet[filterIndex].rects):
                    if(style == 0):
                        if (j.isMatchingStd(frect, minCover,minOverlap, maxDistance)):
                            overlap = j.overlap_pascal(frect)
                            matchingIndexes.append((m,overlap))
                    if(style == 1):
                        if(j.isMatchingPascal(frect, minOverlap)):
                            overlap = j.overlap_pascal(frect)
                            matchingIndexes.append((m, overlap))
                # Delete in reverse index order so earlier stored indices stay valid.
                for m in xrange(len(matchingIndexes) - 1, -1, -1):
                    matching_rect = idlDet[filterIndex].rects[matchingIndexes[m][0]]
                    matching_overlap = matchingIndexes[m][1]
                    # Keep the detection if any GT rect overlaps it better than
                    # the dropped one did -- it may still be a true positive.
                    better_overlap_found = False
                    for l in anno.rects:
                        if l.overlap_pascal(matching_rect) > matching_overlap:
                            better_overlap_found = True
                    if better_overlap_found:
                        continue
                    del idlDet[filterIndex].rects[matchingIndexes[m][0]]
        idlGT[x].rects = validGTRects
    #Sort out too small false positives
    for x,anno in enumerate(idlDet):
        imageFound = False
        filterIndex = -1
        for i,filterAnno in enumerate(idlGT):
            if (suffixMatch(anno.imageName, filterAnno.imageName) and anno.frameNr == filterAnno.frameNr):
                filterIndex = i
                imageFound = True
                break
        if(not imageFound):
            continue
        validDetRects = []
        for j in anno.rects:
            if (j.width() >= minWidth) and (j.height() >= minHeight) and (j.width() <= maxWidth) and (j.height() <= maxHeight):
                validDetRects.append(j)
            else:
                # Out-of-range detection survives only if it matches some GT rect.
                for frect in idlGT[filterIndex].rects:
                    if(style == 0):
                        if j.isMatchingStd(frect, minCover,minOverlap, maxDistance):
                            validDetRects.append(j)
                    if(style == 1):
                        if(j.isMatchingPascal(frect, minOverlap)):
                            validDetRects.append(j)
        idlDet[x].rects = validDetRects
def main():
parser = OptionParser(usage="usage: %prog [options] <groundTruthIdl> <detectionIdl>")
parser.add_option("-o", "--outFile",
action="store", type="string", dest="outFile")
parser.add_option("-a", "--analysisFiles",
action="store", type="string", dest="analysisFile")
parser.add_option("-s", "--minScore",
action="store", type="float", dest="minScore")
parser.add_option("-w", "--minWidth",
action="store", type="int", dest="minWidth", default=0)
parser.add_option("-u", "--minHeight",
action="store", type="int", dest="minHeight",default=0)
parser.add_option("--maxWidth", action="store", type="float", dest="maxWidth", default=float('inf'))
parser.add_option("--maxHeight", action="store", type="float", dest="maxHeight", default=float('inf'))
parser.add_option("-r", "--fixAspectRatio",
action="store", type="float", dest="aspectRatio")
parser.add_option("-p", "--Pascal-Style", action="store_true", dest="pascalStyle")
parser.add_option("-l", "--Leibe-Seemann-Matching-Style", action="store_true", dest="leibeStyle")
parser.add_option("--minCover", action="store", type="float", dest="minCover", default=0.5)
parser.add_option("--maxDistance", action="store", type="float", dest="maxDistance", default=0.5)
parser.add_option("--minOverlap", action="store", type="float", dest="minOverlap", default=0.5)
parser.add_option("--clipToImageWidth", action="store", type="float", dest="clipWidth", default= None)
parser.add_option("--clipToImageHeight", action="store", type="float", dest="clipHeight", default= None)
parser.add_option("-d", "--dropFirst", action="store_true", dest="dropFirst")
#parser.add_option("-c", "--class", action="store", type="int", dest="classID", default=-1)
parser.add_option("-c", "--class", action="store", type="int", dest="classID", default = None)
parser.add_option("-i", "--ignore", action="store", type="string", dest="ignoreFile")
parser.add_option("--ignoreOverlap", action="store", type="float", dest="ignoreOverlap", default = 0.9)
(options, args) = parser.parse_args()
if (len(args) < 2):
print "Please specify annotation and detection as arguments!"
parser.print_help()
sys.exit(1)
annoFile = args[0]
# First figure out the minimum height and width we are dealing with
minWidth = options.minWidth
minHeight = options.minHeight
maxWidth = options.maxWidth
maxHeight = options.maxHeight
print "Minimum width: %d height: %d" % (minWidth, minHeight)
# Load files
annoIDL = parse(annoFile)
detIDL = []
for dets in args[1:]:
detIDL += parse(dets)
if options.ignoreFile != None:
ignoreIDL = parse(options.ignoreFile)
else:
ignoreIDL = copy.deepcopy(annoIDL)
for anno in ignoreIDL:
anno.rects = []
if(options.classID is not None):
for anno in annoIDL:
anno.rects = [rect for rect in anno.rects if (rect.classID == options.classID or rect.classID == -1)]
for anno in detIDL:
anno.rects = [rect for rect in anno.rects if (rect.classID == options.classID or rect.classID == -1)]
for anno in ignoreIDL:
anno.rects = [rect for rect in anno.rects if (rect.classID == options.classID or rect.classID == -1)]
# prevent division by zero when fixing aspect ratio
for anno in annoIDL:
anno.rects = [rect for rect in anno.rects if rect.width() > 0 and rect.height() > 0]
for anno in detIDL:
anno.rects = [rect for rect in anno.rects if rect.width() > 0 and rect.height() > 0]
for anno in ignoreIDL:
anno.rects = [rect for rect in anno.rects if rect.width() > 0 and rect.height() > 0]
# Fix aspect ratio
if (not options.aspectRatio == None):
forceAspectRatio(annoIDL, options.aspectRatio)
forceAspectRatio(detIDL, options.aspectRatio)
forceAspectRatio(ignoreIDL, options.aspectRatio)
# Deselect detections with too low score
if (not options.minScore == None):
for i,anno in enumerate(detIDL):
validRects = []
for rect in anno.rects:
if (rect.score >= options.minScore):
validRects.append(rect)
anno.rects = validRects
# Clip detections to the image dimensions
if(options.clipWidth != None or options.clipHeight != None):
min_x = -float('inf')
min_y = -float('inf')
max_x = float('inf')
max_y = float('inf')
if(options.clipWidth != None):
min_x = 0
max_x = options.clipWidth
if(options.clipHeight != None):
min_y = 0
max_y = options.clipHeight
print "Clipping width: (%.02f-%.02f); clipping height: (%.02f-%.02f)" % (min_x, max_x, min_y, max_y)
for anno in annoIDL:
for rect in anno:
rect.clipToImage(min_x, max_x, min_y, max_y)
for anno in detIDL:
for rect in anno:
rect.clipToImage(min_x, max_x, min_y, max_y)
# Setup matching style; standard is Pascal
# style
matchingStyle = 1
# Pascal style
if (options.pascalStyle == True):
matchingStyle = 1
if (options.leibeStyle == True):
matchingStyle = 0
if (options.pascalStyle and options.leibeStyle):
print "Conflicting matching styles!"
sys.exit(1)
if (options.dropFirst == True):
print "Drop first frame of each sequence..."
newIDL = []
for i, anno in enumerate(detIDL):
if (i > 1 and detIDL[i].frameNr == detIDL[i-1].frameNr + 1 and detIDL[i].frameNr == detIDL[i-2].frameNr + 2 and detIDL[i].frameNr == detIDL[i-3].frameNr + 3 and detIDL[i].frameNr == detIDL[i-4].frameNr + 4):
newIDL.append(anno)
detIDL = newIDL
# Asort detections which are too small/too big
print "Asorting too large/ too small detections"
asort(annoIDL, detIDL, minWidth, minHeight, matchingStyle, options.minCover, options.minOverlap, options.maxDistance, maxWidth, maxHeight)
#Debugging asort
#saveIDL("testGT.idl", annoIDL)
#saveIDL("testDET.idl", detIDL)
noAnnotations = 0
for anno in annoIDL:
for j,detAnno in enumerate(detIDL):
if (suffixMatch(anno.imageName, detIDL[j].imageName) and anno.frameNr == detIDL[j].frameNr):
noAnnotations = noAnnotations + len(anno.rects)
break
print "#Annotations:", noAnnotations
###--- set up graphs ---###
print "Setting up graphs ..."
graphs = []
allRects = []
missingFrames = 0
for i in xrange(len(annoIDL)):
imageFound = False
filterIndex = -1
for j, detAnno in enumerate(detIDL):
if (suffixMatch(annoIDL[i].imageName, detIDL[j].imageName) and annoIDL[i].frameNr == detIDL[j].frameNr):
filterIndex = j
imageFound = True
break
if(not imageFound):
print "No annotation/detection pair found for: " + annoIDL[i].imageName + " frame: " + str(annoIDL[i].frameNr)
missingFrames += 1
continue;
graphs.append(AnnoGraph(annoIDL[i], detIDL[filterIndex], ignoreIDL[i], matchingStyle, options.minCover, options.minOverlap, options.maxDistance, options.ignoreOverlap))
for j,rect in enumerate(detIDL[filterIndex]):
newRect = detAnnoRect()
newRect.imageName = anno.imageName
newRect.frameNr = anno.frameNr
newRect.rect = rect
newRect.imageIndex = i - missingFrames
newRect.boxIndex = j
allRects.append(newRect)
print "missingFrames: ", missingFrames
print "Number of detections on annotated frames: " , len(allRects)
###--- get scores from all rects ---###
print "Sorting scores ..."
allRects.sort(cmpDetAnnoRectsByScore)
allRects.reverse()
###--- gradually decrease score ---###
print "Gradually decrease score ..."
lastScore = float('infinity')
precs = [1.0]
recalls = [0.0]
#fppi = [ 10**(math.floor(math.log(1.0 / float(len(annoIDL)))/math.log(10) * 10.0) / 10.0) ]
fppi = [ 1.0 / float(len(annoIDL)) ]
scores = [lastScore]
numDet = len(allRects)
sf = lastsf = 0
cd = lastcd = 0
iflow = lastiflow = 0
changed = False
firstFP = True
for i,nextrect in enumerate(allRects):
score = nextrect.rect.score;
# updating true and false positive counts
sf = sf - graphs[nextrect.imageIndex].maxflow()
cd = cd - graphs[nextrect.imageIndex].consideredDets()
iflow = iflow - graphs[nextrect.imageIndex].ignoredFlow()
#changed = changed or graphs[nextrect.imageIndex].decreaseScore(score)
changed = graphs[nextrect.imageIndex].addBB(nextrect) or changed
sf = sf + graphs[nextrect.imageIndex].maxflow()
cd = cd + graphs[nextrect.imageIndex].consideredDets()
iflow = iflow + graphs[nextrect.imageIndex].ignoredFlow()
if(firstFP and cd - sf != 0):
firstFP = False
changed = True
if (i == numDet - 1 or score != allRects[i + 1].rect.score or firstFP or i == len(allRects)):
if(changed or i == numDet - 1 or i == len(allRects)):
if(lastcd > 0):
scores.append(lastScore)
recalls.append(float(lastsf) / float(noAnnotations - lastiflow))
precs.append(float(lastsf) / float(lastcd))
fppi.append(float(lastcd - lastsf) / float(len(annoIDL)))
if (cd > 0):
scores.append(score)
recalls.append(float(sf) / float(noAnnotations - iflow))
precs.append(float(sf) / float(cd))
fppi.append(float(cd - sf) / float(len(annoIDL)))
changed = False
lastScore = score
lastsf = sf
lastcd = cd
lastiflow = iflow
###--- output to file ---###
outfilename = options.outFile
if outfilename is None:
outputDir = os.path.dirname(os.path.abspath(args[1]))
outputFile = os.path.basename(os.path.abspath(args[1]))
[base, ext] = idlBase(outputFile)
outfilename = outputDir + "/rpc-" + base +".txt"
print "saving " + outfilename;
file = open(outfilename, 'w')
for i in xrange(len(precs)):
file.write(str(precs[i])+" "+str(recalls[i])+" "+str(scores[i])+ " " + str(fppi[i])+ "\n")
file.close()
# Extracting failure cases
if(options.analysisFile != None):
anaPrefix = options.analysisFile
falsePositives = []
truePositives = []
missingRecall = []
ignoredTruePositives = []
for i in xrange(len(graphs)):
falsePositives.append(graphs[i].getFalsePositives())
truePositives.append(graphs[i].getTruePositives())
truePositives[-1].imageName = falsePositives[-1].imageName
truePositives[-1].imagePath = falsePositives[-1].imagePath
missingRecall.append(graphs[i].getMissingRecall())
missingRecall[-1].imageName = falsePositives[-1].imageName
missingRecall[-1].imagePath = falsePositives[-1].imagePath
if options.ignoreFile != None:
ignoredTruePositives.append(graphs[i].getIgnoredTruePositives())
saveIDL(anaPrefix + "-falsePositives.idl.gz", falsePositives);
sortedFP = annoAnalyze(falsePositives);
saveIDL(anaPrefix + "-falsePositives-sortedByScore.idl.gz", sortedFP);
saveIDL(anaPrefix + "-truePositives.idl.gz", truePositives);
sortedFP = annoAnalyze(truePositives);
saveIDL(anaPrefix + "-truePositives-sortedByScore.idl.gz", sortedFP);
if options.ignoreFile != None:
saveIDL(anaPrefix + "-ignoredTruePositives.idl.gz", ignoredTruePositives)
saveIDL(anaPrefix + "-missingRecall.idl.gz", missingRecall);
# Script entry point: compute RPC curves for the given GT/detection IDL files.
if __name__ == "__main__":
    main()
| sameeptandon/sail-car-log | car_tracking/doRPC.py | Python | bsd-2-clause | 19,670 |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...utility import xl_cell_to_rowcol_abs
class TestUtility(unittest.TestCase):
    """
    Test xl_cell_to_rowcol_abs() utility function.
    """

    def test_xl_cell_to_rowcol_abs(self):
        """Test xl_cell_to_rowcol_abs()"""
        # (A1 string, expected row, expected col); both abs flags must be 0.
        cases = [
            ('A1', 0, 0),
            ('B1', 0, 1),
            ('C1', 0, 2),
            ('J1', 0, 9),
            ('A2', 1, 0),
            ('A3', 2, 0),
            ('A10', 9, 0),
            ('Y2', 1, 24),
            ('Z8', 7, 25),
            ('AA10', 9, 26),
            ('IU2', 1, 254),
            ('IV2', 1, 255),
            ('IW2', 1, 256),
            ('XFD1', 0, 16383),
            ('XFE1048577', 1048576, 16384),
        ]
        for cell, row, col in cases:
            self.assertEqual(xl_cell_to_rowcol_abs(cell), (row, col, 0, 0))

    def test_xl_cell_to_rowcol_abs_abs(self):
        """Test xl_cell_to_rowcol_abs() with absolute references"""
        # (A1 string, row, col, row_abs flag, col_abs flag).
        cases = [
            ('A1', 0, 0, 0, 0),
            ('A$1', 0, 0, 1, 0),
            ('$A1', 0, 0, 0, 1),
            ('$A$1', 0, 0, 1, 1),
        ]
        for cell, row, col, row_abs, col_abs in cases:
            self.assertEqual(xl_cell_to_rowcol_abs(cell),
                             (row, col, row_abs, col_abs))
| jmcnamara/XlsxWriter | xlsxwriter/test/utility/test_xl_cell_to_rowcol_abs.py | Python | bsd-2-clause | 1,642 |
# -*- coding: utf-8 -*-
import unittest
import mock
import openerp.tests.common as common
from openerp.addons.connector.unit.mapper import (
Mapper,
ImportMapper,
ExportMapper,
ImportMapChild,
MappingDefinition,
changed_by,
only_create,
convert,
follow_m2o_relations,
m2o_to_backend,
backend_to_m2o,
none,
MapOptions,
mapping)
from openerp.addons.connector.backend import Backend
from openerp.addons.connector.connector import ConnectorEnvironment
from openerp.addons.connector.session import ConnectorSession
class test_mapper(unittest.TestCase):
    """ Test Mapper """

    def test_mapping_decorator(self):
        """Decorated methods are registered in _map_methods with their options."""
        class KifKrokerMapper(Mapper):
            _model_name = 'res.users'

            @changed_by('name', 'city')
            @mapping
            @only_create
            def name(self):
                pass

            @changed_by('email')
            @mapping
            def email(self):
                pass

            @changed_by('street')
            @mapping
            def street(self):
                pass

            def no_decorator(self):
                pass
        self.maxDiff = None
        name_def = MappingDefinition(changed_by=set(('name', 'city')),
                                     only_create=True)
        email_def = MappingDefinition(changed_by=set(('email',)),
                                      only_create=False)
        street_def = MappingDefinition(changed_by=set(('street',)),
                                       only_create=False)
        # no_decorator must not appear: only @mapping methods are collected.
        self.assertEqual(KifKrokerMapper._map_methods,
                         {'name': name_def,
                          'email': email_def,
                          'street': street_def,
                          })

    def test_mapping_decorator_cross_classes(self):
        """ Mappings should not propagate to other classes"""
        class MomMapper(Mapper):
            _model_name = 'res.users'

            @changed_by('name', 'city')
            @mapping
            def name(self):
                pass

        class ZappMapper(Mapper):
            _model_name = 'res.users'

            @changed_by('email')
            @only_create
            @mapping
            def email(self):
                pass
        mom_def = MappingDefinition(changed_by=set(('name', 'city')),
                                    only_create=False)
        zapp_def = MappingDefinition(changed_by=set(('email',)),
                                     only_create=True)
        self.assertEqual(MomMapper._map_methods,
                         {'name': mom_def})
        self.assertEqual(ZappMapper._map_methods,
                         {'email': zapp_def})

    def test_mapping_decorator_cumul(self):
        """ Mappings should cumulate the ``super`` mappings
        and the local mappings."""
        class FryMapper(Mapper):
            _model_name = 'res.users'

            @changed_by('name', 'city')
            @mapping
            def name(self):
                pass

        class FarnsworthMapper(FryMapper):
            _model_name = 'res.users'

            @changed_by('email')
            @mapping
            def email(self):
                pass
        name_def = MappingDefinition(changed_by=set(('name', 'city')),
                                     only_create=False)
        email_def = MappingDefinition(changed_by=set(('email',)),
                                      only_create=False)
        self.assertEqual(FarnsworthMapper._map_methods,
                         {'name': name_def,
                          'email': email_def})

    def test_mapping_decorator_cumul_changed_by(self):
        """ Mappings should cumulate the changed_by fields of the
        ``super`` mappings and the local mappings """
        class FryMapper(Mapper):
            _model_name = 'res.users'

            @changed_by('name', 'city')
            @mapping
            def name(self):
                pass

        class FarnsworthMapper(FryMapper):
            _model_name = 'res.users'

            @changed_by('email')
            @mapping
            def name(self):
                pass

        class ThirdMapper(FarnsworthMapper):
            _model_name = 'res.users'

            @changed_by('email', 'street')
            @mapping
            def name(self):
                pass
        name_def = MappingDefinition(changed_by=set(('name', 'city', 'email')),
                                     only_create=False)
        self.assertEqual(FarnsworthMapper._map_methods,
                         {'name': name_def})
        name_def = MappingDefinition(changed_by=set(('name', 'city',
                                                     'email', 'street')),
                                     only_create=False)
        self.assertEqual(ThirdMapper._map_methods,
                         {'name': name_def})

    def test_several_bases_cumul(self):
        """Multiple inheritance merges changed_by and only_create per method."""
        class FryMapper(Mapper):
            _model_name = 'res.users'

            @changed_by('name', 'city')
            @mapping
            def name(self):
                pass

            @only_create
            @mapping
            def street(self):
                pass

            @only_create
            @mapping
            def zip(self):
                pass

        class FarnsworthMapper(Mapper):
            _model_name = 'res.users'

            @changed_by('email')
            @mapping
            def name(self):
                pass

            @changed_by('street')
            @mapping
            def city(self):
                pass

            @mapping
            def zip(self):
                pass

        class ThirdMapper(FryMapper, FarnsworthMapper):
            _model_name = 'res.users'

            @changed_by('email', 'street')
            @mapping
            def name(self):
                pass

            @mapping
            def email(self):
                pass
        name_def = MappingDefinition(changed_by=set(('name', 'city',
                                                     'email', 'street')),
                                     only_create=False)
        street_def = MappingDefinition(changed_by=set([]),
                                       only_create=True)
        city_def = MappingDefinition(changed_by=set(('street',)),
                                     only_create=False)
        email_def = MappingDefinition(changed_by=set([]),
                                      only_create=False)
        zip_def = MappingDefinition(changed_by=set([]),
                                    only_create=True)
        self.assertEqual(ThirdMapper._map_methods['name'], name_def)
        self.assertEqual(ThirdMapper._map_methods['street'], street_def)
        self.assertEqual(ThirdMapper._map_methods['city'], city_def)
        self.assertEqual(ThirdMapper._map_methods['email'], email_def)
        self.assertEqual(ThirdMapper._map_methods['zip'], zip_def)

    def test_mapping_record(self):
        """ Map a record and check the result """
        class MyMapper(ImportMapper):
            direct = [('name', 'out_name')]

            @mapping
            def street(self, record):
                return {'out_street': record['street'].upper()}
        env = mock.MagicMock()
        record = {'name': 'Guewen',
                  'street': 'street'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_name': 'Guewen',
                    'out_street': 'STREET'}
        self.assertEqual(map_record.values(), expected)
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_mapping_record_on_create(self):
        """ Map a record and check the result for creation of record """
        class MyMapper(ImportMapper):
            direct = [('name', 'out_name')]

            @mapping
            def street(self, record):
                return {'out_street': record['street'].upper()}

            @only_create
            @mapping
            def city(self, record):
                return {'out_city': 'city'}
        env = mock.MagicMock()
        record = {'name': 'Guewen',
                  'street': 'street'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        # @only_create values appear only with for_create=True.
        expected = {'out_name': 'Guewen',
                    'out_street': 'STREET'}
        self.assertEqual(map_record.values(), expected)
        expected = {'out_name': 'Guewen',
                    'out_street': 'STREET',
                    'out_city': 'city'}
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_mapping_update(self):
        """ Force values on a map record """
        class MyMapper(ImportMapper):
            direct = [('name', 'out_name')]

            @mapping
            def street(self, record):
                return {'out_street': record['street'].upper()}

            @only_create
            @mapping
            def city(self, record):
                return {'out_city': 'city'}
        env = mock.MagicMock()
        record = {'name': 'Guewen',
                  'street': 'street'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        # update() merges extra values and overrides mapped ones.
        map_record.update({'test': 1}, out_city='forced')
        expected = {'out_name': 'Guewen',
                    'out_street': 'STREET',
                    'out_city': 'forced',
                    'test': 1}
        self.assertEqual(map_record.values(), expected)
        expected = {'out_name': 'Guewen',
                    'out_street': 'STREET',
                    'out_city': 'forced',
                    'test': 1}
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_finalize(self):
        """ Inherit finalize to modify values """
        class MyMapper(ImportMapper):
            direct = [('name', 'out_name')]

            def finalize(self, record, values):
                result = super(MyMapper, self).finalize(record, values)
                result['test'] = 'abc'
                return result
        env = mock.MagicMock()
        record = {'name': 'Guewen',
                  'street': 'street'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_name': 'Guewen',
                    'test': 'abc'}
        self.assertEqual(map_record.values(), expected)
        expected = {'out_name': 'Guewen',
                    'test': 'abc'}
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_some_fields(self):
        """ Map only a selection of fields """
        class MyMapper(ImportMapper):
            direct = [('name', 'out_name'),
                      ('street', 'out_street'),
                      ]

            @changed_by('country')
            @mapping
            def country(self, record):
                return {'country': 'country'}
        env = mock.MagicMock()
        record = {'name': 'Guewen',
                  'street': 'street',
                  'country': 'country'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        # 'street' not in fields -> out_street is not produced.
        expected = {'out_name': 'Guewen',
                    'country': 'country'}
        self.assertEqual(map_record.values(fields=['name', 'country']),
                         expected)
        expected = {'out_name': 'Guewen',
                    'country': 'country'}
        self.assertEqual(map_record.values(for_create=True,
                                           fields=['name', 'country']),
                         expected)

    def test_mapping_modifier(self):
        """ Map a direct record with a modifier function """
        def do_nothing(field):
            def transform(self, record, to_attr):
                return record[field]
            return transform

        class MyMapper(ImportMapper):
            direct = [(do_nothing('name'), 'out_name')]
        env = mock.MagicMock()
        record = {'name': 'Guewen'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_name': 'Guewen'}
        self.assertEqual(map_record.values(), expected)
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_mapping_convert(self):
        """ Map a direct record with the convert modifier function """
        class MyMapper(ImportMapper):
            direct = [(convert('name', int), 'out_name')]
        env = mock.MagicMock()
        record = {'name': '300'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_name': 300}
        self.assertEqual(map_record.values(), expected)
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_mapping_modifier_none(self):
        """ Pipeline of modifiers """
        # none() converts False-ish values to None, keeps truthy ones.
        class MyMapper(ImportMapper):
            direct = [(none('in_f'), 'out_f'),
                      (none('in_t'), 'out_t')]
        env = mock.MagicMock()
        record = {'in_f': False, 'in_t': True}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_f': None, 'out_t': True}
        self.assertEqual(map_record.values(), expected)
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_mapping_modifier_pipeline(self):
        """ Pipeline of modifiers """
        # Modifiers compose: convert to bool first, then none().
        class MyMapper(ImportMapper):
            direct = [(none(convert('in_f', bool)), 'out_f'),
                      (none(convert('in_t', bool)), 'out_t')]
        env = mock.MagicMock()
        record = {'in_f': 0, 'in_t': 1}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_f': None, 'out_t': True}
        self.assertEqual(map_record.values(), expected)
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_mapping_custom_option(self):
        """ Usage of custom options in mappings """
        class MyMapper(ImportMapper):

            @mapping
            def any(self, record):
                if self.options.custom:
                    res = True
                else:
                    res = False
                return {'res': res}
        env = mock.MagicMock()
        record = {}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'res': True}
        # Keyword args of values() are exposed as self.options.*
        self.assertEqual(map_record.values(custom=True), expected)

    def test_mapping_custom_option_not_defined(self):
        """ Usage of custom options not defined raise AttributeError """
        # NOTE(review): despite the docstring, undefined options resolve to
        # None (MapOptions returns None for missing keys), so res is True.
        class MyMapper(ImportMapper):

            @mapping
            def any(self, record):
                if self.options.custom is None:
                    res = True
                else:
                    res = False
                return {'res': res}
        env = mock.MagicMock()
        record = {}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'res': True}
        self.assertEqual(map_record.values(), expected)

    def test_map_options(self):
        """ Test MapOptions """
        # MapOptions allows both dict-style and attribute-style access,
        # and returns None for unknown keys instead of raising.
        options = MapOptions({'xyz': 'abc'}, k=1)
        options.l = 2
        self.assertEqual(options['xyz'], 'abc')
        self.assertEqual(options['k'], 1)
        self.assertEqual(options['l'], 2)
        self.assertEqual(options.xyz, 'abc')
        self.assertEqual(options.k, 1)
        self.assertEqual(options.l, 2)
        self.assertEqual(options['undefined'], None)
        self.assertEqual(options.undefined, None)

    def test_changed_by_fields(self):
        """ Test attribute ``_changed_by_fields`` on Mapper."""
        class MyExportMapper(ExportMapper):
            direct = [('street', 'out_street'),
                      (none('in_t'), 'out_t'),
                      (none(convert('in_f', bool)), 'out_f')]

            @changed_by('name', 'city')
            @mapping
            def name(self):
                pass

            @changed_by('email')
            @mapping
            def email(self):
                pass

            def no_decorator(self):
                pass
        # Collects source fields from both direct mappings (including
        # modifier-wrapped ones) and @changed_by decorators.
        self.assertEqual(
            MyExportMapper._changed_by_fields,
            set(['street', 'in_t', 'in_f', 'name', 'city', 'email']))
class test_mapper_recordsets(common.TransactionCase):
    """ Test mapper with "real" records instead of mocks """

    def setUp(self):
        """Build a ConnectorEnvironment on res.partner with a mocked backend."""
        super(test_mapper_recordsets, self).setUp()
        self.session = ConnectorSession(self.cr, self.uid)
        self.backend = mock.Mock(wraps=Backend('x', version='y'),
                                 name='backend')
        backend_record = mock.Mock()
        backend_record.get_backend.return_value = self.backend
        self.connector_env = ConnectorEnvironment(
            backend_record, self.session, 'res.partner')

    def test_mapping_modifier_follow_m2o_relations(self):
        """ Map with the follow_m2o_relations modifier """
        class MyMapper(ImportMapper):
            direct = [
                (follow_m2o_relations('parent_id.name'), 'parent_name'),
            ]
        # Demo partner whose parent is Agrolait.
        partner = self.browse_ref('base.res_partner_address_4')
        mapper = MyMapper(self.connector_env)
        map_record = mapper.map_record(partner)
        expected = {'parent_name': 'Agrolait'}
        self.assertEqual(map_record.values(), expected)
        self.assertEqual(map_record.values(for_create=True), expected)
class test_mapper_binding(common.TransactionCase):
    """ Test Mapper with Bindings"""

    def setUp(self):
        """Mocked backend + binder so binder lookups can be asserted."""
        super(test_mapper_binding, self).setUp()
        self.session = ConnectorSession(self.cr, self.uid)
        self.backend = mock.Mock(wraps=Backend('x', version='y'),
                                 name='backend')
        backend_record = mock.Mock()
        backend_record.get_backend.return_value = self.backend
        self.connector_env = ConnectorEnvironment(
            backend_record, self.session, 'res.partner')
        # Any binder class resolved through the backend is this mock.
        self.country_binder = mock.Mock(name='country_binder')
        self.country_binder.return_value = self.country_binder
        self.backend.get_class.return_value = self.country_binder

    def test_mapping_m2o_to_backend(self):
        """ Map a direct record with the m2o_to_backend modifier function """
        class MyMapper(ImportMapper):
            _model_name = 'res.partner'
            direct = [(m2o_to_backend('country_id'), 'country')]
        partner = self.env.ref('base.main_partner')
        partner.write({'country_id': self.env.ref('base.ch').id})
        self.country_binder.to_backend.return_value = 10
        mapper = MyMapper(self.connector_env)
        map_record = mapper.map_record(partner)
        self.assertEqual(map_record.values(), {'country': 10})
        self.country_binder.to_backend.assert_called_once_with(
            partner.country_id.id, wrap=False)

    def test_mapping_backend_to_m2o(self):
        """ Map a direct record with the backend_to_m2o modifier function """
        class MyMapper(ImportMapper):
            _model_name = 'res.partner'
            direct = [(backend_to_m2o('country'), 'country_id')]
        record = {'country': 10}
        ch = self.env.ref('base.ch')
        self.country_binder.to_openerp.return_value = ch
        mapper = MyMapper(self.connector_env)
        map_record = mapper.map_record(record)
        self.assertEqual(map_record.values(), {'country_id': ch.id})
        self.country_binder.to_openerp.assert_called_once_with(
            10, unwrap=False)

    def test_mapping_record_children_no_map_child(self):
        """ Map a record with children, using default MapChild """
        backend = Backend('backend', '42')

        @backend
        class LineMapper(ImportMapper):
            _model_name = 'res.currency.rate'
            direct = [('name', 'name')]

            @mapping
            def price(self, record):
                return {'rate': record['rate'] * 2}

            @only_create
            @mapping
            def discount(self, record):
                return {'test': .5}

        @backend
        class ObjectMapper(ImportMapper):
            _model_name = 'res.currency'
            direct = [('name', 'name')]
            children = [('lines', 'line_ids', 'res.currency.rate')]
        backend_record = mock.Mock()
        backend_record.get_backend.side_effect = lambda *a: backend
        env = ConnectorEnvironment(backend_record, self.session,
                                   'res.currency')
        record = {'name': 'SO1',
                  'lines': [{'name': '2013-11-07',
                             'rate': 10},
                            {'name': '2013-11-08',
                             'rate': 20}]}
        mapper = ObjectMapper(env)
        map_record = mapper.map_record(record)
        # Default MapChild formats children as (0, 0, values) o2m commands.
        expected = {'name': 'SO1',
                    'line_ids': [(0, 0, {'name': '2013-11-07',
                                         'rate': 20}),
                                 (0, 0, {'name': '2013-11-08',
                                         'rate': 40})]
                    }
        self.assertEqual(map_record.values(), expected)
        expected = {'name': 'SO1',
                    'line_ids': [(0, 0, {'name': '2013-11-07',
                                         'rate': 20,
                                         'test': .5}),
                                 (0, 0, {'name': '2013-11-08',
                                         'rate': 40,
                                         'test': .5})]
                    }
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_mapping_record_children(self):
        """ Map a record with children, using defined MapChild """
        backend = Backend('backend', '42')

        @backend
        class LineMapper(ImportMapper):
            _model_name = 'res.currency.rate'
            direct = [('name', 'name')]

            @mapping
            def price(self, record):
                return {'rate': record['rate'] * 2}

            @only_create
            @mapping
            def discount(self, record):
                return {'test': .5}

        @backend
        class SaleLineImportMapChild(ImportMapChild):
            _model_name = 'res.currency.rate'

            def format_items(self, items_values):
                # Custom child formatting replaces the (0, 0, ...) commands.
                return [('ABC', values) for values in items_values]

        @backend
        class ObjectMapper(ImportMapper):
            _model_name = 'res.currency'
            direct = [('name', 'name')]
            children = [('lines', 'line_ids', 'res.currency.rate')]
        backend_record = mock.Mock()
        backend_record.get_backend.side_effect = lambda *a: backend
        env = ConnectorEnvironment(backend_record, self.session,
                                   'res.currency')
        record = {'name': 'SO1',
                  'lines': [{'name': '2013-11-07',
                             'rate': 10},
                            {'name': '2013-11-08',
                             'rate': 20}]}
        mapper = ObjectMapper(env)
        map_record = mapper.map_record(record)
        expected = {'name': 'SO1',
                    'line_ids': [('ABC', {'name': '2013-11-07',
                                          'rate': 20}),
                                 ('ABC', {'name': '2013-11-08',
                                          'rate': 40})]
                    }
        self.assertEqual(map_record.values(), expected)
        expected = {'name': 'SO1',
                    'line_ids': [('ABC', {'name': '2013-11-07',
                                          'rate': 20,
                                          'test': .5}),
                                 ('ABC', {'name': '2013-11-08',
                                          'rate': 40,
                                          'test': .5})]
                    }
        self.assertEqual(map_record.values(for_create=True), expected)

    def test_modifier_import_filter_field(self):
        """ A direct mapping with a modifier must still be considered
        from the list of fields
        """
        class MyMapper(ImportMapper):
            direct = [('field', 'field2'),
                      ('no_field', 'no_field2'),
                      (convert('name', int), 'out_name')]
        env = mock.MagicMock()
        record = {'name': '300', 'field': 'value', 'no_field': 'no_value'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_name': 300, 'field2': 'value'}
        self.assertEqual(map_record.values(fields=['field', 'name']), expected)
        self.assertEqual(map_record.values(for_create=True,
                                           fields=['field', 'name']), expected)

    def test_modifier_export_filter_field(self):
        """ A direct mapping with a modifier on an export mapping """
        class MyMapper(ExportMapper):
            direct = [('field', 'field2'),
                      ('no_field', 'no_field2'),
                      (convert('name', int), 'out_name')]
        env = mock.MagicMock()
        record = {'name': '300', 'field': 'value', 'no_field': 'no_value'}
        mapper = MyMapper(env)
        map_record = mapper.map_record(record)
        expected = {'out_name': 300, 'field2': 'value'}
        self.assertEqual(map_record.values(fields=['field', 'name']), expected)
        self.assertEqual(map_record.values(for_create=True,
                                           fields=['field', 'name']), expected)
| krattai/ss-middleware | connector/connector/tests/test_mapper.py | Python | bsd-2-clause | 25,592 |
import abc
from default_metrics import DefaultMetrics
class DefaultEnvironment(object):
    """
    Abstract class for environments. All environments must implement these
    methods to be able to work with SBB.
    """

    # Python 2 style abstract base class declaration.
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        # Metrics helper bound to this environment instance.
        self.metrics_ = DefaultMetrics(self)

    @abc.abstractmethod
    def reset(self):
        """
        Method that is called at the beginning of each run by SBB, to reset the
        variables that will be used by the generations.
        """

    @abc.abstractmethod
    def setup(self, teams_population):
        """
        Method that is called at the beginning of each generation by SBB, to set the
        variables that will be used by the generation and remove the ones that are no
        longer being used.
        """

    @abc.abstractmethod
    def evaluate_point_population(self, teams_population):
        """
        Evaluate the fitness of the point population, to define which points will be removed
        or added in the next generation, when setup_point_population() is executed.
        """

    @abc.abstractmethod
    def evaluate_teams_population_for_training(self, teams_population):
        """
        Evaluate all the teams using the evaluate_team() method, and sets metrics. Used only
        for training.
        """

    @abc.abstractmethod
    def evaluate_team(self, team, mode):
        """
        Evaluate the team using the environment inputs. May be executed in the training
        or the test mode.
        This method must set the attribute results_per_points of the team, if you intend to
        use pareto.
        """

    @abc.abstractmethod
    def validate(self, current_generation, teams_population):
        """
        For classification:
        - Return the best team for the teams_population using the champion set.
        For reinforcement:
        - All teams go against the validation set, and then the best one go against the champion set
        """

    def hall_of_fame(self):
        # Default implementation: no hall of fame; subclasses may override.
        return []
from font import font
from qt import QFont
class b( font ):
    """
    <b> makes text bold.
    <p>
    <b>Properties:</b>
    <br>
    See <a href="font.html"><font></a> for properties.
    """
    def __init__( self, *args ):
        """
        Initiate the container, contents, and properties.
        -*args, arguments for the for constructor.
        """
        # Extended call syntax replaces the deprecated apply() builtin
        # (removed in Python 3); semantics are identical.
        font.__init__( self, *args )
        self.setWeight( QFont.Bold )
    def getHtml( self ):
        """
        Get the HTML associated with this object.
        Returns a list of html strings, with each entry being a line
        in a html file.
        """
        return [ "<b>" ] + font.getHtml( self ) + [ "</b>" ]
| derekmd/opentag-presenter | tags/b.py | Python | bsd-2-clause | 619 |
from rest_framework import serializers
from feti.models.campus import Campus
from feti.serializers.course_serializer import CourseSerializer
__author__ = 'irwan'
class CampusSerializer(serializers.ModelSerializer):
    """Serialize a Campus, enriching the default representation with
    long description, nested courses, address, provider title and
    geographic location."""
    class Meta:
        model = Campus
        # NOTE(review): no `fields`/`exclude` declared, so all model fields
        # are serialized; newer DRF versions require an explicit declaration.
    def to_representation(self, instance):
        res = super(CampusSerializer, self).to_representation(instance)
        res['long_description'] = instance.long_description
        res['courses'] = CourseSerializer(instance.courses.all(), many=True).data
        if instance.address:
            res['address'] = instance.address.__unicode__()
        if instance.provider:
            res['title'] = instance.provider.__unicode__()
        if instance.location:
            # Point coordinates: x is longitude, y is latitude.
            res['location'] = {
                'lat': instance.location.y,
                'lng': instance.location.x}
        return res
| cchristelis/feti | django_project/feti/serializers/campus_serializer.py | Python | bsd-2-clause | 862 |
__author__ = 'rohe0002'
import json
import logging
from urlparse import urlparse
from bs4 import BeautifulSoup
from mechanize import ParseResponseEx
from mechanize._form import ControlNotFoundError, AmbiguityError
from mechanize._form import ListControl
logger = logging.getLogger(__name__)
NO_CTRL = "No submit control with the name='%s' and value='%s' could be found"
class FlowException(Exception):
    """Raised when a step of the conversation flow fails.

    Carries the failing function name, the response content and the URL
    involved; the string form is a JSON dump of these attributes.
    """
    def __init__(self, function="", content="", url=""):
        super(FlowException, self).__init__()
        self.function = function
        self.content = content
        self.url = url

    def __str__(self):
        return json.dumps(vars(self))
class InteractionNeeded(Exception):
    """Raised when no configured interaction matches the current page."""
    pass
def NoneFunc():
    """Fallback interaction handler for unknown types: returns None."""
    return None
class RResponse():
    """
    A Response class that behaves in the way that mechanize expects it.
    Links to a requests.Response
    """
    # NOTE(review): this is a Python 2 "classic" class, so the
    # __getattribute__ hook below is never consulted for ordinary attribute
    # access (classic classes only honour __getattr__). If this class were
    # made new-style, __getattribute__ would recurse infinitely through
    # self._resp -- confirm before porting to Python 3.
    def __init__(self, resp):
        # resp: a requests.Response instance being wrapped.
        self._resp = resp
        self.index = 0  # read() cursor into self.text
        self.text = resp.text
        if isinstance(self.text, unicode):
            # mechanize works on byte strings; re-encode the unicode body.
            if resp.encoding == "UTF-8":
                self.text = self.text.encode("utf-8")
            else:
                self.text = self.text.encode("latin-1")
        self._len = len(self.text)
        self.url = str(resp.url)
        self.statuscode = resp.status_code
    def geturl(self):
        # urllib2-style accessor expected by mechanize.
        return self._resp.url
    def __getitem__(self, item):
        # Delegate to the wrapped response, falling back to its headers.
        try:
            return getattr(self._resp, item)
        except AttributeError:
            return getattr(self._resp.headers, item)
    def __getattribute__(self, item):
        try:
            return getattr(self._resp, item)
        except AttributeError:
            return getattr(self._resp.headers, item)
    def read(self, size=0):
        """
        Read from the content of the response. The class remembers what has
        been read so it's possible to read small consecutive parts of the
        content.
        :param size: The number of bytes to read
        :return: Somewhere between zero and 'size' number of bytes depending
        on how much it left in the content buffer to read.
        """
        if size:
            # NOTE(review): when the whole body is shorter than `size` this
            # returns the full text without advancing self.index, so a later
            # read() would repeat the data -- confirm this is intended.
            if self._len < size:
                return self.text
            else:
                if self._len == self.index:
                    part = None
                elif self._len - self.index < size:
                    part = self.text[self.index:]
                    self.index = self._len
                else:
                    part = self.text[self.index:self.index + size]
                    self.index += size
                return part
        else:
            return self.text
class Interaction(object):
    """Drives scripted user interactions (forms, links, responses) against
    web pages returned during a test conversation."""
    def __init__(self, httpc, interactions=None):
        # httpc: HTTP client exposing send(url, method, ...).
        # interactions: list of dicts, each with a "matches" criteria dict.
        self.httpc = httpc
        self.interactions = interactions
        self.who = "Form process"
    def pick_interaction(self, _base="", content="", req=None):
        """Return the first configured interaction whose every "matches"
        criterion (url/title/content/class) matches the current page;
        raise InteractionNeeded when none does."""
        logger.info("pick_interaction baseurl: %s" % _base)
        unic = content
        if content:
            _bs = BeautifulSoup(content)
        else:
            _bs = None
        for interaction in self.interactions:
            _match = 0
            for attr, val in interaction["matches"].items():
                if attr == "url":
                    logger.info("matching baseurl against: %s" % val)
                    if val == _base:
                        _match += 1
                elif attr == "title":
                    logger.info("matching '%s' against title" % val)
                    if _bs is None:
                        break
                    if _bs.title is None:
                        break
                    if val in _bs.title.contents:
                        _match += 1
                    else:
                        # Title may be split over several text nodes; accept a
                        # substring match in any of them.
                        _c = _bs.title.contents
                        if isinstance(_c, list) and not isinstance(_c,
                                                                   basestring):
                            for _line in _c:
                                if val in _line:
                                    _match += 1
                                    continue
                elif attr == "content":
                    if unic and val in unic:
                        _match += 1
                elif attr == "class":
                    if req and val == req:
                        _match += 1
            # All criteria of this interaction must have matched.
            if _match == len(interaction["matches"]):
                logger.info("Matched: %s" % interaction["matches"])
                return interaction
        raise InteractionNeeded("No interaction matched")
    def pick_form(self, response, url=None, **kwargs):
        """
        Picks which form in a web-page that should be used
        :param response: A HTTP request response. A DResponse instance
        :param url: The url the request was sent to
        :param kwargs: Extra key word arguments; "pick" selects by form
            attributes/controls/method, "index" selects by position.
        :return: The picked form or None if no form matched the criteria.
        """
        forms = ParseResponseEx(response)
        if not forms:
            raise FlowException(content=response.text, url=url)
        #if len(forms) == 1:
        #    return forms[0]
        #else:
        _form = None
        # ignore the first form, because I use ParseResponseEx which adds
        # one form at the top of the list
        forms = forms[1:]
        if len(forms) == 1:
            _form = forms[0]
        else:
            if "pick" in kwargs:
                _dict = kwargs["pick"]
                for form in forms:
                    if _form:
                        break
                    for key, _ava in _dict.items():
                        if key == "form":
                            # Match on the <form> tag's own attributes.
                            _keys = form.attrs.keys()
                            for attr, val in _ava.items():
                                if attr in _keys and val == form.attrs[attr]:
                                    _form = form
                        elif key == "control":
                            # Match on a named control having a given value.
                            prop = _ava["id"]
                            _default = _ava["value"]
                            try:
                                orig_val = form[prop]
                                if isinstance(orig_val, basestring):
                                    if orig_val == _default:
                                        _form = form
                                elif _default in orig_val:
                                    _form = form
                            except KeyError:
                                pass
                            except ControlNotFoundError:
                                pass
                        elif key == "method":
                            if form.method == _ava:
                                _form = form
                        else:
                            _form = None
                        # Every "pick" criterion must hold for this form.
                        if not _form:
                            break
            elif "index" in kwargs:
                _form = forms[int(kwargs["index"])]
        return _form
    def do_click(self, form, **kwargs):
        """
        Emulates the user clicking submit on a form.
        :param form: The form that should be submitted
        :param kwargs: "click" names the submit control; when several
            controls share that name, "set" supplies the value that
            disambiguates them.
        :return: What do_request() returns
        """
        if "click" in kwargs:
            request = None
            _name = kwargs["click"]
            try:
                _ = form.find_control(name=_name)
                request = form.click(name=_name)
            except AmbiguityError:
                # more than one control with that name
                _val = kwargs["set"][_name]
                _nr = 0
                while True:
                    try:
                        cntrl = form.find_control(name=_name, nr=_nr)
                        if cntrl.value == _val:
                            request = form.click(name=_name, nr=_nr)
                            break
                        else:
                            _nr += 1
                    except ControlNotFoundError:
                        raise Exception(NO_CTRL % (_name, _val))
        else:
            request = form.click()
        headers = {}
        for key, val in request.unredirected_hdrs.items():
            headers[key] = val
        # mechanize keeps the full URL in a name-mangled private attribute.
        url = request._Request__original
        if form.method == "POST":
            return self.httpc.send(url, "POST", data=request.data,
                                   headers=headers)
        else:
            return self.httpc.send(url, "GET", headers=headers)
    def select_form(self, orig_response, **kwargs):
        """
        Pick a form on a web page, possibly enter some information and submit
        the form.
        :param orig_response: The original response (as returned by requests)
        :return: The response do_click() returns, or a dict with the
            SAMLResponse/RelayState when the form posts back to one of the
            conversation's own endpoints.
        """
        logger.info("select_form")
        response = RResponse(orig_response)
        try:
            _url = response.url
        except KeyError:
            _url = kwargs["location"]
        form = self.pick_form(response, _url, **kwargs)
        #form.backwards_compatible = False
        if not form:
            raise Exception("Can't pick a form !!")
        if "set" in kwargs:
            # Fill in form fields; keys starting with "_" and the submit
            # control named by "click" are not form fields.
            for key, val in kwargs["set"].items():
                if key.startswith("_"):
                    continue
                if "click" in kwargs and kwargs["click"] == key:
                    continue
                try:
                    form[key] = val
                except ControlNotFoundError:
                    pass
                except TypeError:
                    # List controls (select/radio/checkbox) take a list.
                    cntrl = form.find_control(key)
                    if isinstance(cntrl, ListControl):
                        form[key] = [val]
                    else:
                        raise
        if form.action in kwargs["conv"].my_endpoints():
            return {"SAMLResponse": form["SAMLResponse"],
                    "RelayState": form["RelayState"]}
        return self.do_click(form, **kwargs)
    #noinspection PyUnusedLocal
    def chose(self, orig_response, path, **kwargs):
        """
        Sends a HTTP GET to a url given by the present url and the given
        relative path.
        :param orig_response: The original response
        :param path: The relative path to add to the base URL, or an
            absolute http(s) URL used as-is
        :return: The response do_click() returns
        """
        if not path.startswith("http"):
            try:
                _url = orig_response.url
            except KeyError:
                _url = kwargs["location"]
            part = urlparse(_url)
            url = "%s://%s%s" % (part[0], part[1], path)
        else:
            url = path
        logger.info("GET %s" % url)
        return self.httpc.send(url, "GET")
        #return resp, ""
    def post_form(self, orig_response, **kwargs):
        """
        The same as select_form but with no possibility of change the content
        of the form.
        :param orig_response: The original response (as returned by requests)
        :return: The response do_click() returns
        """
        response = RResponse(orig_response)
        form = self.pick_form(response, **kwargs)
        return self.do_click(form, **kwargs)
    #noinspection PyUnusedLocal
    def parse(self, orig_response, **kwargs):
        """Extract SAMLResponse and RelayState from a form in the response."""
        # content is a form from which I get the SAMLResponse
        response = RResponse(orig_response)
        form = self.pick_form(response, **kwargs)
        #form.backwards_compatible = False
        if not form:
            raise InteractionNeeded("Can't pick a form !!")
        return {"SAMLResponse": form["SAMLResponse"],
                "RelayState": form["RelayState"]}
    #noinspection PyUnusedLocal
    def interaction(self, args):
        """Map an interaction "type" to its bound handler method."""
        _type = args["type"]
        if _type == "form":
            return self.select_form
        elif _type == "link":
            return self.chose
        elif _type == "response":
            return self.parse
        else:
            return NoneFunc
# ========================================================================
class Action(object):
    """A single scripted interaction step, configured by a dict of args
    (must contain at least a "type" key understood by Interaction)."""
    def __init__(self, args):
        self.args = args or {}
        self.request = None
    def update(self, dic):
        """Merge *dic* into the stored interaction arguments."""
        self.args.update(dic)
    #noinspection PyUnusedLocal
    def post_op(self, result, conv, args):
        """Hook for subclasses; called with the interaction's result."""
        pass
    def __call__(self, httpc, conv, location, response, content, features):
        """Resolve the handler for self.args["type"] and invoke it on
        *response*, passing location/features/conv as keyword args."""
        intact = Interaction(httpc)
        function = intact.interaction(self.args)
        try:
            _args = self.args.copy()
        except (KeyError, AttributeError):
            _args = {}
        _args.update({"location": location, "features": features, "conv": conv})
        logger.info("<-- FUNCTION: %s" % function.__name__)
        logger.info("<-- ARGS: %s" % _args)
        result = function(response, **_args)
        self.post_op(result, conv, _args)
        return result
| rohe/saml2test | src/saml2test/interaction.py | Python | bsd-2-clause | 13,079 |
import sys
from pyasn1.compat.octets import octs2ints
from pyasn1 import error
from pyasn1 import __version__
# Bitmask values selecting which debug categories are active.
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
# Maps user-facing category names to their bitmask values.
flagMap = {
    'encoder': flagEncoder,
    'decoder': flagDecoder,
    'all': flagAll
}
class Debug:
    """Conditional debug logger; writes 'DBG: ...' lines via defaultPrinter
    and supports bitwise tests against the flag* category masks."""
    # Writer used for output; None if stderr is unavailable at import time.
    defaultPrinter = sys.stderr and sys.stderr.write or None
    def __init__(self, *flags):
        """Enable the named debug categories ('encoder', 'decoder', 'all')."""
        self._flags = flagNone
        if not self.defaultPrinter:
            raise error.PyAsn1Error('Null debug writer specified')
        self._printer = self.defaultPrinter
        self('running pyasn1 version %s' % __version__)
        for f in flags:
            if f not in flagMap:
                raise error.PyAsn1Error('bad debug flag %s' % (f,))
            # Accumulate the selected categories into one bitmask.
            self._flags = self._flags | flagMap[f]
            self('debug category \'%s\' enabled' % f)
    def __str__(self):
        return 'logger %s, flags %x' % (self._printer, self._flags)
    def __call__(self, msg):
        self._printer('DBG: %s\n' % msg)
    def __and__(self, flag):
        # Supports `debugInstance & flagDecoder` category tests.
        return self._flags & flag
    def __rand__(self, flag):
        return flag & self._flags
# Module-level debug logger; 0 (falsy) means debugging is disabled.
logger = 0
def setLogger(l):
    """Install *l* (a Debug instance, or 0 to disable) as the module logger."""
    global logger
    logger = l
def hexdump(octets):
    """Format *octets* as space-separated uppercase hex bytes, 16 per line,
    each line prefixed with a five-digit decimal offset."""
    pieces = []
    for offset, byte in enumerate(octs2ints(octets)):
        prefix = ('\n%.5d: ' % offset) if offset % 16 == 0 else ''
        pieces.append('%s%.2X' % (prefix, byte))
    return ' '.join(pieces)
class Scope:
    """A stack of scope tokens rendered as a dotted path (e.g. 'A.B.C')."""

    def __init__(self):
        self._tokens = []

    def __str__(self):
        return '.'.join(self._tokens)

    def push(self, token):
        """Enter a nested scope named *token*."""
        self._tokens.append(token)

    def pop(self):
        """Leave the innermost scope and return its token."""
        return self._tokens.pop()
scope = Scope()
| coruus/pyasn1 | pyasn1/debug.py | Python | bsd-2-clause | 1,667 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration creating the TrackedUser model."""
    def forwards(self, orm):
        """Create the user_analytics_trackeduser table."""
        # Adding model 'TrackedUser'
        db.create_table('user_analytics_trackeduser', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cookie', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('user_agent', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('user_analytics', ['TrackedUser'])
    def backwards(self, orm):
        """Drop the user_analytics_trackeduser table."""
        # Deleting model 'TrackedUser'
        db.delete_table('user_analytics_trackeduser')
    # Frozen ORM state used by South when replaying this migration.
    models = {
        'user_analytics.trackeduser': {
            'Meta': {'object_name': 'TrackedUser'},
            'cookie': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['user_analytics']
| rburhum/django-user-analytics | user_analytics/migrations/0001_initial.py | Python | bsd-2-clause | 1,229 |
__all__ = ['BioCCollection']
from meta import _MetaInfons, _MetaIter
from compat import _Py2Next
class BioCCollection(_Py2Next, _MetaInfons, _MetaIter):
    """Top-level BioC container holding source/date/key metadata, infons
    and a list of documents."""

    def __init__(self, collection=None):
        """Create an empty collection, or a shallow copy of *collection*."""
        self.infons = dict()
        self.source = ''
        self.date = ''
        self.key = ''
        self.documents = list()
        if collection is not None:
            # Shallow copy: the new instance shares the mutable members
            # (infons dict and documents list) with *collection*.
            self.infons = collection.infons
            self.source = collection.source
            self.date = collection.date
            self.key = collection.key
            self.documents = collection.documents

    def __str__(self):
        s = 'source: ' + self.source + '\n'
        s += 'date: ' + self.date + '\n'
        s += 'key: ' + self.key + '\n'
        s += str(self.infons) + '\n'
        s += str(self.documents) + '\n'
        return s

    def _iterdata(self):
        # Hook used by the _MetaIter mixin to know what to iterate over.
        return self.documents

    def clear_documents(self):
        """Remove all documents from the collection."""
        self.documents = list()

    def get_document(self, doc_idx):
        """Return the document at index *doc_idx*."""
        return self.documents[doc_idx]

    def add_document(self, document):
        """Append *document* to the collection."""
        self.documents.append(document)

    def remove_document(self, document):
        """Remove *document*; an int argument is treated as an index."""
        if type(document) is int:
            # BUGFIX: was `self.dcouments.remove(...)` -- a typo that raised
            # AttributeError for every integer argument. `del` removes the
            # document at exactly this index.
            del self.documents[document]
        else:
            self.documents.remove(document)
| SuLab/PyBioC | src/bioc/bioc_collection.py | Python | bsd-2-clause | 1,302 |
#-*- coding: utf-8 -*-
# Author: Matt Earnshaw <matt@earnshaw.org.uk>
from __future__ import absolute_import
import os
import sys
import sunpy
from PyQt4.QtGui import QApplication
from sunpy.gui.mainwindow import MainWindow
from sunpy.io import UnrecognizedFileTypeError
class Plotman(object):
    """ Wraps a MainWindow so PlotMan instances can be created via the CLI.

    Examples
    --------
    from sunpy.gui import Plotman
    plots = Plotman("data/examples")
    plots.show()
    """

    def __init__(self, *paths):
        """ *paths: directories containing FITS paths
            or FITS paths to be opened in PlotMan """
        self.app = QApplication(sys.argv)
        self.main = MainWindow()
        self.open_files(paths)

    def open_files(self, inputs):
        """Open every recognised FITS/JP2 file among *inputs*, where each
        input is either a file path or a directory to scan (non-recursive).
        Raises IOError for a path that does not exist."""
        VALID_EXTENSIONS = [".jp2", ".fits", ".fts"]
        to_open = []
        # Determine files to process
        for input_ in inputs:
            if os.path.isfile(input_):
                to_open.append(input_)
            elif os.path.isdir(input_):
                for file_ in os.listdir(input_):
                    # BUGFIX: os.listdir returns bare file names; join with
                    # the directory so the resulting paths actually exist.
                    to_open.append(os.path.join(input_, file_))
            else:
                raise IOError("Path " + input_ + " does not exist.")
        # Load files
        for filepath in to_open:
            name, ext = os.path.splitext(filepath) #pylint: disable=W0612
            if ext.lower() in VALID_EXTENSIONS:
                try:
                    self.main.add_tab(filepath, os.path.basename(filepath))
                except UnrecognizedFileTypeError:
                    # Skip files with a plausible extension that cannot
                    # actually be read.
                    pass

    def show(self):
        """Show the main window and enter the Qt event loop."""
        self.main.show()
        self.app.exec_()
if __name__=="__main__":
    # Demo entry point: display the sample AIA 171 image shipped with sunpy.
    from sunpy.gui import Plotman
    plots = Plotman(sunpy.AIA_171_IMAGE)
    plots.show()
| jslhs/sunpy | sunpy/gui/__init__.py | Python | bsd-2-clause | 1,811 |
from setuptools import setup
from setuptools.extension import Extension
import numpy as np
import os
# Remove a stale distutils MANIFEST so it is regenerated from MANIFEST.in.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')
# NumPy headers are needed to compile the C extension modules below.
include_dirs = [np.get_include()]
setup(name="pystruct",
      version="0.3.2",
      install_requires=["ad3", "numpy"],
      packages=['pystruct', 'pystruct.learners', 'pystruct.inference',
                'pystruct.models', 'pystruct.utils', 'pystruct.datasets',
                'pystruct.tests', 'pystruct.tests.test_learners',
                'pystruct.tests.test_models', 'pystruct.tests.test_inference',
                'pystruct.tests.test_utils'],
      include_package_data=True,
      description="Structured Learning and Prediction in Python",
      author="Andreas Mueller",
      author_email="t3kcit@gmail.com",
      url="http://pystruct.github.io",
      license="BSD 2-clause",
      use_2to3=True,
      ext_modules=[Extension("pystruct.models.utils", ["src/utils.c"],
                             include_dirs=include_dirs),
                   Extension("pystruct.inference._viterbi",
                             ["pystruct/inference/_viterbi.c"],
                             include_dirs=include_dirs)],
      classifiers=['Intended Audience :: Science/Research',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved',
                   'Programming Language :: Python',
                   'Topic :: Scientific/Engineering',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: POSIX',
                   'Operating System :: Unix',
                   'Operating System :: MacOS',
                   'Programming Language :: Python :: 2',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.6',
                   ],
      )
| pystruct/pystruct | setup.py | Python | bsd-2-clause | 1,928 |
"""
yubistack.exceptions
~~~~~~~~~~~~~~~~~~~~
List all custom exceptions here
"""
# Maps internal status codes to the human-readable messages rendered by
# YubistackError.__str__, grouped by the subsystem that raises them.
STATUS_CODES = {
    # YKAuth
    'BAD_PASSWORD': 'Invalid password',
    'DISABLED_TOKEN': 'Token is disabled',
    'UNKNOWN_USER': 'Unknown user',
    'INVALID_TOKEN': 'Token is not associated with user',
    # YKVal
    'BACKEND_ERROR': 'Backend error',
    'BAD_OTP': 'Invalid OTP',
    'BAD_SIGNATURE': 'The HMAC signature verification failed',
    'DELAYED_OTP': 'Expired OTP',
    'INVALID_PARAMETER': 'The request has invalid parameter',
    'MISSING_PARAMETER': 'The request missing parameter',
    'NO_SUCH_CLIENT': 'The request id does not exist',
    'NOT_ENOUGH_ANSWERS': 'Server could not get requested number of syncs before timeout',
    # BUGFIX: message read "is now allowed", the opposite of its meaning.
    'OPERATION_NOT_ALLOWED': 'The request is not allowed',
    'REPLAYED_OTP': 'Replayed OTP',
    'REPLAYED_REQUEST': 'Server has seen the OTP/Nonce combination before',
    # YKKSM
    'CORRUPT_OTP': 'Corrupt OTP',
    'MISSING_OTP': 'No OTP provided',
    'UNKNOWN_TOKEN': 'Unknown yubikey',
}
class YubistackError(Exception):
    """Base class for all Yubistack errors.

    The first positional argument must be a STATUS_CODES key; an optional
    second argument is appended to the rendered message.
    """
    NAME = 'Yubistack error'

    def __init__(self, *args):
        super(YubistackError, self).__init__(*args)
        # Keep the status code handy for programmatic inspection.
        self.error_code = self.args[0]

    def __str__(self):
        extra = self.args[1] if len(self.args) == 2 else None
        message = STATUS_CODES[self.error_code]
        return message if extra is None else '%s: %s' % (message, extra)
class YKAuthError(YubistackError):
    """ Error returned by the Client class (authentication step) """
    NAME = 'Authentication error'
class YKValError(YubistackError):
    """ Error returned by the Validator class (OTP validation step) """
    NAME = 'Validation error'
class YKSyncError(YubistackError):
    """ Error returned by the Sync class (server synchronization step) """
    NAME = 'Sync error'
class YKKSMError(YubistackError):
    """ Error returned by the Decryptor class (key storage module step) """
    NAME = 'Decryption error'
| oriordan/yubistack | yubistack/exceptions.py | Python | bsd-2-clause | 1,873 |
import datetime
import os.path
# All fixture paths are resolved relative to this test module's directory.
kDirName, filename = os.path.split(os.path.abspath(__file__))
# Database and filesystem fixtures.
kFixtureFile = os.path.join(kDirName, 'types.db')
kTestFile = os.path.join(kDirName, 'test.db')
kTestDirectory = os.path.join(kDirName, 'tempdir', 'child')
kConfigFile = os.path.join(kDirName, 'testing.ini')
kConfigFile2 = os.path.join(kDirName, 'testing2.ini')
kLockFile = os.path.join(kDirName, 'lockfile')
kAwsBucket = 'orion.aws.testing'
# Import/repository fixtures.
kImportFile = os.path.join(kDirName, 'import.json')
kImportDirectory = os.path.join(kDirName, 'to_import')
kRepoDirectory = os.path.join(kDirName, 'imported')
kImportDatabase = os.path.join(kDirName, 'imported.db')
# Example data files used by individual tests.
kExampleTextFile = os.path.join(kDirName, 'example_text_file.txt')
kExampleImageFile = os.path.join(kDirName, 'example_image.png')
kExampleTemporaryImageFile = os.path.join(kDirName, 'example_image_temp.png')
kExampleDownloadedFile = os.path.join(kDirName, 'fetched.dat')
kExampleCheckpointFile = os.path.join(kDirName, 'example_checkpoint.dat')
kExampleNewCheckpointFile = os.path.join(kDirName, 'example_new_checkpoint.dat')
# S3 settings for the storage tests.
kS3HostName = 's3.amazonaws.com'
kExampleBucket = 'rigor-test-bucket'
kExampleCredentials = 'test_credentials'
# (height, width, channels) of the example image above.
kExampleImageDimensions = (1080, 3840, 3)
kNonexistentFile = '/xxxzzfooxxx'
kExamplePercept = {
'annotations': [
{'boundary': ((1, 10), (3, 6), (1, 10), (10, 3)), 'confidence': 4, 'domain': u'test', 'model': u'e', 'properties': {u'prop': u'value'}, 'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252667), 'tags': [ u'test_tag', ]},
{'boundary': ((10, 4), (4, 8), (3, 8), (6, 3)), 'confidence': 5, 'domain': u'test', 'model': u'e', 'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252787)},
{'boundary': ((1, 7), (1, 9), (7, 1), (3, 5)), 'confidence': 4, 'domain': u'test', 'model': u'd', 'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252969)}
],
'device_id': u'device_1938401',
'format': u'image/jpeg',
'hash': u'edd66afcf0eb4f5ef392fd8e94ff0ff2139ddc01',
'locator': u'example://mybucket/182828291',
'properties': {u'val1': u'val2'},
'sensors': {'acceleration_x': 0.1, 'acceleration_y': 0.2, 'acceleration_z': 0.3, 'altitude': 123.0, 'altitude_accuracy': 2.34, 'bearing': 180.1, 'bearing_accuracy': 1.23, 'location': (34.56, -120.2), 'location_accuracy': 0.1, 'location_provider': u'gps', 'speed': 60.1},
'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252487),
'x_size': 800, 'y_size': 600
}
| blindsightcorp/rigor | test/constants.py | Python | bsd-2-clause | 2,408 |
from __future__ import absolute_import, division, print_function, unicode_literals
from django.contrib.auth.decorators import user_passes_test
from django_otp import user_has_device
from django_otp.conf import settings
def otp_required(view=None, redirect_field_name='next', login_url=None, if_configured=False):
    """
    Similar to :func:`~django.contrib.auth.decorators.login_required`, but
    requires the user to be :term:`verified`. By default, this redirects users
    to :setting:`OTP_LOGIN_URL`.

    :param if_configured: If ``True``, an authenticated user with no confirmed
        OTP devices will be allowed. Default is ``False``.
    :type if_configured: bool
    """
    target_url = settings.OTP_LOGIN_URL if login_url is None else login_url

    def _passes(user):
        if user.is_verified():
            return True
        return if_configured and user.is_authenticated() and not user_has_device(user)

    decorator = user_passes_test(_passes, login_url=target_url,
                                 redirect_field_name=redirect_field_name)
    # Support both bare (@otp_required) and parameterized (@otp_required(...))
    # usage.
    if view is None:
        return decorator
    return decorator(view)
| altanawealth/django-otp | django_otp/decorators.py | Python | bsd-2-clause | 1,053 |
import os
import unittest
import numpy as np
from scipy import sparse
from ParamSklearn.components.data_preprocessing.one_hot_encoding import OneHotEncoder
from ParamSklearn.util import _test_preprocessing
class OneHotEncoderTest(unittest.TestCase):
    """Unit tests for the OneHotEncoder preprocessing component."""

    # Columns 3, 4, 8, 32, 33 and 34 of the 39-column fixture dataset are
    # numerical; every other column is categorical.
    NUMERICAL_COLUMNS = frozenset((3, 4, 8, 32, 33, 34))

    def setUp(self):
        self.categorical = [column not in self.NUMERICAL_COLUMNS
                            for column in range(39)]
        this_directory = os.path.dirname(__file__)
        self.X_train = np.loadtxt(os.path.join(this_directory, "dataset.pkl"))

    def test_default_configuration(self):
        transformations = []
        for _ in range(10):
            space = OneHotEncoder.get_hyperparameter_search_space()
            default = space.get_default_configuration()
            hyperparams = {name: default[name] for name in default
                           if default[name] is not None}
            preprocessor = OneHotEncoder(random_state=1,
                                         categorical_features=self.categorical,
                                         **hyperparams)
            Xt = preprocessor.fit(self.X_train.copy()).transform(
                self.X_train.copy())
            transformations.append(Xt)
            if len(transformations) > 1:
                # Consecutive runs must not differ in every element.
                self.assertFalse(
                    (transformations[-1] != transformations[-2]).all())

    def test_default_configuration_no_encoding(self):
        transformations = []
        for _ in range(10):
            transformation, original = _test_preprocessing(OneHotEncoder)
            self.assertEqual(transformation.shape, original.shape)
            self.assertTrue((transformation == original).all())
            transformations.append(transformation)
            if len(transformations) > 1:
                self.assertTrue(
                    (transformations[-1] == transformations[-2]).all())

    def test_default_configuration_sparse_data(self):
        transformations = []
        # Replace NaN/inf before building the sparse matrix.
        self.X_train[~np.isfinite(self.X_train)] = 0
        self.X_train = sparse.csc_matrix(self.X_train)
        for _ in range(10):
            space = OneHotEncoder.get_hyperparameter_search_space()
            default = space.get_default_configuration()
            hyperparams = {name: default[name] for name in default
                           if default[name] is not None}
            preprocessor = OneHotEncoder(random_state=1,
                                         categorical_features=self.categorical,
                                         **hyperparams)
            Xt = preprocessor.fit(self.X_train.copy()).transform(
                self.X_train.copy())
            transformations.append(Xt)
            if len(transformations) > 1:
                self.assertFalse(
                    (transformations[-1].todense() !=
                     transformations[-2].todense()).all())

    def test_default_configuration_sparse_no_encoding(self):
        transformations = []
        for _ in range(10):
            transformation, original = _test_preprocessing(OneHotEncoder,
                                                           make_sparse=True)
            self.assertEqual(transformation.shape, original.shape)
            self.assertTrue(
                (transformation.todense() == original.todense()).all())
            transformations.append(transformation)
            if len(transformations) > 1:
                self.assertTrue(
                    (transformations[-1].todense() ==
                     transformations[-2].todense()).all())
| automl/paramsklearn | tests/components/data_preprocessing/test_one_hot_encoding.py | Python | bsd-3-clause | 4,887 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Attach SnippetTag rows to a snippet and make the tag FK non-nullable."""
    dependencies = [
        ('django_eulasees', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='snippettag',
            name='snippet',
            # default=0 only back-fills existing rows during the schema
            # change; it is discarded again via preserve_default=False.
            field=models.ForeignKey(default=0, to='django_eulasees.EulaSnippet'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='snippettag',
            name='tag',
            field=models.ForeignKey(to='django_eulasees.Tag'),
            preserve_default=True,
        ),
    ]
| swfiua/django_eulasees | django_eulasees/migrations/0002_auto_20141206_1419.py | Python | bsd-3-clause | 670 |
"""
PatternGenerator abstract class, basic example concrete class, and
multichannel support.
PatternGenerators support both single-channel patterns, i.e. bare
arrays, and multiple channels, such as for color images. See
``PatternGenerator.__call__`` and ``PatternGenerator.channels`` for
more information.
"""
import numpy as np
from numpy import pi
import collections
import param
from param.parameterized import ParamOverrides
from holoviews import HoloMap, Image, RGB, Dimension
from holoviews.core import BoundingBox, BoundingRegionParameter, SheetCoordinateSystem
from .transferfn import TransferFn
# CEBALERT: PatternGenerator has become a bit of a monster abstract
# class. Can it be split into the minimum required to specify the
# interface, with a subclass implementing the rest (this subclass
# still being above the rest of the PatternGenerators)? We want to
# make it easy to add new types of PatternGenerator that don't match
# the assumptions of the current ones (OneDPowerSpectrum is an example
# of a PG that doesn't match the current assumptions), but still lets
# them be used like the current ones.
# (PatternGenerator-->TwoDPatternGenerator?)
# JLALERT: PatternGenerator should have
# override_plasticity_state/restore_plasticity_state functions which
# can override the plasticity of any output_fn that has state, in case
# anyone ever uses such an object in a PatternGenerator. Will also
# need to support Composite patterns.
class PatternGenerator(param.Parameterized):
"""
A class hierarchy for callable objects that can generate 2D patterns.
Once initialized, PatternGenerators can be called to generate a
value or a matrix of values from a 2D function, typically
accepting at least x and y.
A PatternGenerator's Parameters can make use of Parameter's
precedence attribute to specify the order in which they should
appear, e.g. in a GUI. The precedence attribute has a nominal
range of 0.0 to 1.0, with ordering going from 0.0 (first) to 1.0
(last), but any value is allowed.
The orientation and layout of the pattern matrices is defined by
the SheetCoordinateSystem class, which see.
Note that not every parameter defined for a PatternGenerator will
be used by every subclass. For instance, a Constant pattern will
ignore the x, y, orientation, and size parameters, because the
pattern does not vary with any of those parameters. However,
those parameters are still defined for all PatternGenerators, even
Constant patterns, to allow PatternGenerators to be scaled, rotated,
translated, etc. uniformly.
"""
__abstract = True
bounds = BoundingRegionParameter(
default=BoundingBox(points=((-0.5,-0.5), (0.5,0.5))),precedence=-1,
doc="BoundingBox of the area in which the pattern is generated.")
xdensity = param.Number(default=256,bounds=(0,None),precedence=-1,doc="""
Density (number of samples per 1.0 length) in the x direction.""")
ydensity = param.Number(default=256,bounds=(0,None),precedence=-1,doc="""
Density (number of samples per 1.0 length) in the y direction.
Typically the same as the xdensity.""")
x = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=0.20,doc="""
X-coordinate location of pattern center.""")
y = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=0.21,doc="""
Y-coordinate location of pattern center.""")
z = param.ClassSelector(default=None, precedence=-1, class_=Dimension, doc="""
The Dimension object associated with the z-values generated by
the PatternGenerator . If None, uses the default set by
HoloViews.Image.""")
group = param.String(default='Pattern', precedence=-1, doc="""
The group name assigned to the returned HoloViews object.""")
position = param.Composite(attribs=['x','y'],precedence=-1,doc="""
Coordinates of location of pattern center.
Provides a convenient way to set the x and y parameters together
as a tuple (x,y), but shares the same actual storage as x and y
(and thus only position OR x and y need to be specified).""")
orientation = param.Number(default=0.0,softbounds=(0.0,2*pi),precedence=0.40,doc="""
Polar angle of pattern, i.e., the orientation in the Cartesian coordinate
system, with zero at 3 o'clock and increasing counterclockwise.""")
size = param.Number(default=1.0,bounds=(0.0,None),softbounds=(0.0,6.0),
precedence=0.30,doc="""Determines the overall size of the pattern.""")
scale = param.Number(default=1.0,softbounds=(0.0,2.0),precedence=0.10,doc="""
Multiplicative strength of input pattern, defaulting to 1.0""")
offset = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=0.11,doc="""
Additive offset to input pattern, defaulting to 0.0""")
mask = param.Parameter(default=None,precedence=-1,doc="""
Optional object (expected to be an array) with which to multiply the
pattern array after it has been created, before any output_fns are
applied. This can be used to shape the pattern.""")
# Note that the class type is overridden to PatternGenerator below
mask_shape = param.ClassSelector(param.Parameterized,default=None,precedence=0.06,doc="""
Optional PatternGenerator used to construct a mask to be applied to
the pattern.""")
output_fns = param.HookList(default=[], precedence=0.08,doc="""
Optional function(s) to apply to the pattern array after it has been created.
Can be used for normalization, thresholding, etc.""")
    def __init__(self,**params):
        """Initialize parameters, then set up the sampling matrix from bounds/density."""
        super(PatternGenerator, self).__init__(**params)
        # Establish self.pattern_x/pattern_y (via _setup_xy) for the initial geometry.
        self.set_matrix_dimensions(self.bounds, self.xdensity, self.ydensity)
    def __call__(self,**params_to_override):
        """
        Call the subclass's 'function' method on a rotated and scaled
        coordinate system.

        Creates and fills an array with the requested pattern. If
        called without any params, uses the values for the Parameters
        as currently set on the object. Otherwise, any params
        specified override those currently set on the object.
        Returns the resulting (typically 2D) array.
        """
        if 'output_fns' in params_to_override:
            self.warning("Output functions specified through the call method will be ignored.")
        p=ParamOverrides(self,params_to_override)
        # CEBERRORALERT: position parameter is not currently
        # supported. We should delete the position parameter or fix
        # this.
        #
        # position=params_to_override.get('position',None) if position
        # is not None: x,y = position
        self._setup_xy(p.bounds,p.xdensity,p.ydensity,p.x,p.y,p.orientation)
        fn_result = self.function(p)
        self._apply_mask(p,fn_result)
        # Scale first (skipped when 1.0 to avoid an unnecessary array copy),
        # then add the offset in place; finally apply the output functions,
        # which operate on the array in place.
        if p.scale != 1.0:
            result = p.scale * fn_result
        else:
            result = fn_result
        if p.offset != 0.0:
            result += p.offset
        for of in p.output_fns:
            of(result)
        return result
    def __getitem__(self, coords):
        """
        Return a HoloViews element for this pattern: an Image for 0/1-channel
        generators, an RGB for 3/4-channel ones.  Non-slice coords are
        forwarded to the element's own __getitem__.
        """
        value_dims = {}
        if self.num_channels() in [0, 1]:
            raster, data = Image, self()
            value_dims = {'value_dimensions':[self.z]} if self.z else value_dims
        elif self.num_channels() in [3,4]:
            raster = RGB
            # Skip the 'default' (average) entry; keep the per-channel arrays.
            # NOTE(review): .values()[1:] assumes a list (Python 2); under
            # Python 3 dict_values is not sliceable -- confirm target version.
            data = np.dstack(self.channels().values()[1:])
        image = raster(data, bounds=self.bounds,
                       **dict(group=self.group,
                              label=self.__class__.__name__, **value_dims))
        # Works round a bug fixed shortly after HoloViews 1.0.0 release
        return image if isinstance(coords, slice) else image.__getitem__(coords)
def channels(self, use_cached=False, **params_to_override):
"""
Channels() adds a shared interface for single channel and
multichannel structures. It will always return an ordered
dict: its first element is the single channel of the pattern
(if single-channel) or the channel average (if multichannel);
the successive elements are the individual channels' arrays
(key: 0,1,..N-1).
"""
return collections.OrderedDict({ 'default':self.__call__(**params_to_override) })
def num_channels(self):
"""
Query the number of channels implemented by the
PatternGenerator. In case of single-channel generators this
will return 1; in case of multichannel, it will return the
number of channels (eg, in the case of RGB images it would
return '3', Red-Green-Blue, even though the OrderedDict
returned by channels() will have 4 elements -- the 3 channels
+ their average).
"""
return 1
    def _setup_xy(self,bounds,xdensity,ydensity,x,y,orientation):
        """
        Produce pattern coordinate matrices from the bounds and
        density (or rows and cols), and transforms them according to
        x, y, and orientation.

        Stores the results on self.pattern_x and self.pattern_y for use
        by the subclass's function().
        """
        self.debug("bounds=%s, xdensity=%s, ydensity=%s, x=%s, y=%s, orientation=%s",bounds,xdensity,ydensity,x,y,orientation)
        # Generate vectors representing coordinates at which the pattern
        # will be sampled.
        # CB: note to myself - use slice_._scs if supplied?
        x_points,y_points = SheetCoordinateSystem(bounds,xdensity,ydensity).sheetcoordinates_of_matrixidx()
        # Generate matrices of x and y sheet coordinates at which to
        # sample pattern, at the correct orientation; subtracting (x,y)
        # first recenters the pattern on the requested location.
        self.pattern_x, self.pattern_y = self._create_and_rotate_coordinate_arrays(x_points-x,y_points-y,orientation)
    def function(self,p):
        """
        Function to draw a pattern that will then be scaled and rotated.

        Instead of implementing __call__ directly, PatternGenerator
        subclasses will typically implement this helper function used
        by __call__, because that way they can let __call__ handle the
        scaling and rotation for them. Alternatively, __call__ itself
        can be reimplemented entirely by a subclass (e.g. if it does
        not need to do any scaling or rotation), in which case this
        function will be ignored.

        Raises NotImplementedError unless overridden by a subclass.
        """
        raise NotImplementedError
def _create_and_rotate_coordinate_arrays(self, x, y, orientation):
"""
Create pattern matrices from x and y vectors, and rotate them
to the specified orientation.
"""
# Using this two-liner requires that x increase from left to
# right and y decrease from left to right; I don't think it
# can be rewritten in so little code otherwise - but please
# prove me wrong.
pattern_y = np.subtract.outer(np.cos(orientation)*y, np.sin(orientation)*x)
pattern_x = np.add.outer(np.sin(orientation)*y, np.cos(orientation)*x)
return pattern_x, pattern_y
    def _apply_mask(self,p,mat):
        """Create (if necessary) and apply the mask to the given matrix mat (in place)."""
        mask = p.mask
        ms=p.mask_shape
        if ms is not None:
            # Render the mask-shape generator at the parent's geometry:
            # its (x,y) offset is rotated by p.orientation and scaled by
            # p.size before being added to the parent's center.
            mask = ms(x=p.x+p.size*(ms.x*np.cos(p.orientation)-ms.y*np.sin(p.orientation)),
                      y=p.y+p.size*(ms.x*np.sin(p.orientation)+ms.y*np.cos(p.orientation)),
                      orientation=ms.orientation+p.orientation,size=ms.size*p.size,
                      bounds=p.bounds,ydensity=p.ydensity,xdensity=p.xdensity)
        if mask is not None:
            mat*=mask
    def set_matrix_dimensions(self, bounds, xdensity, ydensity):
        """
        Change the dimensions of the matrix into which the pattern
        will be drawn. Users of this class should call this method
        rather than changing the bounds, xdensity, and ydensity
        parameters directly. Subclasses can override this method to
        update any internal data structures that may depend on the
        matrix dimensions.
        """
        self.bounds = bounds
        self.xdensity = xdensity
        self.ydensity = ydensity
        scs = SheetCoordinateSystem(bounds, xdensity, ydensity)
        # Let any TransferFn output functions re-size their internal state.
        for of in self.output_fns:
            if isinstance(of, TransferFn):
                of.initialize(SCS=scs, shape=scs.shape)
    def state_push(self):
        "Save the state of the output functions, to be restored with state_pop."
        # Only output functions that implement the protocol are pushed.
        for of in self.output_fns:
            if hasattr(of,'state_push'):
                of.state_push()
        super(PatternGenerator, self).state_push()
    def state_pop(self):
        "Restore the state of the output functions saved by state_push."
        # Mirror image of state_push: pop each capable output function.
        for of in self.output_fns:
            if hasattr(of,'state_pop'):
                of.state_pop()
        super(PatternGenerator, self).state_pop()
    def anim(self, duration, offset=0, timestep=1,
             label=None, unit=None,
             time_fn=param.Dynamic.time_fn):
        """
        Return a HoloMap animating this pattern over time.

        duration: The temporal duration to animate in the units
        defined on the global time function.

        offset: The temporal offset from which the animation is
        generated given the supplied pattern

        timestep: The time interval between successive frames. The
        duration must be an exact multiple of the timestep.

        label: A label string to override the label of the global time
        function (if not None).

        unit: The unit string to override the unit value of the global
        time function (if not None).

        time_fn: The global time function object that is shared across
        the time-varying objects that are being sampled.

        Note that the offset, timestep and time_fn only affect
        patterns parameterized by time-dependent number
        generators. Otherwise, the frames are generated by successive
        call to the pattern which may or may not be varying (e.g to
        view the patterns contained within a Selector).
        """
        frames = (duration // timestep) + 1
        if duration % timestep != 0:
            raise ValueError("The duration value must be an exact multiple of the timestep.")
        if label is None:
            label = time_fn.label if hasattr(time_fn, 'label') else 'Time'
        unit = time_fn.unit if (not unit and hasattr(time_fn, 'unit')) else unit
        vmap = HoloMap(kdims=[Dimension(label, unit=unit if unit else '')])
        # Save state so time-dependent generators resume unchanged afterwards.
        self.state_push()
        with time_fn as t:
            t(offset)
            for i in range(frames):
                # self[:] renders the current frame as a HoloViews element.
                vmap[t()] = self[:]
                t += timestep
        self.state_pop()
        return vmap
    ## Support for compositional expressions of PatternGenerator objects
    def _promote(self,other):
        """Wrap a non-PatternGenerator operand in a Constant; return [self, other]."""
        if not isinstance(other,PatternGenerator):
            other = Constant(scale=other,offset=0)
        return [self,other]

    def _rpromote(self,other):
        """Reflected-operand version of _promote; returns [other, self]."""
        if not isinstance(other,PatternGenerator):
            other = Constant(scale=other,offset=0)
        return [other,self]
    # Could define any of Python's operators here, esp. if they have operator or ufunc equivalents
    # Each operator builds a lazy Composite combining the operands with the
    # corresponding NumPy ufunc; & and | map to minimum and maximum.
    def __add__  (self,other): return Composite(generators=self._promote(other),operator=np.add)
    def __sub__  (self,other): return Composite(generators=self._promote(other),operator=np.subtract)
    def __mul__  (self,other): return Composite(generators=self._promote(other),operator=np.multiply)
    def __mod__  (self,other): return Composite(generators=self._promote(other),operator=np.mod)
    def __pow__  (self,other): return Composite(generators=self._promote(other),operator=np.power)
    def __div__  (self,other): return Composite(generators=self._promote(other),operator=np.divide)
    def __and__  (self,other): return Composite(generators=self._promote(other),operator=np.minimum)
    def __or__   (self,other): return Composite(generators=self._promote(other),operator=np.maximum)
    # Reflected variants (other op self) for non-PatternGenerator left operands.
    def __radd__ (self,other): return Composite(generators=self._rpromote(other),operator=np.add)
    def __rsub__ (self,other): return Composite(generators=self._rpromote(other),operator=np.subtract)
    def __rmul__ (self,other): return Composite(generators=self._rpromote(other),operator=np.multiply)
    def __rmod__ (self,other): return Composite(generators=self._rpromote(other),operator=np.mod)
    def __rpow__ (self,other): return Composite(generators=self._rpromote(other),operator=np.power)
    def __rdiv__ (self,other): return Composite(generators=self._rpromote(other),operator=np.divide)
    def __rand__ (self,other): return Composite(generators=self._rpromote(other),operator=np.minimum)
    def __ror__  (self,other): return Composite(generators=self._rpromote(other),operator=np.maximum)
    # Unary negation: 0 - self, expressed as a two-element Composite.
    def __neg__  (self): return Composite(generators=[Constant(scale=0),self],operator=np.subtract)

    # Operator-like object whose reduce() takes abs of the first array only,
    # used to implement abs(pattern) through the Composite machinery.
    class abs_first(object):
        @staticmethod
        def reduce(x): return np.abs(x[0])
    def __abs__  (self): return Composite(generators=[self],operator=self.abs_first)
    def pil(self, **params_to_override):
        """Returns a PIL image for this pattern, overriding parameters if provided."""
        from PIL.Image import fromarray
        nchans = self.num_channels()
        if nchans in [0, 1]:
            mode, arr = None, self(**params_to_override)
            # NOTE(review): scales by 255/max rather than 255/(max-min), so
            # the result only spans 0..255 when min is 0; also divides by
            # zero if the pattern is all zeros -- confirm intended.
            arr = (255.0 / arr.max() * (arr - arr.min())).astype(np.uint8)
        elif nchans in [3,4]:
            mode = 'RGB' if nchans==3 else 'RGBA'
            # Stack per-channel arrays, skipping the 'default' average entry.
            # NOTE(review): .values()[1:] assumes Python 2 list semantics.
            arr = np.dstack(self.channels(**params_to_override).values()[1:])
            arr = (255.0*arr).astype(np.uint8)
        else:
            raise ValueError("Unsupported number of channels")
        return fromarray(arr, mode)
# Override class type; must be set here rather than when mask_shape is declared,
# to avoid referring to class not yet constructed
# (the PatternGenerator name is only bound after its class statement completes).
PatternGenerator.params('mask_shape').class_=PatternGenerator
# Trivial example of a PatternGenerator, provided for when a default is
# needed. The other concrete PatternGenerator classes are stored
# elsewhere, to be imported as needed.
class Constant(PatternGenerator):
    """Constant pattern generator, i.e., a solid, uniform field of the same value."""

    # The orientation is ignored, so we don't show it in
    # auto-generated lists of parameters (e.g. in the GUI)
    orientation = param.Number(precedence=-1)

    # Optimization: We use a simpler __call__ method here to skip the
    # coordinate transformations (which would have no effect anyway)
    def __call__(self,**params_to_override):
        """
        Return an array of the requested shape uniformly filled with
        scale + offset, with the mask and output_fns applied as usual.
        """
        p = ParamOverrides(self,params_to_override)
        shape = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        # Use the builtin float: np.float was merely a deprecated alias for
        # it and was removed in NumPy 1.24, so this is behavior-identical
        # but forward-compatible.
        result = p.scale*np.ones(shape, float)+p.offset
        self._apply_mask(p,result)
        for of in p.output_fns:
            of(result)
        return result
class CompositeBase(PatternGenerator):
    """
    PatternGenerator that combines or selects from a list of other
    PatternGenerators.
    """
    # Abstract: only concrete subclasses (e.g. Composite) are instantiated.
    __abstract=True

    generators = param.List(class_=PatternGenerator,default=[Constant(scale=0.0)],
        bounds=(1,None),precedence=0.97, doc="""
        List of patterns to combine or select from. The default pattern is a blank pattern,
        and thus should be overridden for any useful work.""")

    # Parent size acts as a multiplicative factor on each sub-pattern's size.
    size = param.Number(default=1.0,doc="""Scaling factor applied to all sub-patterns.""")
class Composite(CompositeBase):
    """
    PatternGenerator that accepts a list of other PatternGenerators.
    To create a new pattern, asks each of the PatternGenerators in the
    list to create a pattern, then it combines the patterns to create
    a single pattern that it returns.
    """

    # The Accum_Replace operator from LISSOM is not yet supported,
    # but it should be added once PatternGenerator bounding boxes
    # are respected and/or GenericImage patterns support transparency.
    operator = param.Parameter(np.maximum,precedence=0.98,doc="""
        Binary Numpy function used to combine the individual patterns.

        Any binary Numpy array "ufunc" returning the same
        type of array as the operands and supporting the reduce
        operator is allowed here.  Supported ufuncs include::

          add
          subtract
          multiply
          divide
          maximum
          minimum
          remainder
          power

        The most useful ones are probably add and maximum, but there
        are uses for at least some of the others as well (e.g. to
        remove pieces of other patterns).

        You can also write your own operators, by making a class that
        has a static method named "reduce" that returns an array of the
        same size and type as the arrays in the list.  For example::

          class return_first(object):
              @staticmethod
              def reduce(x):
                  return x[0]
        """)

    def _advance_pattern_generators(self,p):
        """
        Subclasses can override this method to provide constraints on
        the values of generators' parameters and/or eliminate
        generators from this list if necessary.
        """
        return p.generators

    def state_push(self):
        """
        Push the state of all generators
        """
        super(Composite,self).state_push()
        for gen in self.generators:
            gen.state_push()

    def state_pop(self):
        """
        Pop the state of all generators
        """
        super(Composite,self).state_pop()
        for gen in self.generators:
            gen.state_pop()

    # JABALERT: To support large numbers of patterns on a large input region,
    # should be changed to evaluate each pattern in a small box, and then
    # combine them at the full Composite Bounding box size.
    def function(self,p):
        """Constructs combined pattern out of the individual ones."""
        generators = self._advance_pattern_generators(p)
        assert hasattr(p.operator,'reduce'),repr(p.operator)+" does not support 'reduce'."
        # CEBALERT: mask gets applied by all PGs including the Composite itself
        # (leads to redundant calculations in current lissom_oo_or usage, but
        # will lead to problems/limitations in the future).
        # Each sub-generator is rendered at the Composite's full bounds, with
        # its position rotated/scaled into the parent's coordinate frame and
        # its orientation/size composed with the parent's.
        patterns = [pg(xdensity=p.xdensity,ydensity=p.ydensity,
                       bounds=p.bounds,mask=p.mask,
                       x=p.x+p.size*(pg.x*np.cos(p.orientation)- pg.y*np.sin(p.orientation)),
                       y=p.y+p.size*(pg.x*np.sin(p.orientation)+ pg.y*np.cos(p.orientation)),
                       orientation=pg.orientation+p.orientation,
                       size=pg.size*p.size)
                    for pg in generators]
        image_array = p.operator.reduce(patterns)
        return image_array
class ChannelTransform(param.Parameterized):
    """
    Abstract callable applied to a channels dictionary (an ordered
    mapping of arrays); concrete subclasses transform the contents in
    some way and return the mapping.
    """

    __abstract = True

    def __call__(self, channels):
        raise NotImplementedError
# Example of a ChannelTransform
# Example of a ChannelTransform
class CorrelateChannels(ChannelTransform):
    """
    Correlate channels by mixing a fraction of one channel into another.
    """

    from_channel = param.Number(default=1, doc="""
        Name of the channel to take data from.""")

    to_channel = param.Number(default=2, doc="""
        Name of the channel to change data of.""")

    strength = param.Number(default=0, doc="""
        Strength of the correlation to add, with 0 being no change,
        and 1.0 overwriting to_channel with from_channel.""")

    def __call__(self, channel_data):
        # Linear blend: destination becomes a weighted mix of source and
        # its previous contents, weighted by self.strength.
        source = channel_data[self.from_channel]
        target = channel_data[self.to_channel]
        blended = self.strength * source + (1 - self.strength) * target
        channel_data[self.to_channel] = blended
        return channel_data
class ChannelGenerator(PatternGenerator):
    """
    Abstract base class for patterns supporting multiple channels natively.
    """

    __abstract = True

    channel_transforms = param.HookList(class_=ChannelTransform,default=[],doc="""
        Optional functions to apply post processing to the set of channels.""")

    def __init__(self, **params):
        # Per-instance channel buffers; filled in by subclasses' __call__.
        self._original_channel_data = [] # channel data before processing
        self._channel_data = [] # channel data after processing
        super(ChannelGenerator, self).__init__(**params)

    def channels(self, use_cached=False, **params_to_override):
        """
        Return an OrderedDict of channels: 'default' first (freshly
        rendered unless use_cached, in which case None), then the cached
        per-channel arrays under integer keys.
        """
        res = collections.OrderedDict()
        if not use_cached:
            # Rendering also refreshes self._channel_data as a side effect.
            default = self(**params_to_override)
            res['default'] = default
        else:
            res['default'] = None
        for i in range(len(self._channel_data)):
            res[i] = self._channel_data[i]
        return res

    def num_channels(self):
        # One entry per channel buffer (excludes the 'default' average).
        return len(self._channel_data)
class ComposeChannels(ChannelGenerator):
    """
    Create a multi-channel PatternGenerator from a list of
    PatternGenerators, with the specified channel_transforms applied.
    """

    generators = param.List(class_=PatternGenerator,default=[Constant(scale=0.0)],
        bounds=(1,None), doc="""
        List of patterns to use for each channel. Generators which already have more than one
        channel will only contribute to a single channel of ComposeChannels.""")

    def __init__(self,**params):
        super(ComposeChannels,self).__init__(**params)
        # Reserve one channel slot per generator, filled on each call.
        self._channel_data += [None] * len(self.generators)

    def __call__(self,**params):
        """Render every channel, apply the transforms, and return the channel average."""
        p = param.ParamOverrides(self,params)
        # Pass only the geometry settings on to the sub-generators
        # (not **p).
        params['xdensity']=p.xdensity
        params['ydensity']=p.ydensity
        params['bounds']=p.bounds
        for index, generator in enumerate(p.generators):
            self._channel_data[index] = generator( **params )
        for transform in self.channel_transforms:
            self._channel_data = transform(self._channel_data)
        # The 'default' channel is the mean of all channels.
        return sum(self._channel_data)/len(self._channel_data)
| ioam/imagen | imagen/patterngenerator.py | Python | bsd-3-clause | 26,198 |
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import tempfile
import unittest
import mock
import requests
from infra_libs import temporary_directory
from infra.services.mastermon import pollers
class FakePoller(pollers.Poller):
  """Minimal Poller that records the JSON payload given to handle_response."""

  endpoint = '/foo'

  def __init__(self, base_url):
    super(FakePoller, self).__init__(base_url, {})
    # Last payload seen by handle_response; None until poll() succeeds.
    self.called_with_data = None

  def handle_response(self, data):
    self.called_with_data = data
@mock.patch('requests.get')
class PollerTest(unittest.TestCase):
  """Tests Poller.poll() URL construction, error handling and dispatch.

  requests.get is patched for every test; mock_get is injected by the
  class decorator as the extra argument.
  """

  def test_requests_url(self, mock_get):
    response = mock_get.return_value
    response.json.return_value = {'foo': 'bar'}
    response.status_code = 200

    p = FakePoller('http://foobar')
    self.assertTrue(p.poll())

    # Poller appends '/json' plus its endpoint to the base URL.
    self.assertEquals(1, mock_get.call_count)
    self.assertEquals('http://foobar/json/foo', mock_get.call_args[0][0])

  def test_strips_trailing_slashes(self, mock_get):
    response = mock_get.return_value
    response.json.return_value = {'foo': 'bar'}
    response.status_code = 200

    p = FakePoller('http://foobar////')
    self.assertTrue(p.poll())

    self.assertEquals(1, mock_get.call_count)
    self.assertEquals('http://foobar/json/foo', mock_get.call_args[0][0])

  def test_returns_false_for_non_200(self, mock_get):
    response = mock_get.return_value
    response.status_code = 404

    p = FakePoller('http://foobar')
    self.assertFalse(p.poll())

  def test_returns_false_for_exception(self, mock_get):
    # Network failures must be swallowed and reported as a failed poll.
    mock_get.side_effect = requests.exceptions.ConnectionError

    p = FakePoller('http://foobar')
    self.assertFalse(p.poll())

  def test_calls_handle_response(self, mock_get):
    response = mock_get.return_value
    response.json.return_value = {'foo': 'bar'}
    response.status_code = 200

    p = FakePoller('http://foobar')
    self.assertTrue(p.poll())
    self.assertEqual({'foo': 'bar'}, p.called_with_data)

  def test_handles_invalid_json(self, mock_get):
    response = mock_get.return_value
    response.json.side_effect = ValueError
    response.status_code = 200

    p = FakePoller('http://foobar')
    self.assertFalse(p.poll())
    # handle_response must not be reached on a decode failure.
    self.assertIsNone(p.called_with_data)
class VarzPollerTest(unittest.TestCase):
  """Tests that VarzPoller.handle_response maps varz JSON onto metrics."""

  def test_response(self):
    p = pollers.VarzPoller('', {'x': 'y'})

    p.handle_response({
        'server_uptime': 123,
        'accepting_builds': True,
        'builders': {
            'foo': {
                'connected_slaves': 1,
                'current_builds': 2,
                'pending_builds': 3,
                'state': "offline",
                'total_slaves': 4,
                'recent_builds_by_status': {
                    '0': 1,
                    '2': 2,
                    '4': 3,
                    'building': 4,
                },
                'recent_finished_build_times': [1, 2, 3],
                'recent_successful_build_times': [1, 2, 3],
            },
            'bar': {
                'connected_slaves': 5,
                'current_builds': 6,
                'pending_builds': 7,
                'state': "idle",
                'total_slaves': 8,
                'recent_builds_by_status': {
                    '0': 1,
                    '2': 2,
                    '4': 3,
                    'building': 4,
                },
                'recent_finished_build_times': [1, 2, 3],
                'recent_successful_build_times': [1, 2, 3],
            },
        },
    })

    # Global metrics keep only the poller's base fields.
    self.assertEqual(123, p.uptime.get({'x': 'y'}))
    self.assertEqual(True, p.accepting_builds.get({'x': 'y'}))
    # Per-builder metrics add a 'builder' field to the base fields.
    self.assertEqual(1, p.connected.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(2, p.current_builds.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(3, p.pending_builds.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(4, p.total.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual('offline', p.state.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(5, p.connected.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual(6, p.current_builds.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual(7, p.pending_builds.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual(8, p.total.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual('idle', p.state.get({'builder': 'bar', 'x': 'y'}))
    # Numeric status keys are translated to status names.
    self.assertEqual(1, p.recent_builds.get(
        {'builder': 'foo', 'x': 'y', 'status': 'success'}))
    self.assertEqual(4, p.recent_builds.get(
        {'builder': 'foo', 'x': 'y', 'status': 'building'}))
    self.assertIsNotNone(p.recent_finished_build_times.get(
        {'builder': 'foo', 'x': 'y'}))
    self.assertIsNotNone(p.recent_successful_build_times.get(
        {'builder': 'foo', 'x': 'y'}))

  def test_response_with_missing_data(self):
    # Missing numeric fields default to 0 and a missing state to 'unknown'.
    p = pollers.VarzPoller('', {'x': 'y'})

    p.handle_response({
        'server_uptime': 123,
        'accepting_builds': True,
        'builders': {
            'foo': {
                'state': "offline",
                'total_slaves': 4,
            },
            'bar': {
                'connected_slaves': 5,
                'current_builds': 6,
                'pending_builds': 7,
            },
        },
    })

    self.assertEqual(123, p.uptime.get({'x': 'y'}))
    self.assertEqual(True, p.accepting_builds.get({'x': 'y'}))
    self.assertEqual(0, p.connected.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(0, p.current_builds.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(0, p.pending_builds.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(4, p.total.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual('offline', p.state.get({'builder': 'foo', 'x': 'y'}))
    self.assertEqual(5, p.connected.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual(6, p.current_builds.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual(7, p.pending_builds.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual(0, p.total.get({'builder': 'bar', 'x': 'y'}))
    self.assertEqual('unknown', p.state.get({'builder': 'bar', 'x': 'y'}))
class FilePollerTest(unittest.TestCase):
  """Tests FilePoller's consumption, rotation and error tolerance."""

  @staticmethod
  def create_data_file(dirname, data_list):
    # Writes one JSON object per line, matching the ts_mon log format.
    with open(os.path.join(dirname, 'ts_mon.log'), 'w') as f:
      for data in data_list:
        f.write('%s\n' % json.dumps(data))
      return f.name

  def test_no_file(self):
    # A missing input file is not an error, and nothing gets rotated.
    with temporary_directory(prefix='poller-test-') as tempdir:
      filename = os.path.join(tempdir, 'no-such-file')
      p = pollers.FilePoller(filename, {})
      self.assertTrue(p.poll())
      self.assertFalse(os.path.isfile(pollers.rotated_filename(filename)))

  @mock.patch('infra_libs.ts_mon.CounterMetric.increment')
  @mock.patch('infra_libs.ts_mon.CumulativeDistributionMetric.add')
  def test_file_has_data(self, fake_add, fake_increment):
    result1 = {'builder': 'b1', 'slave': 's1',
               'result': 'r1', 'project_id': 'chromium'}
    result2 = {'builder': 'b1', 'slave': 's1',
               'result': 'r1', 'project_id': 'unknown'}
    # Check that we've listed all the required metric fields.
    self.assertEqual(set(result1), set(pollers.FilePoller.field_keys))
    self.assertEqual(set(result2), set(pollers.FilePoller.field_keys))

    data1 = result1.copy()
    data2 = result2.copy()
    data1['random'] = 'value' # Extra field, should be ignored.
    del data2['project_id']   # Missing field, should become 'unknown'.
    data2['duration_s'] = 5
    with temporary_directory(prefix='poller-test-') as tempdir:
      filename = self.create_data_file(tempdir, [data1, data2])
      p = pollers.FilePoller(filename, {})
      self.assertTrue(p.poll())
      fake_increment.assert_any_call(result1)
      fake_increment.assert_any_call(result2)
      fake_add.assert_any_call(data2['duration_s'], result2)
      # The consumed file is replaced by its rotated copy.
      self.assertFalse(os.path.isfile(filename))
      # Make sure the rotated file is still there - for debugging.
      self.assertTrue(os.path.isfile(pollers.rotated_filename(filename)))

  def test_file_has_bad_data(self):
    """Mostly a smoke test: don't crash on bad data."""
    with temporary_directory(prefix='poller-test-') as tempdir:
      filename = self.create_data_file(tempdir, [])
      with open(filename, 'a') as f:
        f.write('}')
      p = pollers.FilePoller(filename, {})
      self.assertTrue(p.poll())
      self.assertFalse(os.path.isfile(filename))
      # Make sure the rotated file is still there - for debugging.
      self.assertTrue(os.path.isfile(pollers.rotated_filename(filename)))

  def test_safe_remove_error(self):
    """Smoke test: the function should not raise an exception."""
    pollers.safe_remove('nonexistent-file')
| nicko96/Chrome-Infra | infra/services/mastermon/test/pollers_test.py | Python | bsd-3-clause | 8,592 |
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db.models import Count
from django.db.models.loading import cache
from django.test import TestCase
from models import (ResolveThis, Item, RelatedItem, Child, Leaf, Proxy,
SimpleItem, Feature)
class DeferRegressionTest(TestCase):
    """Regression tests for QuerySet.defer()/only() (tickets #10695, #10710,
    #10733, #10738, #11936, #12163, #15790, #16409)."""

    def test_basic(self):
        # Deferred fields should really be deferred and not accidentally use
        # the field's default value just because they aren't passed to __init__
        Item.objects.create(name="first", value=42)
        obj = Item.objects.only("name", "other_value").get(name="first")

        # Accessing "name" doesn't trigger a new database query. Accessing
        # "value" or "text" should.
        def test():
            self.assertEqual(obj.name, "first")
            self.assertEqual(obj.other_value, 0)
        self.assertNumQueries(0, test)

        def test():
            self.assertEqual(obj.value, 42)
        self.assertNumQueries(1, test)

        def test():
            self.assertEqual(obj.text, "xyzzy")
        self.assertNumQueries(1, test)

        # Deferred values are cached after the first access.
        def test():
            self.assertEqual(obj.text, "xyzzy")
        self.assertNumQueries(0, test)

        # Regression test for #10695. Make sure different instances don't
        # inadvertently share data in the deferred descriptor objects.
        i = Item.objects.create(name="no I'm first", value=37)
        items = Item.objects.only("value").order_by("-value")
        self.assertEqual(items[0].name, "first")
        self.assertEqual(items[1].name, "no I'm first")

        RelatedItem.objects.create(item=i)
        r = RelatedItem.objects.defer("item").get()
        self.assertEqual(r.item_id, i.id)
        self.assertEqual(r.item, i)

        # Some further checks for select_related() and inherited model
        # behaviour (regression for #10710).
        c1 = Child.objects.create(name="c1", value=42)
        c2 = Child.objects.create(name="c2", value=37)
        Leaf.objects.create(name="l1", child=c1, second_child=c2)

        obj = Leaf.objects.only("name", "child").select_related()[0]
        self.assertEqual(obj.child.name, "c1")

        self.assertQuerysetEqual(
            Leaf.objects.select_related().only("child__name", "second_child__name"), [
                "l1",
            ],
            attrgetter("name")
        )

        # Models instances with deferred fields should still return the same
        # content types as their non-deferred versions (bug #10738).
        ctype = ContentType.objects.get_for_model
        c1 = ctype(Item.objects.all()[0])
        c2 = ctype(Item.objects.defer("name")[0])
        c3 = ctype(Item.objects.only("name")[0])
        self.assertTrue(c1 is c2 is c3)

        # Regression for #10733 - only() can be used on a model with two
        # foreign keys.
        results = Leaf.objects.only("name", "child", "second_child").select_related()
        self.assertEqual(results[0].child.name, "c1")
        self.assertEqual(results[0].second_child.name, "c2")

        results = Leaf.objects.only("name", "child", "second_child", "child__name", "second_child__name").select_related()
        self.assertEqual(results[0].child.name, "c1")
        self.assertEqual(results[0].second_child.name, "c2")

        # Test for #12163 - Pickling error saving session with unsaved model
        # instances.
        SESSION_KEY = '2b1189a188b44ad18c35e1baac6ceead'

        item = Item()
        item._deferred = False
        s = SessionStore(SESSION_KEY)
        s.clear()
        s["item"] = item
        s.save()

        s = SessionStore(SESSION_KEY)
        s.modified = True
        s.save()

        i2 = s["item"]
        self.assertFalse(i2._deferred)

        # Regression for #11936 - loading.get_models should not return deferred
        # models by default.
        klasses = sorted(
            cache.get_models(cache.get_app("defer_regress")),
            key=lambda klass: klass.__name__
        )
        self.assertEqual(
            klasses, [
                Child,
                Feature,
                Item,
                Leaf,
                Proxy,
                RelatedItem,
                ResolveThis,
                SimpleItem,
            ]
        )

        # With include_deferred=True, every auto-generated deferred subclass
        # created above is also returned.
        klasses = sorted(
            map(
                attrgetter("__name__"),
                cache.get_models(
                    cache.get_app("defer_regress"), include_deferred=True
                ),
            )
        )
        self.assertEqual(
            klasses, [
                "Child",
                "Child_Deferred_value",
                "Feature",
                "Item",
                "Item_Deferred_name",
                "Item_Deferred_name_other_value_text",
                "Item_Deferred_name_other_value_value",
                "Item_Deferred_other_value_text_value",
                "Item_Deferred_text_value",
                "Leaf",
                "Leaf_Deferred_child_id_second_child_id_value",
                "Leaf_Deferred_name_value",
                "Leaf_Deferred_second_child_value",
                "Leaf_Deferred_value",
                "Proxy",
                "RelatedItem",
                "RelatedItem_Deferred_",
                "RelatedItem_Deferred_item_id",
                "ResolveThis",
                "SimpleItem",
            ]
        )

        # Regression for #16409 - make sure defer() and only() work with annotate()
        self.assertIsInstance(list(SimpleItem.objects.annotate(Count('feature')).defer('name')), list)
        self.assertIsInstance(list(SimpleItem.objects.annotate(Count('feature')).only('name')), list)

    def test_only_and_defer_usage_on_proxy_models(self):
        # Regression for #15790 - only() broken for proxy models
        proxy = Proxy.objects.create(name="proxy", value=42)

        msg = 'QuerySet.only() return bogus results with proxy models'
        dp = Proxy.objects.only('other_value').get(pk=proxy.pk)
        self.assertEqual(dp.name, proxy.name, msg=msg)
        self.assertEqual(dp.value, proxy.value, msg=msg)

        # also test things with .defer()
        msg = 'QuerySet.defer() return bogus results with proxy models'
        dp = Proxy.objects.defer('name', 'text', 'value').get(pk=proxy.pk)
        self.assertEqual(dp.name, proxy.name, msg=msg)
        self.assertEqual(dp.value, proxy.value, msg=msg)

    def test_resolve_columns(self):
        # Deferring a field must not break column resolution for the rest.
        rt = ResolveThis.objects.create(num=5.0, name='Foobar')
        qs = ResolveThis.objects.defer('num')
        self.assertEqual(1, qs.count())
        self.assertEqual('Foobar', qs[0].name)
| disqus/django-old | tests/regressiontests/defer_regress/tests.py | Python | bsd-3-clause | 6,716 |
# -*- coding: utf-8 -*-
"""
LICENCE
-------
Copyright 2015 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
SMQTK subsystem providing the ability to manage and fuse index rankings from
multiple descriptor-indexer pairings.
"""
| anguoyang/SMQTK | python/smqtk/fusion/__init__.py | Python | bsd-3-clause | 369 |
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (c) 2012 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import sys
import string
import optparse
import re
try:
import json
except ImportError:
import simplejson as json
import CodeGeneratorInspectorStrings
# Map of protocol domain name -> ENABLE(...) define name that guards the
# corresponding agent code in the generated C++ (see DomainNameFixes).
DOMAIN_DEFINE_NAME_MAP = {
    "Database": "SQL_DATABASE",
    "Debugger": "JAVASCRIPT_DEBUGGER",
    "DOMDebugger": "JAVASCRIPT_DEBUGGER",
    "FileSystem": "FILE_SYSTEM",
    "IndexedDB": "INDEXED_DATABASE",
    "Profiler": "JAVASCRIPT_DEBUGGER",
    "Worker": "WORKERS",
}
# Manually-filled map of type name replacements.
# Keys are identifiers as they appear in the protocol JSON; values are the
# names to emit in generated C++ instead.
TYPE_NAME_FIX_MAP = {
    "RGBA": "Rgba", # RGBA is reported to be conflicting with a define name in Windows CE.
    "": "Empty",
}
# Qualified ("Domain.Type") names of protocol types that get a public
# runtimeCast() helper in the generated C++ type builders.
TYPES_WITH_RUNTIME_CAST_SET = frozenset(["Runtime.RemoteObject", "Runtime.PropertyDescriptor", "Runtime.InternalPropertyDescriptor",
                                         "Debugger.FunctionDetails", "Debugger.CallFrame",
                                         "Canvas.TraceLog", "Canvas.ResourceInfo", "Canvas.ResourceState",
                                         # This should be a temporary hack. TimelineEvent should be created via generated C++ API.
                                         "Timeline.TimelineEvent"])
# Types generated as "open": they inherit InspectorObject (not
# InspectorObjectBase) so C++ callers may read/add fields directly, and the
# validator does not reject unexpected properties.
TYPES_WITH_OPEN_FIELD_LIST_SET = frozenset(["Timeline.TimelineEvent",
                                            # InspectorStyleSheet not only creates this property but wants to read it and modify it.
                                            "CSS.CSSProperty",
                                            # InspectorResourceAgent needs to update mime-type.
                                            "Network.Response"])
# When True, protocol "integer" parameters use the stricter
# TypeBuilder::ExactlyInt wrapper instead of plain int (see TypeModel).
EXACTLY_INT_SUPPORTED = False
# Command-line interface.  The script expects exactly one positional argument
# (the protocol description JSON file) plus the output-directory options below;
# on any parse/validation failure it prints usage and exits with status 1.
cmdline_parser = optparse.OptionParser()
cmdline_parser.add_option("--output_h_dir")
cmdline_parser.add_option("--output_cpp_dir")
cmdline_parser.add_option("--output_js_dir")
cmdline_parser.add_option("--write_always", action="store_true")
cmdline_parser.add_option("--no_verification", action="store_true")
try:
    arg_options, arg_values = cmdline_parser.parse_args()
    if (len(arg_values) != 1):
        raise Exception("Exactly one plain argument expected (found %s)" % len(arg_values))
    input_json_filename = arg_values[0]
    output_header_dirname = arg_options.output_h_dir
    output_cpp_dirname = arg_options.output_cpp_dir
    output_js_dirname = arg_options.output_js_dir
    write_always = arg_options.write_always
    verification = not arg_options.no_verification
    if not output_header_dirname:
        raise Exception("Output .h directory must be specified")
    if not output_cpp_dirname:
        raise Exception("Output .cpp directory must be specified")
    if not output_js_dirname:
        raise Exception("Output .js directory must be specified")
except Exception:
    # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
    exc = sys.exc_info()[1]
    sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc)
    sys.stderr.write("Usage: <script> Inspector.json --output_h_dir <output_header_dir> --output_cpp_dir <output_cpp_dir> --output_js_dir <output_js_dir> [--write_always] [--no_verification]\n")
    exit(1)
def dash_to_camelcase(word):
    """Convert a dash-separated word to CamelCase.

    An empty segment (produced by consecutive, leading or trailing dashes)
    is rendered as a literal '-', matching the historical
    ``x.capitalize() or '-'`` behavior.
    """
    pieces = []
    for segment in word.split('-'):
        capitalized = segment.capitalize()
        pieces.append(capitalized if capitalized else '-')
    return ''.join(pieces)
def fix_camel_case(name):
    """Turn a dash-separated protocol name into an UpperCamelCase identifier,
    restoring the canonical all-caps spelling of well-known acronyms."""
    # "some-name" -> "someName"
    camel = re.sub(r'-(\w)', lambda match: match.group(1).upper(), name)
    # Capitalize the first character only (the rest is left untouched).
    camel = camel[:1].upper() + camel[1:]
    # Uppercase any case-insensitive occurrence of a known acronym.
    return re.sub(r'(?i)HTML|XML|WML|API|GC|XHR|DOM|CSS',
                  lambda match: match.group(0).upper(), camel)
def to_title_case(name):
    """Return *name* with its first character upper-cased; the remainder of
    the string is left exactly as-is (empty input is returned unchanged)."""
    if not name:
        return name
    return name[0].upper() + name[1:]
class Capitalizer:
    """Static helpers for converting identifiers between camel-case styles."""
    @staticmethod
    def lower_camel_case_to_upper(str):
        # "fooBar" -> "FooBar"; input that already starts upper-case (or is
        # empty) is returned unchanged.
        if len(str) > 0 and str[0].islower():
            str = str[0].upper() + str[1:]
        return str
    @staticmethod
    def upper_camel_case_to_lower(str):
        # "FooBar" -> "fooBar".  A leading run of several capitals is only
        # accepted when it is a known abbreviation (see ABBREVIATION), e.g.
        # "DOMWindow" -> "domWindow"; otherwise an exception is raised.
        pos = 0
        while pos < len(str) and str[pos].isupper():
            pos += 1
        if pos == 0:
            return str
        if pos == 1:
            return str[0].lower() + str[1:]
        if pos < len(str):
            # Keep the capital that begins the next word ("DOMWindow": the
            # 'W' belongs to "Window", not to the abbreviation).
            pos -= 1
        possible_abbreviation = str[0:pos]
        if possible_abbreviation not in Capitalizer.ABBREVIATION:
            raise Exception("Unknown abbreviation %s" % possible_abbreviation)
        str = possible_abbreviation.lower() + str[pos:]
        return str
    @staticmethod
    def camel_case_to_capitalized_with_underscores(str):
        # "fooBar" -> "FOO_BAR".
        if len(str) == 0:
            return str
        output = Capitalizer.split_camel_case_(str)
        return "_".join(output).upper()
    @staticmethod
    def split_camel_case_(str):
        # Split a camel-case identifier into upper-cased words.  Runs of
        # single-letter words are re-joined afterwards when they spell a
        # known abbreviation (e.g. D, O, M -> DOM).
        output = []
        pos_being = 0
        pos = 1
        has_oneletter = False
        while pos < len(str):
            if str[pos].isupper():
                output.append(str[pos_being:pos].upper())
                if pos - pos_being == 1:
                    has_oneletter = True
                pos_being = pos
            pos += 1
        output.append(str[pos_being:])
        if has_oneletter:
            array_pos = 0
            while array_pos < len(output) - 1:
                if len(output[array_pos]) == 1:
                    array_pos_end = array_pos + 1
                    while array_pos_end < len(output) and len(output[array_pos_end]) == 1:
                        array_pos_end += 1
                    if array_pos_end - array_pos > 1:
                        possible_abbreviation = "".join(output[array_pos:array_pos_end])
                        if possible_abbreviation.upper() in Capitalizer.ABBREVIATION:
                            output[array_pos:array_pos_end] = [possible_abbreviation]
                        else:
                            array_pos = array_pos_end - 1
                array_pos += 1
        return output
    # Capital runs treated as single words by the helpers above.
    ABBREVIATION = frozenset(["XHR", "DOM", "CSS"])
# Preprocessor condition that guards all generated runtime-validation code.
VALIDATOR_IFDEF_NAME = "!ASSERT_DISABLED"
class DomainNameFixes:
    """Per-domain naming data derived from the protocol domain name."""
    @classmethod
    def get_fixed_data(cls, domain_name):
        # Returns a holder class exposing the agent field name, whether JS
        # bindings are skipped for this domain, and an optional guard.
        field_name_res = Capitalizer.upper_camel_case_to_lower(domain_name) + "Agent"
        class Res(object):
            skip_js_bind = domain_name in cls.skip_js_bind_domains
            agent_field_name = field_name_res
            @staticmethod
            def get_guard():
                # For conditionally-compiled domains returns a Guard that
                # emits "#if ENABLE(...)" / "#endif" lines; for all other
                # domains falls through and returns None implicitly
                # (callers check "if domain_guard:").
                if domain_name in DOMAIN_DEFINE_NAME_MAP:
                    define_name = DOMAIN_DEFINE_NAME_MAP[domain_name]
                    class Guard:
                        @staticmethod
                        def generate_open(output):
                            output.append("#if ENABLE(%s)\n" % define_name)
                        @staticmethod
                        def generate_close(output):
                            output.append("#endif // ENABLE(%s)\n" % define_name)
                    return Guard
        return Res
    # Domains for which no JavaScript bindings are generated.
    skip_js_bind_domains = set(["DOMDebugger"])
class RawTypes(object):
    """Models the raw JSON protocol types and how each maps onto the C++
    InspectorValue-based API: getter/setter method names, C initializers,
    JS binding type names, validator calls and output-passing conventions.
    """
    @staticmethod
    def get(json_type):
        # Map a protocol "type" string to the corresponding raw type class.
        if json_type == "boolean":
            return RawTypes.Bool
        elif json_type == "string":
            return RawTypes.String
        elif json_type == "array":
            return RawTypes.Array
        elif json_type == "object":
            return RawTypes.Object
        elif json_type == "integer":
            return RawTypes.Int
        elif json_type == "number":
            return RawTypes.Number
        elif json_type == "any":
            return RawTypes.Any
        else:
            raise Exception("Unknown type: %s" % json_type)
    # For output parameter all values are passed by pointer except RefPtr-based types.
    class OutputPassModel:
        class ByPointer:
            @staticmethod
            def get_argument_prefix():
                return "&"
            @staticmethod
            def get_parameter_type_suffix():
                return "*"
        class ByReference:
            @staticmethod
            def get_argument_prefix():
                return ""
            @staticmethod
            def get_parameter_type_suffix():
                return "&"
    class BaseType(object):
        """Base class for all raw types below.

        Tracks (per concrete subclass) whether validator code must be
        generated for the type; the flag is set on demand during resolution.
        """
        need_internal_runtime_cast_ = False
        @classmethod
        def request_raw_internal_runtime_cast(cls):
            if not cls.need_internal_runtime_cast_:
                cls.need_internal_runtime_cast_ = True
        @classmethod
        def get_raw_validator_call_text(cls):
            return "RuntimeCastHelper::assertType<InspectorValue::Type%s>" % cls.get_validate_method_params().template_type
    class String(BaseType):
        @staticmethod
        def get_getter_name():
            return "String"
        # Getter and setter share the same InspectorObject method suffix.
        get_setter_name = get_getter_name
        @staticmethod
        def get_c_initializer():
            return "\"\""
        @staticmethod
        def get_js_bind_type():
            return "string"
        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "String"
            return ValidateMethodParams
        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer
        @staticmethod
        def is_heavy_value():
            # Heavy values are passed by const reference in generated code.
            return True
        @staticmethod
        def get_array_item_raw_c_type_text():
            return "String"
        @staticmethod
        def get_raw_type_model():
            return TypeModel.String
    class Int(BaseType):
        @staticmethod
        def get_getter_name():
            return "Int"
        @staticmethod
        def get_setter_name():
            # JSON has no integer type; stored through the Number setter.
            return "Number"
        @staticmethod
        def get_c_initializer():
            return "0"
        @staticmethod
        def get_js_bind_type():
            return "number"
        @classmethod
        def get_raw_validator_call_text(cls):
            return "RuntimeCastHelper::assertInt"
        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer
        @staticmethod
        def is_heavy_value():
            return False
        @staticmethod
        def get_array_item_raw_c_type_text():
            return "int"
        @staticmethod
        def get_raw_type_model():
            return TypeModel.Int
    class Number(BaseType):
        @staticmethod
        def get_getter_name():
            return "Double"
        @staticmethod
        def get_setter_name():
            return "Number"
        @staticmethod
        def get_c_initializer():
            return "0"
        @staticmethod
        def get_js_bind_type():
            return "number"
        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Number"
            return ValidateMethodParams
        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer
        @staticmethod
        def is_heavy_value():
            return False
        @staticmethod
        def get_array_item_raw_c_type_text():
            return "double"
        @staticmethod
        def get_raw_type_model():
            return TypeModel.Number
    class Bool(BaseType):
        @staticmethod
        def get_getter_name():
            return "Boolean"
        get_setter_name = get_getter_name
        @staticmethod
        def get_c_initializer():
            return "false"
        @staticmethod
        def get_js_bind_type():
            return "boolean"
        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Boolean"
            return ValidateMethodParams
        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer
        @staticmethod
        def is_heavy_value():
            return False
        @staticmethod
        def get_array_item_raw_c_type_text():
            return "bool"
        @staticmethod
        def get_raw_type_model():
            return TypeModel.Bool
    class Object(BaseType):
        @staticmethod
        def get_getter_name():
            return "Object"
        @staticmethod
        def get_setter_name():
            return "Value"
        @staticmethod
        def get_c_initializer():
            return "InspectorObject::create()"
        @staticmethod
        def get_js_bind_type():
            return "object"
        @staticmethod
        def get_output_argument_prefix():
            return ""
        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Object"
            return ValidateMethodParams
        @staticmethod
        def get_output_pass_model():
            # RefPtr-based: passed by reference, not by pointer.
            return RawTypes.OutputPassModel.ByReference
        @staticmethod
        def is_heavy_value():
            return True
        @staticmethod
        def get_array_item_raw_c_type_text():
            return "InspectorObject"
        @staticmethod
        def get_raw_type_model():
            return TypeModel.Object
    class Any(BaseType):
        @staticmethod
        def get_getter_name():
            return "Value"
        get_setter_name = get_getter_name
        @staticmethod
        def get_c_initializer():
            # "any" has no default value in generated code.
            raise Exception("Unsupported")
        @staticmethod
        def get_js_bind_type():
            raise Exception("Unsupported")
        @staticmethod
        def get_raw_validator_call_text():
            return "RuntimeCastHelper::assertAny"
        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByReference
        @staticmethod
        def is_heavy_value():
            return True
        @staticmethod
        def get_array_item_raw_c_type_text():
            return "InspectorValue"
        @staticmethod
        def get_raw_type_model():
            return TypeModel.Any
    class Array(BaseType):
        @staticmethod
        def get_getter_name():
            return "Array"
        @staticmethod
        def get_setter_name():
            return "Value"
        @staticmethod
        def get_c_initializer():
            return "InspectorArray::create()"
        @staticmethod
        def get_js_bind_type():
            return "object"
        @staticmethod
        def get_output_argument_prefix():
            return ""
        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Array"
            return ValidateMethodParams
        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByReference
        @staticmethod
        def is_heavy_value():
            return True
        @staticmethod
        def get_array_item_raw_c_type_text():
            return "InspectorArray"
        @staticmethod
        def get_raw_type_model():
            return TypeModel.Array
def replace_right_shift(input_str):
    """Separate adjacent '>' characters with a space so nested template
    closers never form the C++98 '>>' right-shift token."""
    return "> >".join(input_str.split(">>"))
class CommandReturnPassModel:
    """Strategies for passing a command's return value to the caller:
    by reference (RefPtr-based types), by pointer (plain mandatory values)
    or wrapped in TypeBuilder::OptOutput (optional plain values)."""
    class ByReference:
        def __init__(self, var_type, set_condition):
            self.var_type = var_type
            # Format pattern deciding whether the value gets set, or None
            # when it is set unconditionally.
            self.set_condition = set_condition
        def get_return_var_type(self):
            return self.var_type
        @staticmethod
        def get_output_argument_prefix():
            return ""
        @staticmethod
        def get_output_to_raw_expression():
            return "%s"
        def get_output_parameter_type(self):
            return self.var_type + "&"
        def get_set_return_condition(self):
            return self.set_condition
    class ByPointer:
        def __init__(self, var_type):
            self.var_type = var_type
        def get_return_var_type(self):
            return self.var_type
        @staticmethod
        def get_output_argument_prefix():
            return "&"
        @staticmethod
        def get_output_to_raw_expression():
            return "%s"
        def get_output_parameter_type(self):
            return self.var_type + "*"
        @staticmethod
        def get_set_return_condition():
            # Mandatory value: always set, no condition needed.
            return None
    class OptOutput:
        def __init__(self, var_type):
            self.var_type = var_type
        def get_return_var_type(self):
            return "TypeBuilder::OptOutput<%s>" % self.var_type
        @staticmethod
        def get_output_argument_prefix():
            return "&"
        @staticmethod
        def get_output_to_raw_expression():
            return "%s.getValue()"
        def get_output_parameter_type(self):
            return "TypeBuilder::OptOutput<%s>*" % self.var_type
        @staticmethod
        def get_set_return_condition():
            return "%s.isAssigned()"
class TypeModel:
    """C++-level models of protocol types: how a value of each kind is
    received as an input parameter and returned as a command output.
    Singleton instances (TypeModel.Bool, .Int, ...) are created by
    init_class() below."""
    class RefPtrBased(object):
        # Types backed by RefPtr<...> (objects, arrays, "any").
        def __init__(self, class_name):
            self.class_name = class_name
            self.optional = False
        def get_optional(self):
            # Returns a fresh optional copy; the original stays mandatory.
            result = TypeModel.RefPtrBased(self.class_name)
            result.optional = True
            return result
        def get_command_return_pass_model(self):
            if self.optional:
                set_condition = "%s"
            else:
                set_condition = None
            return CommandReturnPassModel.ByReference(replace_right_shift("RefPtr<%s>" % self.class_name), set_condition)
        def get_input_param_type_text(self):
            return replace_right_shift("PassRefPtr<%s>" % self.class_name)
        @staticmethod
        def get_event_setter_expression_pattern():
            return "%s"
    class Enum(object):
        def __init__(self, base_type_name):
            self.type_name = base_type_name + "::Enum"
        def get_optional(base_self):
            # Note: "base_self" is the enclosing Enum instance, captured by
            # the ad-hoc optional wrapper class below.
            class EnumOptional:
                @classmethod
                def get_optional(cls):
                    return cls
                @staticmethod
                def get_command_return_pass_model():
                    return CommandReturnPassModel.OptOutput(base_self.type_name)
                @staticmethod
                def get_input_param_type_text():
                    return base_self.type_name + "*"
                @staticmethod
                def get_event_setter_expression_pattern():
                    raise Exception("TODO")
            return EnumOptional
        def get_command_return_pass_model(self):
            return CommandReturnPassModel.ByPointer(self.type_name)
        def get_input_param_type_text(self):
            return self.type_name
        @staticmethod
        def get_event_setter_expression_pattern():
            return "%s"
    class ValueType(object):
        # Plain value types (bool, int, double, String).
        def __init__(self, type_name, is_heavy):
            self.type_name = type_name
            # Heavy values are passed by const reference instead of by value.
            self.is_heavy = is_heavy
        def get_optional(self):
            return self.ValueOptional(self)
        def get_command_return_pass_model(self):
            return CommandReturnPassModel.ByPointer(self.type_name)
        def get_input_param_type_text(self):
            if self.is_heavy:
                return "const %s&" % self.type_name
            else:
                return self.type_name
        def get_opt_output_type_(self):
            return self.type_name
        @staticmethod
        def get_event_setter_expression_pattern():
            return "%s"
        class ValueOptional:
            def __init__(self, base):
                self.base = base
            def get_optional(self):
                return self
            def get_command_return_pass_model(self):
                return CommandReturnPassModel.OptOutput(self.base.get_opt_output_type_())
            def get_input_param_type_text(self):
                # Optional inputs arrive as a nullable pointer.
                return "const %s* const" % self.base.type_name
            @staticmethod
            def get_event_setter_expression_pattern():
                return "*%s"
    class ExactlyInt(ValueType):
        # Strict integer wrapper used when EXACTLY_INT_SUPPORTED is True.
        def __init__(self):
            TypeModel.ValueType.__init__(self, "int", False)
        def get_input_param_type_text(self):
            return "TypeBuilder::ExactlyInt"
        def get_opt_output_type_(self):
            return "TypeBuilder::ExactlyInt"
    @classmethod
    def init_class(cls):
        # Create the singleton model instances used throughout generation.
        cls.Bool = cls.ValueType("bool", False)
        if EXACTLY_INT_SUPPORTED:
            cls.Int = cls.ExactlyInt()
        else:
            cls.Int = cls.ValueType("int", False)
        cls.Number = cls.ValueType("double", False)
        cls.String = cls.ValueType("String", True,)
        cls.Object = cls.RefPtrBased("InspectorObject")
        cls.Array = cls.RefPtrBased("InspectorArray")
        cls.Any = cls.RefPtrBased("InspectorValue")
TypeModel.init_class()
# Collection of InspectorObject class methods that are likely to be overloaded in generated class.
# We must explicitly import all overloaded methods or they won't be available to user.
# (Fixed: the original literal listed "setValue" twice; frozenset deduplicates,
# so dropping the duplicate is behavior-identical.)
INSPECTOR_OBJECT_SETTER_NAMES = frozenset(["setValue", "setBoolean", "setNumber", "setString", "setObject", "setArray"])
def fix_type_name(json_name):
    """Return a holder describing the identifier to use for *json_name*.

    The returned class exposes ``class_name`` (the possibly-replaced name
    from TYPE_NAME_FIX_MAP) and ``output_comment(writer)``, which documents
    the rename in generated output when a replacement was applied.
    """
    fixed = TYPE_NAME_FIX_MAP.get(json_name)
    if fixed is not None:
        class Result(object):
            class_name = fixed
            @staticmethod
            def output_comment(writer):
                # Record the original protocol name next to the renamed type.
                writer.newline("// Type originally was named '%s'.\n" % json_name)
    else:
        class Result(object):
            class_name = json_name
            @staticmethod
            def output_comment(writer):
                pass
    return Result
class Writer:
    """Collects text fragments in a shared output list, prefixing the start
    of every new line with this writer's indentation string."""
    def __init__(self, output, indent):
        self.output = output
        self.indent = indent
    def newline(self, str):
        # Begin a fresh line: indentation prefix (when non-empty), then text.
        if self.indent:
            self.output.append(self.indent)
        self.output.append(str)
    def append(self, str):
        # Continue the current line without indenting.
        self.output.append(str)
    def newline_multiline(self, str):
        # First line starts a new indented line; each following non-empty
        # line is indented too.  Line breaks are re-emitted explicitly.
        lines = str.split('\n')
        self.newline(lines[0])
        for line in lines[1:]:
            self.output.append('\n')
            if line:
                self.newline(line)
    def append_multiline(self, str):
        # Like newline_multiline, but the first line continues the current one.
        lines = str.split('\n')
        self.append(lines[0])
        for line in lines[1:]:
            self.output.append('\n')
            if line:
                self.newline(line)
    def get_indent(self):
        return self.indent
    def get_indented(self, additional_indent):
        # A writer over the same buffer with deeper indentation.
        return Writer(self.output, self.indent + additional_indent)
    def insert_writer(self, additional_indent):
        # Reserve an in-place insertion point: the nested list appended here
        # can be filled later through the returned writer.
        placeholder = []
        self.output.append(placeholder)
        return Writer(placeholder, self.indent + additional_indent)
class EnumConstants:
    """Registry that assigns a stable integer position to every distinct
    enum string constant seen during generation.  State is class-level and
    shared across the whole run."""
    map_ = {}
    constants_ = []
    @classmethod
    def add_constant(cls, value):
        # Return the position already assigned to value, registering it at
        # the next free slot on first use.
        if value not in cls.map_:
            cls.map_[value] = len(cls.constants_)
            cls.constants_.append(value)
        return cls.map_[value]
    @classmethod
    def get_enum_constant_code(cls):
        # Render every registered constant as an indented, quoted C string
        # literal, one per line, comma-separated.
        rendered = ["    \"%s\"" % item for item in cls.constants_]
        return ",\n".join(rendered) + "\n"
# Typebuilder code is generated in several passes: first typedefs, then other classes.
# Manual pass management is needed because we cannot have forward declarations for typedefs.
class TypeBuilderPass:
    """Identifiers for the type-builder generation passes."""
    TYPEDEF = "typedef"
    MAIN = "main"
class TypeBindings:
@staticmethod
def create_named_type_declaration(json_typable, context_domain_name, type_data):
json_type = type_data.get_json_type()
class Helper:
is_ad_hoc = False
full_name_prefix_for_use = "TypeBuilder::" + context_domain_name + "::"
full_name_prefix_for_impl = "TypeBuilder::" + context_domain_name + "::"
@staticmethod
def write_doc(writer):
if "description" in json_type:
writer.newline("/* ")
writer.append(json_type["description"])
writer.append(" */\n")
@staticmethod
def add_to_forward_listener(forward_listener):
forward_listener.add_type_data(type_data)
fixed_type_name = fix_type_name(json_type["id"])
return TypeBindings.create_type_declaration_(json_typable, context_domain_name, fixed_type_name, Helper)
@staticmethod
def create_ad_hoc_type_declaration(json_typable, context_domain_name, ad_hoc_type_context):
class Helper:
is_ad_hoc = True
full_name_prefix_for_use = ad_hoc_type_context.container_relative_name_prefix
full_name_prefix_for_impl = ad_hoc_type_context.container_full_name_prefix
@staticmethod
def write_doc(writer):
pass
@staticmethod
def add_to_forward_listener(forward_listener):
pass
fixed_type_name = ad_hoc_type_context.get_type_name_fix()
return TypeBindings.create_type_declaration_(json_typable, context_domain_name, fixed_type_name, Helper)
@staticmethod
def create_type_declaration_(json_typable, context_domain_name, fixed_type_name, helper):
if json_typable["type"] == "string":
if "enum" in json_typable:
class EnumBinding:
need_user_runtime_cast_ = False
need_internal_runtime_cast_ = False
@classmethod
def resolve_inner(cls, resolve_context):
pass
@classmethod
def request_user_runtime_cast(cls, request):
if request:
cls.need_user_runtime_cast_ = True
request.acknowledge()
@classmethod
def request_internal_runtime_cast(cls):
cls.need_internal_runtime_cast_ = True
@classmethod
def get_code_generator(enum_binding_cls):
#FIXME: generate ad-hoc enums too once we figure out how to better implement them in C++.
comment_out = helper.is_ad_hoc
class CodeGenerator:
@staticmethod
def generate_type_builder(writer, generate_context):
enum = json_typable["enum"]
helper.write_doc(writer)
enum_name = fixed_type_name.class_name
fixed_type_name.output_comment(writer)
writer.newline("struct ")
writer.append(enum_name)
writer.append(" {\n")
writer.newline(" enum Enum {\n")
for enum_item in enum:
enum_pos = EnumConstants.add_constant(enum_item)
item_c_name = fix_camel_case(enum_item)
if item_c_name in TYPE_NAME_FIX_MAP:
item_c_name = TYPE_NAME_FIX_MAP[item_c_name]
writer.newline(" ")
writer.append(item_c_name)
writer.append(" = ")
writer.append("%s" % enum_pos)
writer.append(",\n")
writer.newline(" };\n")
if enum_binding_cls.need_user_runtime_cast_:
raise Exception("Not yet implemented")
if enum_binding_cls.need_internal_runtime_cast_:
writer.append("#if %s\n" % VALIDATOR_IFDEF_NAME)
writer.newline(" static void assertCorrectValue(InspectorValue* value);\n")
writer.append("#endif // %s\n" % VALIDATOR_IFDEF_NAME)
validator_writer = generate_context.validator_writer
domain_fixes = DomainNameFixes.get_fixed_data(context_domain_name)
domain_guard = domain_fixes.get_guard()
if domain_guard:
domain_guard.generate_open(validator_writer)
validator_writer.newline("void %s%s::assertCorrectValue(InspectorValue* value)\n" % (helper.full_name_prefix_for_impl, enum_name))
validator_writer.newline("{\n")
validator_writer.newline(" WTF::String s;\n")
validator_writer.newline(" bool cast_res = value->asString(&s);\n")
validator_writer.newline(" ASSERT(cast_res);\n")
if len(enum) > 0:
condition_list = []
for enum_item in enum:
enum_pos = EnumConstants.add_constant(enum_item)
condition_list.append("s == \"%s\"" % enum_item)
validator_writer.newline(" ASSERT(%s);\n" % " || ".join(condition_list))
validator_writer.newline("}\n")
if domain_guard:
domain_guard.generate_close(validator_writer)
validator_writer.newline("\n\n")
writer.newline("}; // struct ")
writer.append(enum_name)
writer.append("\n\n")
@staticmethod
def register_use(forward_listener):
pass
@staticmethod
def get_generate_pass_id():
return TypeBuilderPass.MAIN
return CodeGenerator
@classmethod
def get_validator_call_text(cls):
return helper.full_name_prefix_for_use + fixed_type_name.class_name + "::assertCorrectValue"
@classmethod
def get_array_item_c_type_text(cls):
return helper.full_name_prefix_for_use + fixed_type_name.class_name + "::Enum"
@staticmethod
def get_setter_value_expression_pattern():
return "TypeBuilder::getEnumConstantValue(%s)"
@staticmethod
def reduce_to_raw_type():
return RawTypes.String
@staticmethod
def get_type_model():
return TypeModel.Enum(helper.full_name_prefix_for_use + fixed_type_name.class_name)
return EnumBinding
else:
if helper.is_ad_hoc:
class PlainString:
@classmethod
def resolve_inner(cls, resolve_context):
pass
@staticmethod
def request_user_runtime_cast(request):
raise Exception("Unsupported")
@staticmethod
def request_internal_runtime_cast():
pass
@staticmethod
def get_code_generator():
return None
@classmethod
def get_validator_call_text(cls):
return RawTypes.String.get_raw_validator_call_text()
@staticmethod
def reduce_to_raw_type():
return RawTypes.String
@staticmethod
def get_type_model():
return TypeModel.String
@staticmethod
def get_setter_value_expression_pattern():
return None
@classmethod
def get_array_item_c_type_text(cls):
return cls.reduce_to_raw_type().get_array_item_raw_c_type_text()
return PlainString
else:
class TypedefString:
@classmethod
def resolve_inner(cls, resolve_context):
pass
@staticmethod
def request_user_runtime_cast(request):
raise Exception("Unsupported")
@staticmethod
def request_internal_runtime_cast():
RawTypes.String.request_raw_internal_runtime_cast()
@staticmethod
def get_code_generator():
class CodeGenerator:
@staticmethod
def generate_type_builder(writer, generate_context):
helper.write_doc(writer)
fixed_type_name.output_comment(writer)
writer.newline("typedef String ")
writer.append(fixed_type_name.class_name)
writer.append(";\n\n")
@staticmethod
def register_use(forward_listener):
pass
@staticmethod
def get_generate_pass_id():
return TypeBuilderPass.TYPEDEF
return CodeGenerator
@classmethod
def get_validator_call_text(cls):
return RawTypes.String.get_raw_validator_call_text()
@staticmethod
def reduce_to_raw_type():
return RawTypes.String
@staticmethod
def get_type_model():
return TypeModel.ValueType("%s%s" % (helper.full_name_prefix_for_use, fixed_type_name.class_name), True)
@staticmethod
def get_setter_value_expression_pattern():
return None
@classmethod
def get_array_item_c_type_text(cls):
return "const %s%s&" % (helper.full_name_prefix_for_use, fixed_type_name.class_name)
return TypedefString
elif json_typable["type"] == "object":
if "properties" in json_typable:
class ClassBinding:
resolve_data_ = None
need_user_runtime_cast_ = False
need_internal_runtime_cast_ = False
@classmethod
def resolve_inner(cls, resolve_context):
if cls.resolve_data_:
return
properties = json_typable["properties"]
main = []
optional = []
ad_hoc_type_list = []
for prop in properties:
prop_name = prop["name"]
ad_hoc_type_context = cls.AdHocTypeContextImpl(prop_name, fixed_type_name.class_name, resolve_context, ad_hoc_type_list, helper.full_name_prefix_for_impl)
binding = resolve_param_type(prop, context_domain_name, ad_hoc_type_context)
code_generator = binding.get_code_generator()
if code_generator:
code_generator.register_use(resolve_context.forward_listener)
class PropertyData:
param_type_binding = binding
p = prop
if prop.get("optional"):
optional.append(PropertyData)
else:
main.append(PropertyData)
class ResolveData:
main_properties = main
optional_properties = optional
ad_hoc_types = ad_hoc_type_list
cls.resolve_data_ = ResolveData
for ad_hoc in ad_hoc_type_list:
ad_hoc.resolve_inner(resolve_context)
@classmethod
def request_user_runtime_cast(cls, request):
if not request:
return
cls.need_user_runtime_cast_ = True
request.acknowledge()
cls.request_internal_runtime_cast()
@classmethod
def request_internal_runtime_cast(cls):
if cls.need_internal_runtime_cast_:
return
cls.need_internal_runtime_cast_ = True
for p in cls.resolve_data_.main_properties:
p.param_type_binding.request_internal_runtime_cast()
for p in cls.resolve_data_.optional_properties:
p.param_type_binding.request_internal_runtime_cast()
@classmethod
def get_code_generator(class_binding_cls):
class CodeGenerator:
@classmethod
def generate_type_builder(cls, writer, generate_context):
resolve_data = class_binding_cls.resolve_data_
helper.write_doc(writer)
class_name = fixed_type_name.class_name
is_open_type = (context_domain_name + "." + class_name) in TYPES_WITH_OPEN_FIELD_LIST_SET
fixed_type_name.output_comment(writer)
writer.newline("class ")
writer.append(class_name)
writer.append(" : public ")
if is_open_type:
writer.append("InspectorObject")
else:
writer.append("InspectorObjectBase")
writer.append(" {\n")
writer.newline("public:\n")
ad_hoc_type_writer = writer.insert_writer(" ")
for ad_hoc_type in resolve_data.ad_hoc_types:
code_generator = ad_hoc_type.get_code_generator()
if code_generator:
code_generator.generate_type_builder(ad_hoc_type_writer, generate_context)
writer.newline_multiline(
""" enum {
NoFieldsSet = 0,
""")
state_enum_items = []
if len(resolve_data.main_properties) > 0:
pos = 0
for prop_data in resolve_data.main_properties:
item_name = Capitalizer.lower_camel_case_to_upper(prop_data.p["name"]) + "Set"
state_enum_items.append(item_name)
writer.newline(" %s = 1 << %s,\n" % (item_name, pos))
pos += 1
all_fields_set_value = "(" + (" | ".join(state_enum_items)) + ")"
else:
all_fields_set_value = "0"
writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_1
% (all_fields_set_value, class_name, class_name))
pos = 0
for prop_data in resolve_data.main_properties:
prop_name = prop_data.p["name"]
param_type_binding = prop_data.param_type_binding
param_raw_type = param_type_binding.reduce_to_raw_type()
writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_2
% (state_enum_items[pos],
Capitalizer.lower_camel_case_to_upper(prop_name),
param_type_binding.get_type_model().get_input_param_type_text(),
state_enum_items[pos], prop_name,
param_raw_type.get_setter_name(), prop_name,
format_setter_value_expression(param_type_binding, "value"),
state_enum_items[pos]))
pos += 1
writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_3
% (class_name, class_name, class_name, class_name, class_name))
writer.newline(" /*\n")
writer.newline(" * Synthetic constructor:\n")
writer.newline(" * RefPtr<%s> result = %s::create()" % (class_name, class_name))
for prop_data in resolve_data.main_properties:
writer.append_multiline("\n * .set%s(...)" % Capitalizer.lower_camel_case_to_upper(prop_data.p["name"]))
writer.append_multiline(";\n */\n")
writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_4)
writer.newline(" typedef TypeBuilder::StructItemTraits ItemTraits;\n")
for prop_data in resolve_data.optional_properties:
prop_name = prop_data.p["name"]
param_type_binding = prop_data.param_type_binding
setter_name = "set%s" % Capitalizer.lower_camel_case_to_upper(prop_name)
writer.append_multiline("\n void %s" % setter_name)
writer.append("(%s value)\n" % param_type_binding.get_type_model().get_input_param_type_text())
writer.newline(" {\n")
writer.newline(" this->set%s(\"%s\", %s);\n"
% (param_type_binding.reduce_to_raw_type().get_setter_name(), prop_data.p["name"],
format_setter_value_expression(param_type_binding, "value")))
writer.newline(" }\n")
if setter_name in INSPECTOR_OBJECT_SETTER_NAMES:
writer.newline(" using InspectorObjectBase::%s;\n\n" % setter_name)
if class_binding_cls.need_user_runtime_cast_:
writer.newline(" static PassRefPtr<%s> runtimeCast(PassRefPtr<InspectorValue> value)\n" % class_name)
writer.newline(" {\n")
writer.newline(" RefPtr<InspectorObject> object;\n")
writer.newline(" bool castRes = value->asObject(&object);\n")
writer.newline(" ASSERT_UNUSED(castRes, castRes);\n")
writer.append("#if %s\n" % VALIDATOR_IFDEF_NAME)
writer.newline(" assertCorrectValue(object.get());\n")
writer.append("#endif // %s\n" % VALIDATOR_IFDEF_NAME)
writer.newline(" COMPILE_ASSERT(sizeof(%s) == sizeof(InspectorObjectBase), type_cast_problem);\n" % class_name)
writer.newline(" return static_cast<%s*>(static_cast<InspectorObjectBase*>(object.get()));\n" % class_name)
writer.newline(" }\n")
writer.append("\n")
if class_binding_cls.need_internal_runtime_cast_:
writer.append("#if %s\n" % VALIDATOR_IFDEF_NAME)
writer.newline(" static void assertCorrectValue(InspectorValue* value);\n")
writer.append("#endif // %s\n" % VALIDATOR_IFDEF_NAME)
closed_field_set = (context_domain_name + "." + class_name) not in TYPES_WITH_OPEN_FIELD_LIST_SET
validator_writer = generate_context.validator_writer
domain_fixes = DomainNameFixes.get_fixed_data(context_domain_name)
domain_guard = domain_fixes.get_guard()
if domain_guard:
domain_guard.generate_open(validator_writer)
validator_writer.newline("void %s%s::assertCorrectValue(InspectorValue* value)\n" % (helper.full_name_prefix_for_impl, class_name))
validator_writer.newline("{\n")
validator_writer.newline(" RefPtr<InspectorObject> object;\n")
validator_writer.newline(" bool castRes = value->asObject(&object);\n")
validator_writer.newline(" ASSERT_UNUSED(castRes, castRes);\n")
for prop_data in resolve_data.main_properties:
validator_writer.newline(" {\n")
it_name = "%sPos" % prop_data.p["name"]
validator_writer.newline(" InspectorObject::iterator %s;\n" % it_name)
validator_writer.newline(" %s = object->find(\"%s\");\n" % (it_name, prop_data.p["name"]))
validator_writer.newline(" ASSERT(%s != object->end());\n" % it_name)
validator_writer.newline(" %s(%s->value.get());\n" % (prop_data.param_type_binding.get_validator_call_text(), it_name))
validator_writer.newline(" }\n")
if closed_field_set:
validator_writer.newline(" int foundPropertiesCount = %s;\n" % len(resolve_data.main_properties))
for prop_data in resolve_data.optional_properties:
validator_writer.newline(" {\n")
it_name = "%sPos" % prop_data.p["name"]
validator_writer.newline(" InspectorObject::iterator %s;\n" % it_name)
validator_writer.newline(" %s = object->find(\"%s\");\n" % (it_name, prop_data.p["name"]))
validator_writer.newline(" if (%s != object->end()) {\n" % it_name)
validator_writer.newline(" %s(%s->value.get());\n" % (prop_data.param_type_binding.get_validator_call_text(), it_name))
if closed_field_set:
validator_writer.newline(" ++foundPropertiesCount;\n")
validator_writer.newline(" }\n")
validator_writer.newline(" }\n")
if closed_field_set:
validator_writer.newline(" if (foundPropertiesCount != object->size()) {\n")
validator_writer.newline(" FATAL(\"Unexpected properties in object: %s\\n\", object->toJSONString().ascii().data());\n")
validator_writer.newline(" }\n")
validator_writer.newline("}\n")
if domain_guard:
domain_guard.generate_close(validator_writer)
validator_writer.newline("\n\n")
if is_open_type:
cpp_writer = generate_context.cpp_writer
writer.append("\n")
writer.newline(" // Property names for type generated as open.\n")
for prop_data in resolve_data.main_properties + resolve_data.optional_properties:
prop_name = prop_data.p["name"]
prop_field_name = Capitalizer.lower_camel_case_to_upper(prop_name)
writer.newline(" static const char* %s;\n" % (prop_field_name))
cpp_writer.newline("const char* %s%s::%s = \"%s\";\n" % (helper.full_name_prefix_for_impl, class_name, prop_field_name, prop_name))
writer.newline("};\n\n")
# NOTE(review): indentation appears to have been stripped in this copy of the
# file; the lines below are the remaining static methods of the nested
# CodeGenerator class (its start is above this excerpt).  `fixed_type_name`
# and `helper` are closure variables from the enclosing factory function.
@staticmethod
def generate_forward_declaration(writer):
# Emit "class <Name>;" for the forward-declaration section of the header.
class_name = fixed_type_name.class_name
writer.newline("class ")
writer.append(class_name)
writer.append(";\n")
@staticmethod
def register_use(forward_listener):
# Record that this type is referenced so a forward declaration gets emitted.
helper.add_to_forward_listener(forward_listener)
@staticmethod
def get_generate_pass_id():
# This type's builder is generated during the main pass (not the typedef pass).
return TypeBuilderPass.MAIN
return CodeGenerator
# Remaining static/class methods of the class-based object binding.
# `helper` / `fixed_type_name` are closure variables defined above this excerpt.
@staticmethod
def get_validator_call_text():
# Generated C++ types expose a static assertCorrectValue used by validators.
return helper.full_name_prefix_for_use + fixed_type_name.class_name + "::assertCorrectValue"
@classmethod
def get_array_item_c_type_text(cls):
return helper.full_name_prefix_for_use + fixed_type_name.class_name
@staticmethod
def get_setter_value_expression_pattern():
# No conversion wrapper is needed when setting values of this type.
return None
@staticmethod
def reduce_to_raw_type():
# At the protocol level this binding is carried as a JSON object.
return RawTypes.Object
@staticmethod
def get_type_model():
return TypeModel.RefPtrBased(helper.full_name_prefix_for_use + fixed_type_name.class_name)
# Context object handed to anonymous (ad hoc) types nested inside a generated
# class: tells them what name prefix to use and collects their bindings.
class AdHocTypeContextImpl:
def __init__(self, property_name, class_name, resolve_context, ad_hoc_type_list, parent_full_name_prefix):
self.property_name = property_name
self.class_name = class_name
self.resolve_context = resolve_context
self.ad_hoc_type_list = ad_hoc_type_list
# Nested types are qualified as <parent>::<class>::.
self.container_full_name_prefix = parent_full_name_prefix + class_name + "::"
self.container_relative_name_prefix = ""
def get_type_name_fix(self):
# Ad hoc types have no protocol name; derive one from the property name.
class NameFix:
class_name = Capitalizer.lower_camel_case_to_upper(self.property_name)
@staticmethod
def output_comment(writer):
# `self` is reachable here because nested functions skip the class
# scope and close over get_type_name_fix's locals.
writer.newline("// Named after property name '%s' while generating %s.\n" % (self.property_name, self.class_name))
return NameFix
def add_type(self, binding):
self.ad_hoc_type_list.append(binding)
return ClassBinding
# Branch of the enclosing type-declaration factory (its `if` is above this
# excerpt): objects without generated class wrappers are bound as plain
# InspectorObject values.
else:
class PlainObjectBinding:
@classmethod
def resolve_inner(cls, resolve_context):
# Plain objects have no members to resolve.
pass
@staticmethod
def request_user_runtime_cast(request):
pass
@staticmethod
def request_internal_runtime_cast():
# Delegate to the raw Object type's cast machinery.
RawTypes.Object.request_raw_internal_runtime_cast()
@staticmethod
def get_code_generator():
# No dedicated C++ class is generated for a plain object.
pass
@staticmethod
def get_validator_call_text():
return "RuntimeCastHelper::assertType<InspectorValue::TypeObject>"
@classmethod
def get_array_item_c_type_text(cls):
return cls.reduce_to_raw_type().get_array_item_raw_c_type_text()
@staticmethod
def get_setter_value_expression_pattern():
return None
@staticmethod
def reduce_to_raw_type():
return RawTypes.Object
@staticmethod
def get_type_model():
return TypeModel.Object
return PlainObjectBinding
# Branch of the enclosing type-declaration factory: typed arrays get a
# TypeBuilder::Array<Item> binding; arrays without "items" fall through to the
# raw Array type at the bottom.
elif json_typable["type"] == "array":
if "items" in json_typable:
ad_hoc_types = []
# Minimal context for resolving the item type; collects any anonymous
# types declared inline under "items".
class AdHocTypeContext:
container_full_name_prefix = "<not yet defined>"
container_relative_name_prefix = ""
@staticmethod
def get_type_name_fix():
return fixed_type_name
@staticmethod
def add_type(binding):
ad_hoc_types.append(binding)
item_binding = resolve_param_type(json_typable["items"], context_domain_name, AdHocTypeContext)
class ArrayBinding:
# Lazily-filled resolution state (class-level cache).
resolve_data_ = None
need_internal_runtime_cast_ = False
@classmethod
def resolve_inner(cls, resolve_context):
if cls.resolve_data_:
return
class ResolveData:
item_type_binding = item_binding
ad_hoc_type_list = ad_hoc_types
cls.resolve_data_ = ResolveData
for t in ad_hoc_types:
t.resolve_inner(resolve_context)
@classmethod
def request_user_runtime_cast(cls, request):
raise Exception("Not implemented yet")
@classmethod
def request_internal_runtime_cast(cls):
# Guard so the request is propagated to the item type only once.
if cls.need_internal_runtime_cast_:
return
cls.need_internal_runtime_cast_ = True
cls.resolve_data_.item_type_binding.request_internal_runtime_cast()
@classmethod
def get_code_generator(array_binding_cls):
class CodeGenerator:
@staticmethod
def generate_type_builder(writer, generate_context):
# The array itself generates nothing; only its inline ad hoc
# item types need builders.
ad_hoc_type_writer = writer
resolve_data = array_binding_cls.resolve_data_
for ad_hoc_type in resolve_data.ad_hoc_type_list:
code_generator = ad_hoc_type.get_code_generator()
if code_generator:
code_generator.generate_type_builder(ad_hoc_type_writer, generate_context)
@staticmethod
def generate_forward_declaration(writer):
pass
@staticmethod
def register_use(forward_listener):
item_code_generator = item_binding.get_code_generator()
if item_code_generator:
item_code_generator.register_use(forward_listener)
@staticmethod
def get_generate_pass_id():
return TypeBuilderPass.MAIN
return CodeGenerator
@classmethod
def get_validator_call_text(cls):
return cls.get_array_item_c_type_text() + "::assertCorrectValue"
@classmethod
def get_array_item_c_type_text(cls):
# replace_right_shift works around nested ">>" in C++ templates.
return replace_right_shift("TypeBuilder::Array<%s>" % cls.resolve_data_.item_type_binding.get_array_item_c_type_text())
@staticmethod
def get_setter_value_expression_pattern():
return None
@staticmethod
def reduce_to_raw_type():
return RawTypes.Array
@classmethod
def get_type_model(cls):
return TypeModel.RefPtrBased(cls.get_array_item_c_type_text())
return ArrayBinding
else:
# Fall-through to raw type.
pass
raw_type = RawTypes.get(json_typable["type"])
return RawTypeBinding(raw_type)
class RawTypeBinding:
    """Binding for a bare protocol type: every query is delegated straight
    to the wrapped RawTypes entry, with no generated C++ wrapper class."""

    def __init__(self, raw_type):
        self._raw_type = raw_type

    def resolve_inner(self, resolve_context):
        # Raw types carry no nested structure, so there is nothing to resolve.
        pass

    def request_user_runtime_cast(self, request):
        # runtimeCast can only be requested for generated class types.
        raise Exception("Unsupported")

    def request_internal_runtime_cast(self):
        self._raw_type.request_raw_internal_runtime_cast()

    def get_code_generator(self):
        # No dedicated builder code is emitted for raw types.
        return None

    def get_validator_call_text(self):
        return self._raw_type.get_raw_validator_call_text()

    def get_array_item_c_type_text(self):
        return self._raw_type.get_array_item_raw_c_type_text()

    def get_setter_value_expression_pattern(self):
        return None

    def reduce_to_raw_type(self):
        return self._raw_type

    def get_type_model(self):
        return self._raw_type.get_raw_type_model()
class TypeData(object):
    """Metadata for one named type declared in a protocol domain's "types" list.

    The C++ binding is created lazily by get_binding(), because resolving one
    named type may require resolving other named types first; a re-entrancy
    flag detects cyclic resolution.
    """

    def __init__(self, json_type, json_domain, domain_data):
        self.json_type_ = json_type
        self.json_domain_ = json_domain
        self.domain_data_ = domain_data
        if "type" not in json_type:
            raise Exception("Unknown type")
        json_type_name = json_type["type"]
        raw_type = RawTypes.get(json_type_name)
        self.raw_type_ = raw_type
        self.binding_being_resolved_ = False
        self.binding_ = None

    def get_raw_type(self):
        return self.raw_type_

    def get_binding(self):
        if not self.binding_:
            if self.binding_being_resolved_:
                # Fixed: this previously raised `Error(...)`, but no `Error`
                # name is defined in this file, so hitting the cycle path
                # produced a NameError instead of the intended diagnostic.
                raise Exception("Type %s is already being resolved" % self.json_type_["type"])
            # Resolve only lazily, because resolving one named type may require
            # resolving some other named type.
            self.binding_being_resolved_ = True
            try:
                self.binding_ = TypeBindings.create_named_type_declaration(self.json_type_, self.json_domain_["domain"], self)
            finally:
                self.binding_being_resolved_ = False
        return self.binding_

    def get_json_type(self):
        return self.json_type_

    def get_name(self):
        return self.json_type_["id"]

    def get_domain_name(self):
        return self.json_domain_["domain"]
class DomainData:
    """Collects the TypeData objects declared by one protocol domain."""

    def __init__(self, json_domain):
        self.json_domain = json_domain
        self.types_ = []

    def name(self):
        """Domain name as declared in the JSON description."""
        return self.json_domain["domain"]

    def types(self):
        """All TypeData objects registered so far, in declaration order."""
        return self.types_

    def add_type(self, type_data):
        self.types_.append(type_data)
class TypeMap:
    """Index of every named protocol type, addressable by (domain, type name).

    Built once from the parsed JSON API description; also keeps the list of
    DomainData objects in declaration order.
    """

    def __init__(self, api):
        self.map_ = {}
        self.domains_ = []
        for json_domain in api["domains"]:
            domain_data = DomainData(json_domain)
            self.domains_.append(domain_data)
            per_domain = {}
            self.map_[json_domain["domain"]] = per_domain
            # Domains without a "types" section simply get an empty map.
            for json_type in json_domain.get("types", []):
                type_data = TypeData(json_type, json_domain, domain_data)
                per_domain[json_type["id"]] = type_data
                domain_data.add_type(type_data)

    def domains(self):
        return self.domains_

    def get(self, domain_name, type_name):
        return self.map_[domain_name][type_name]
def resolve_param_type(json_parameter, scope_domain_name, ad_hoc_type_context):
    """Return the type binding for a parameter description.

    A "$ref" points at a named type; an inline "type" creates an ad hoc
    binding which is also registered with the given context.
    """
    if "$ref" in json_parameter:
        return get_ref_data(json_parameter["$ref"], scope_domain_name).get_binding()
    if "type" in json_parameter:
        binding = TypeBindings.create_ad_hoc_type_declaration(json_parameter, scope_domain_name, ad_hoc_type_context)
        ad_hoc_type_context.add_type(binding)
        return binding
    raise Exception("Unknown type")
def resolve_param_raw_type(json_parameter, scope_domain_name):
    """Return the RawTypes entry for a parameter, following "$ref" if present."""
    if "$ref" in json_parameter:
        return get_ref_data(json_parameter["$ref"], scope_domain_name).get_raw_type()
    if "type" in json_parameter:
        return RawTypes.get(json_parameter["type"])
    raise Exception("Unknown type")
def get_ref_data(json_ref, scope_domain_name):
    """Look up a "$ref" value, which is either "Type" (relative to the current
    domain) or a fully qualified "Domain.Type"."""
    domain_name, dot, type_name = json_ref.partition(".")
    if not dot:
        # No dot: the whole string is a type name in the current domain.
        domain_name, type_name = scope_domain_name, json_ref
    return type_map.get(domain_name, type_name)
# Read and parse the protocol description JSON.  A context manager replaces
# the original open()/read() pair, which never closed the file handle.
with open(input_json_filename, "r") as input_file:
    json_string = input_file.read()
json_api = json.loads(json_string)
# Namespace of string.Template objects for every generated artifact; the raw
# template text lives in CodeGeneratorInspectorStrings (imported elsewhere in
# this file).  Everything here is evaluated once at class-definition time.
class Templates:
def get_this_script_path_(absolute_path):
# Produce a short repository-relative path of this script for the
# "generated by" banner, rather than the machine-specific absolute path.
absolute_path = os.path.abspath(absolute_path)
components = []
def fill_recursive(path_part, depth):
if depth <= 0 or path_part == '/':
return
fill_recursive(os.path.dirname(path_part), depth - 1)
components.append(os.path.basename(path_part))
# Typical path is /Source/WebCore/inspector/CodeGeneratorInspector.py
# Let's take 4 components from the real path then.
fill_recursive(absolute_path, 4)
return "/".join(components)
# Common header prepended to every generated file.
file_header_ = ("// File is generated by %s\n\n" % get_this_script_path_(sys.argv[0]) +
"""// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
""")
frontend_domain_class = string.Template(CodeGeneratorInspectorStrings.frontend_domain_class)
backend_method = string.Template(CodeGeneratorInspectorStrings.backend_method)
frontend_method = string.Template(CodeGeneratorInspectorStrings.frontend_method)
callback_method = string.Template(CodeGeneratorInspectorStrings.callback_method)
frontend_h = string.Template(file_header_ + CodeGeneratorInspectorStrings.frontend_h)
backend_h = string.Template(file_header_ + CodeGeneratorInspectorStrings.backend_h)
backend_cpp = string.Template(file_header_ + CodeGeneratorInspectorStrings.backend_cpp)
frontend_cpp = string.Template(file_header_ + CodeGeneratorInspectorStrings.frontend_cpp)
typebuilder_h = string.Template(file_header_ + CodeGeneratorInspectorStrings.typebuilder_h)
typebuilder_cpp = string.Template(file_header_ + CodeGeneratorInspectorStrings.typebuilder_cpp)
backend_js = string.Template(file_header_ + CodeGeneratorInspectorStrings.backend_js)
param_container_access_code = CodeGeneratorInspectorStrings.param_container_access_code
# Global index of all named protocol types, built from the parsed JSON.
type_map = TypeMap(json_api)
class NeedRuntimeCastRequest:
    """One-shot flag recording whether a requested runtimeCast was honored."""

    def __init__(self):
        # None until acknowledge() is called; True afterwards.
        self.ack_ = None

    def acknowledge(self):
        """Mark the request as satisfied by the generator."""
        self.ack_ = True

    def is_acknowledged(self):
        return self.ack_
# Resolve every named type up front and wire up runtimeCast generation for the
# types listed in TYPES_WITH_RUNTIME_CAST_SET (a module-level set defined
# elsewhere in this file).  Returns the ForwardListener so later passes know
# which types still need forward declarations.
def resolve_all_types():
runtime_cast_generate_requests = {}
for type_name in TYPES_WITH_RUNTIME_CAST_SET:
runtime_cast_generate_requests[type_name] = NeedRuntimeCastRequest()
# Collects types that are referenced before (or without) being declared,
# so forward declarations can be emitted for them.
class ForwardListener:
type_data_set = set()
already_declared_set = set()
@classmethod
def add_type_data(cls, type_data):
if type_data not in cls.already_declared_set:
cls.type_data_set.add(type_data)
class ResolveContext:
forward_listener = ForwardListener
# First pass: force creation and inner resolution of every binding.
for domain_data in type_map.domains():
for type_data in domain_data.types():
# Do not generate forwards for this type any longer.
ForwardListener.already_declared_set.add(type_data)
binding = type_data.get_binding()
binding.resolve_inner(ResolveContext)
# Second pass: hand runtimeCast requests to the matching bindings and
# verify each request was actually honored.
for domain_data in type_map.domains():
for type_data in domain_data.types():
full_type_name = "%s.%s" % (type_data.get_domain_name(), type_data.get_name())
request = runtime_cast_generate_requests.pop(full_type_name, None)
binding = type_data.get_binding()
if request:
binding.request_user_runtime_cast(request)
if request and not request.is_acknowledged():
raise Exception("Failed to generate runtimeCast in " + full_type_name)
# `verification` is a module-level flag (set from the command line, outside
# this excerpt); leftover requests mean a listed type does not exist.
if verification:
for full_type_name in runtime_cast_generate_requests:
raise Exception("Failed to generate runtimeCast. Type " + full_type_name + " not found")
return ForwardListener
global_forward_listener = resolve_all_types()
def get_annotated_type_text(raw_type, annotated_type):
    """Return raw_type, prefixed with a C++ /*...*/ comment naming the
    annotated alias whenever it differs from the raw spelling."""
    if annotated_type == raw_type:
        return raw_type
    return "/*%s*/ %s" % (annotated_type, raw_type)
def format_setter_value_expression(param_type_binding, value_ref):
    """Wrap value_ref in the binding's setter conversion pattern, when the
    binding defines one; otherwise pass the reference through unchanged."""
    pattern = param_type_binding.get_setter_value_expression_pattern()
    return pattern % value_ref if pattern else value_ref
# Main code-generation workhorse.  All fields are class-level accumulator
# lists; each process_* method appends generated text fragments to them and
# the module tail joins them into the output files.
class Generator:
frontend_class_field_lines = []
frontend_domain_class_lines = []
method_name_enum_list = []
backend_method_declaration_list = []
backend_method_implementation_list = []
backend_method_name_declaration_list = []
method_handler_list = []
frontend_method_list = []
# JavaScript-side registration lines for InspectorBackendCommands.js.
backend_js_domain_initializer_list = []
backend_virtual_setters_list = []
backend_agent_interface_list = []
backend_setters_list = []
backend_constructor_init_list = []
backend_field_list = []
frontend_constructor_init_list = []
# C++ type-builder pieces (declarations, forwards, validators, impls).
type_builder_fragments = []
type_builder_forwards = []
validator_impl_list = []
type_builder_impl_list = []
@staticmethod
def go():
# Entry point: process all named types first, then walk every domain in
# the JSON description and generate its enums, events and commands.
Generator.process_types(type_map)
# Lists that must be wrapped in per-domain feature guards (#if/#endif)
# when the domain has one; closed again in reverse below.
first_cycle_guardable_list_list = [
Generator.backend_method_declaration_list,
Generator.backend_method_implementation_list,
Generator.backend_method_name_declaration_list,
Generator.backend_agent_interface_list,
Generator.frontend_class_field_lines,
Generator.frontend_constructor_init_list,
Generator.frontend_domain_class_lines,
Generator.frontend_method_list,
Generator.method_handler_list,
Generator.method_name_enum_list,
Generator.backend_constructor_init_list,
Generator.backend_virtual_setters_list,
Generator.backend_setters_list,
Generator.backend_field_list]
for json_domain in json_api["domains"]:
domain_name = json_domain["domain"]
domain_name_lower = domain_name.lower()
domain_fixes = DomainNameFixes.get_fixed_data(domain_name)
domain_guard = domain_fixes.get_guard()
if domain_guard:
for l in first_cycle_guardable_list_list:
domain_guard.generate_open(l)
agent_field_name = domain_fixes.agent_field_name
frontend_method_declaration_lines = []
Generator.backend_js_domain_initializer_list.append("// %s.\n" % domain_name)
if not domain_fixes.skip_js_bind:
Generator.backend_js_domain_initializer_list.append("InspectorBackend.register%sDispatcher = InspectorBackend.registerDomainDispatcher.bind(InspectorBackend, \"%s\");\n" % (domain_name, domain_name))
# Register string enums declared either as named types or inline on
# object-type properties.
if "types" in json_domain:
for json_type in json_domain["types"]:
if "type" in json_type and json_type["type"] == "string" and "enum" in json_type:
enum_name = "%s.%s" % (domain_name, json_type["id"])
Generator.process_enum(json_type, enum_name)
elif json_type["type"] == "object":
if "properties" in json_type:
for json_property in json_type["properties"]:
if "type" in json_property and json_property["type"] == "string" and "enum" in json_property:
enum_name = "%s.%s%s" % (domain_name, json_type["id"], to_title_case(json_property["name"]))
Generator.process_enum(json_property, enum_name)
if "events" in json_domain:
for json_event in json_domain["events"]:
Generator.process_event(json_event, domain_name, frontend_method_declaration_lines)
# Per-domain frontend class: one field plus constructor-init entry.
Generator.frontend_class_field_lines.append(" %s m_%s;\n" % (domain_name, domain_name_lower))
if Generator.frontend_constructor_init_list:
Generator.frontend_constructor_init_list.append(" , ")
Generator.frontend_constructor_init_list.append("m_%s(inspectorFrontendChannel)\n" % domain_name_lower)
Generator.frontend_domain_class_lines.append(Templates.frontend_domain_class.substitute(None,
domainClassName=domain_name,
domainFieldName=domain_name_lower,
frontendDomainMethodDeclarations="".join(flatten_list(frontend_method_declaration_lines))))
# Backend command-handler interface for the domain's agent.
agent_interface_name = Capitalizer.lower_camel_case_to_upper(domain_name) + "CommandHandler"
Generator.backend_agent_interface_list.append(" class %s {\n" % agent_interface_name)
Generator.backend_agent_interface_list.append(" public:\n")
if "commands" in json_domain:
for json_command in json_domain["commands"]:
Generator.process_command(json_command, domain_name, agent_field_name, agent_interface_name)
Generator.backend_agent_interface_list.append("\n protected:\n")
Generator.backend_agent_interface_list.append(" virtual ~%s() { }\n" % agent_interface_name)
Generator.backend_agent_interface_list.append(" };\n\n")
Generator.backend_constructor_init_list.append(" , m_%s(0)" % agent_field_name)
Generator.backend_virtual_setters_list.append(" virtual void registerAgent(%s* %s) = 0;" % (agent_interface_name, agent_field_name))
Generator.backend_setters_list.append(" virtual void registerAgent(%s* %s) { ASSERT(!m_%s); m_%s = %s; }" % (agent_interface_name, agent_field_name, agent_field_name, agent_field_name, agent_field_name))
Generator.backend_field_list.append(" %s* m_%s;" % (agent_interface_name, agent_field_name))
# Close guards in reverse order of opening.
if domain_guard:
for l in reversed(first_cycle_guardable_list_list):
domain_guard.generate_close(l)
Generator.backend_js_domain_initializer_list.append("\n")
@staticmethod
def process_enum(json_enum, enum_name):
    """Emit a JS InspectorBackend.registerEnum(...) registration line for one
    protocol string enum (named or inline)."""
    members = ["%s: \"%s\"" % (fix_camel_case(member), member)
               for member in json_enum["enum"]]
    Generator.backend_js_domain_initializer_list.append(
        "InspectorBackend.registerEnum(\"%s\", {%s});\n" % (enum_name, ", ".join(members)))
@staticmethod
def process_event(json_event, domain_name, frontend_method_declaration_lines):
# Generates the C++ frontend send method for one event plus its JS-side
# registration line.
event_name = json_event["name"]
# Ad hoc types declared inline in event parameters are written here and
# spliced into the frontend declarations.
ad_hoc_type_output = []
frontend_method_declaration_lines.append(ad_hoc_type_output)
ad_hoc_type_writer = Writer(ad_hoc_type_output, " ")
decl_parameter_list = []
json_parameters = json_event.get("parameters")
Generator.generate_send_method(json_parameters, event_name, domain_name, ad_hoc_type_writer,
decl_parameter_list,
Generator.EventMethodStructTemplate,
Generator.frontend_method_list, Templates.frontend_method, {"eventName": event_name})
backend_js_event_param_list = []
if json_parameters:
for parameter in json_parameters:
parameter_name = parameter["name"]
backend_js_event_param_list.append("\"%s\"" % parameter_name)
frontend_method_declaration_lines.append(
" void %s(%s);\n" % (event_name, ", ".join(decl_parameter_list)))
Generator.backend_js_domain_initializer_list.append("InspectorBackend.registerEvent(\"%s.%s\", [%s]);\n" % (
domain_name, event_name, ", ".join(backend_js_event_param_list)))
class EventMethodStructTemplate:
    """Hooks used by generate_send_method when emitting frontend event
    methods: event parameters are packed into a dedicated "params" object
    that is attached to the outgoing JSON message."""

    # Name of the C++ container variable the parameter setters write into.
    container_name = "paramsObject"

    @staticmethod
    def append_prolog(line_list):
        # Create the container before any parameter is set.
        line_list.append(" RefPtr<InspectorObject> paramsObject = InspectorObject::create();\n")

    @staticmethod
    def append_epilog(line_list):
        # Attach the filled container to the message after all setters ran.
        line_list.append(" jsonMessage->setObject(\"params\", paramsObject);\n")
@staticmethod
def process_command(json_command, domain_name, agent_field_name, agent_interface_name):
# Generates everything for one backend command: dispatcher enum entry and
# handler, the C++ dispatch method, the agent interface signature, and the
# JS registration line.
json_command_name = json_command["name"]
cmd_enum_name = "k%s_%sCmd" % (domain_name, json_command["name"])
Generator.method_name_enum_list.append(" %s," % cmd_enum_name)
Generator.method_handler_list.append(" &InspectorBackendDispatcherImpl::%s_%s," % (domain_name, json_command_name))
Generator.backend_method_declaration_list.append(" void %s_%s(long callId, InspectorObject* requestMessageObject);" % (domain_name, json_command_name))
ad_hoc_type_output = []
Generator.backend_agent_interface_list.append(ad_hoc_type_output)
ad_hoc_type_writer = Writer(ad_hoc_type_output, " ")
Generator.backend_agent_interface_list.append(" virtual void %s(ErrorString*" % json_command_name)
# Accumulators for the generated dispatch method body.
method_in_code = ""
method_out_code = ""
agent_call_param_list = []
response_cook_list = []
request_message_param = ""
js_parameters_text = ""
# Incoming ("in") parameters: extract each one from the request message,
# tracking optionality, and extend the agent-call signature accordingly.
if "parameters" in json_command:
json_params = json_command["parameters"]
method_in_code += Templates.param_container_access_code
request_message_param = " requestMessageObject"
js_param_list = []
for json_parameter in json_params:
json_param_name = json_parameter["name"]
param_raw_type = resolve_param_raw_type(json_parameter, domain_name)
getter_name = param_raw_type.get_getter_name()
optional = json_parameter.get("optional")
non_optional_type_model = param_raw_type.get_raw_type_model()
if optional:
type_model = non_optional_type_model.get_optional()
else:
type_model = non_optional_type_model
if optional:
# Optional values are passed as pointer-or-null.
code = (" bool %s_valueFound = false;\n"
" %s in_%s = get%s(paramsContainerPtr, \"%s\", &%s_valueFound, protocolErrorsPtr);\n" %
(json_param_name, non_optional_type_model.get_command_return_pass_model().get_return_var_type(), json_param_name, getter_name, json_param_name, json_param_name))
param = ", %s_valueFound ? &in_%s : 0" % (json_param_name, json_param_name)
# FIXME: pass optional refptr-values as PassRefPtr
formal_param_type_pattern = "const %s*"
else:
code = (" %s in_%s = get%s(paramsContainerPtr, \"%s\", 0, protocolErrorsPtr);\n" %
(non_optional_type_model.get_command_return_pass_model().get_return_var_type(), json_param_name, getter_name, json_param_name))
param = ", in_%s" % json_param_name
# FIXME: pass not-optional refptr-values as NonNullPassRefPtr
if param_raw_type.is_heavy_value():
formal_param_type_pattern = "const %s&"
else:
formal_param_type_pattern = "%s"
method_in_code += code
agent_call_param_list.append(param)
Generator.backend_agent_interface_list.append(", %s in_%s" % (formal_param_type_pattern % non_optional_type_model.get_command_return_pass_model().get_return_var_type(), json_param_name))
js_bind_type = param_raw_type.get_js_bind_type()
js_param_text = "{\"name\": \"%s\", \"type\": \"%s\", \"optional\": %s}" % (
json_param_name,
js_bind_type,
("true" if ("optional" in json_parameter and json_parameter["optional"]) else "false"))
js_param_list.append(js_param_text)
js_parameters_text = ", ".join(js_param_list)
response_cook_text = ""
# Async commands get a dedicated callback class instead of direct return
# ("out") parameters; synchronous ones serialize outputs into the result.
if json_command.get("async") == True:
callback_name = Capitalizer.lower_camel_case_to_upper(json_command_name) + "Callback"
callback_output = []
callback_writer = Writer(callback_output, ad_hoc_type_writer.get_indent())
decl_parameter_list = []
Generator.generate_send_method(json_command.get("returns"), json_command_name, domain_name, ad_hoc_type_writer,
decl_parameter_list,
Generator.CallbackMethodStructTemplate,
Generator.backend_method_implementation_list, Templates.callback_method,
{"callbackName": callback_name, "agentName": agent_interface_name})
callback_writer.newline("class " + callback_name + " : public CallbackBase {\n")
callback_writer.newline("public:\n")
callback_writer.newline(" " + callback_name + "(PassRefPtr<InspectorBackendDispatcherImpl>, int id);\n")
callback_writer.newline(" void sendSuccess(" + ", ".join(decl_parameter_list) + ");\n")
callback_writer.newline("};\n")
ad_hoc_type_output.append(callback_output)
method_out_code += " RefPtr<" + agent_interface_name + "::" + callback_name + "> callback = adoptRef(new " + agent_interface_name + "::" + callback_name + "(this, callId));\n"
agent_call_param_list.append(", callback")
response_cook_text += " if (!error.length()) \n"
response_cook_text += " return;\n"
response_cook_text += " callback->disable();\n"
Generator.backend_agent_interface_list.append(", PassRefPtr<%s> callback" % callback_name)
else:
if "returns" in json_command:
method_out_code += "\n"
for json_return in json_command["returns"]:
json_return_name = json_return["name"]
optional = bool(json_return.get("optional"))
return_type_binding = Generator.resolve_type_and_generate_ad_hoc(json_return, json_command_name, domain_name, ad_hoc_type_writer, agent_interface_name + "::")
raw_type = return_type_binding.reduce_to_raw_type()
setter_type = raw_type.get_setter_name()
initializer = raw_type.get_c_initializer()
type_model = return_type_binding.get_type_model()
if optional:
type_model = type_model.get_optional()
# Declare the out variable, pass it to the agent, then "cook" it
# into the result object after a successful call.
code = " %s out_%s;\n" % (type_model.get_command_return_pass_model().get_return_var_type(), json_return_name)
param = ", %sout_%s" % (type_model.get_command_return_pass_model().get_output_argument_prefix(), json_return_name)
var_name = "out_%s" % json_return_name
setter_argument = type_model.get_command_return_pass_model().get_output_to_raw_expression() % var_name
if return_type_binding.get_setter_value_expression_pattern():
setter_argument = return_type_binding.get_setter_value_expression_pattern() % setter_argument
cook = " result->set%s(\"%s\", %s);\n" % (setter_type, json_return_name,
setter_argument)
set_condition_pattern = type_model.get_command_return_pass_model().get_set_return_condition()
if set_condition_pattern:
cook = (" if (%s)\n " % (set_condition_pattern % var_name)) + cook
annotated_type = type_model.get_command_return_pass_model().get_output_parameter_type()
param_name = "out_%s" % json_return_name
if optional:
param_name = "opt_" + param_name
Generator.backend_agent_interface_list.append(", %s %s" % (annotated_type, param_name))
response_cook_list.append(cook)
method_out_code += code
agent_call_param_list.append(param)
response_cook_text = "".join(response_cook_list)
if len(response_cook_text) != 0:
response_cook_text = " if (!error.length()) {\n" + response_cook_text + " }"
backend_js_reply_param_list = []
if "returns" in json_command:
for json_return in json_command["returns"]:
json_return_name = json_return["name"]
backend_js_reply_param_list.append("\"%s\"" % json_return_name)
js_reply_list = "[%s]" % ", ".join(backend_js_reply_param_list)
# Stitch all accumulated fragments into the dispatch-method template.
Generator.backend_method_implementation_list.append(Templates.backend_method.substitute(None,
domainName=domain_name, methodName=json_command_name,
agentField="m_" + agent_field_name,
methodInCode=method_in_code,
methodOutCode=method_out_code,
agentCallParams="".join(agent_call_param_list),
requestMessageObject=request_message_param,
responseCook=response_cook_text,
commandNameIndex=cmd_enum_name))
Generator.backend_method_name_declaration_list.append(" \"%s.%s\"," % (domain_name, json_command_name))
Generator.backend_js_domain_initializer_list.append("InspectorBackend.registerCommand(\"%s.%s\", [%s], %s);\n" % (domain_name, json_command_name, js_parameters_text, js_reply_list))
Generator.backend_agent_interface_list.append(") = 0;\n")
class CallbackMethodStructTemplate:
    """Hooks used by generate_send_method when emitting async-command
    callbacks: values are set directly on the top-level jsonMessage, so no
    prolog or epilog code is needed."""

    # Setters write straight into the outgoing message itself.
    container_name = "jsonMessage"

    @staticmethod
    def append_prolog(line_list):
        pass

    @staticmethod
    def append_epilog(line_list):
        pass
# Generates common code for event sending and callback response data sending.
@staticmethod
def generate_send_method(parameters, event_name, domain_name, ad_hoc_type_writer, decl_parameter_list,
method_struct_template,
generator_method_list, method_template, template_params):
# method_struct_template supplies the prolog/epilog hooks and the name of
# the container variable the per-parameter setters write into (see
# EventMethodStructTemplate / CallbackMethodStructTemplate).
method_line_list = []
if parameters:
method_struct_template.append_prolog(method_line_list)
for json_parameter in parameters:
parameter_name = json_parameter["name"]
param_type_binding = Generator.resolve_type_and_generate_ad_hoc(json_parameter, event_name, domain_name, ad_hoc_type_writer, "")
raw_type = param_type_binding.reduce_to_raw_type()
raw_type_binding = RawTypeBinding(raw_type)
optional = bool(json_parameter.get("optional"))
setter_type = raw_type.get_setter_name()
type_model = param_type_binding.get_type_model()
raw_type_model = raw_type_binding.get_type_model()
if optional:
type_model = type_model.get_optional()
raw_type_model = raw_type_model.get_optional()
annotated_type = type_model.get_input_param_type_text()
mode_type_binding = param_type_binding
decl_parameter_list.append("%s %s" % (annotated_type, parameter_name))
setter_argument = raw_type_model.get_event_setter_expression_pattern() % parameter_name
# Apply the binding's value-conversion wrapper, if it defines one.
if mode_type_binding.get_setter_value_expression_pattern():
setter_argument = mode_type_binding.get_setter_value_expression_pattern() % setter_argument
setter_code = " %s->set%s(\"%s\", %s);\n" % (method_struct_template.container_name, setter_type, parameter_name, setter_argument)
# Optional parameters are only serialized when actually provided.
if optional:
setter_code = (" if (%s)\n " % parameter_name) + setter_code
method_line_list.append(setter_code)
method_struct_template.append_epilog(method_line_list)
generator_method_list.append(method_template.substitute(None,
domainName=domain_name,
parameters=", ".join(decl_parameter_list),
code="".join(method_line_list), **template_params))
@staticmethod
def resolve_type_and_generate_ad_hoc(json_param, method_name, domain_name, ad_hoc_type_writer, container_relative_name_prefix_param):
# Resolves a parameter's type binding and, for inline (ad hoc) types,
# immediately generates their builder code via the given writer.
param_name = json_param["name"]
ad_hoc_type_list = []
class AdHocTypeContext:
container_full_name_prefix = "<not yet defined>"
container_relative_name_prefix = container_relative_name_prefix_param
@staticmethod
def get_type_name_fix():
# Ad hoc types are named after the parameter they belong to.
class NameFix:
class_name = Capitalizer.lower_camel_case_to_upper(param_name)
@staticmethod
def output_comment(writer):
writer.newline("// Named after parameter '%s' while generating command/event %s.\n" % (param_name, method_name))
return NameFix
@staticmethod
def add_type(binding):
ad_hoc_type_list.append(binding)
type_binding = resolve_param_type(json_param, domain_name, AdHocTypeContext)
# Stub listener/context: forward declarations and validator/impl writers
# are not applicable when generating interface-level ad hoc types.
class InterfaceForwardListener:
@staticmethod
def add_type_data(type_data):
pass
class InterfaceResolveContext:
forward_listener = InterfaceForwardListener
for type in ad_hoc_type_list:
type.resolve_inner(InterfaceResolveContext)
class InterfaceGenerateContext:
validator_writer = "not supported in InterfaceGenerateContext"
cpp_writer = validator_writer
for type in ad_hoc_type_list:
generator = type.get_code_generator()
if generator:
generator.generate_type_builder(ad_hoc_type_writer, InterfaceGenerateContext)
return type_binding
@staticmethod
def process_types(type_map):
# Generates all named-type builder code: main class bodies, forward
# declarations, and typedefs, each grouped by domain namespace.
output = Generator.type_builder_fragments
class GenerateContext:
validator_writer = Writer(Generator.validator_impl_list, "")
cpp_writer = Writer(Generator.type_builder_impl_list, "")
def generate_all_domains_code(out, type_data_callback):
writer = Writer(out, "")
for domain_data in type_map.domains():
domain_fixes = DomainNameFixes.get_fixed_data(domain_data.name())
domain_guard = domain_fixes.get_guard()
# The namespace (and feature guard) is opened lazily, only when
# the first type in the domain actually produces output.
namespace_declared = []
def namespace_lazy_generator():
if not namespace_declared:
if domain_guard:
domain_guard.generate_open(out)
writer.newline("namespace ")
writer.append(domain_data.name())
writer.append(" {\n")
# What is a better way to change value from outer scope?
namespace_declared.append(True)
return writer
for type_data in domain_data.types():
type_data_callback(type_data, namespace_lazy_generator)
if namespace_declared:
writer.append("} // ")
writer.append(domain_data.name())
writer.append("\n\n")
if domain_guard:
domain_guard.generate_close(out)
def create_type_builder_caller(generate_pass_id):
# Only types whose generator belongs to the given pass emit output.
def call_type_builder(type_data, writer_getter):
code_generator = type_data.get_binding().get_code_generator()
if code_generator and generate_pass_id == code_generator.get_generate_pass_id():
writer = writer_getter()
code_generator.generate_type_builder(writer, GenerateContext)
return call_type_builder
generate_all_domains_code(output, create_type_builder_caller(TypeBuilderPass.MAIN))
Generator.type_builder_forwards.append("// Forward declarations.\n")
def generate_forward_callback(type_data, writer_getter):
if type_data in global_forward_listener.type_data_set:
binding = type_data.get_binding()
binding.get_code_generator().generate_forward_declaration(writer_getter())
generate_all_domains_code(Generator.type_builder_forwards, generate_forward_callback)
Generator.type_builder_forwards.append("// End of forward declarations.\n\n")
Generator.type_builder_forwards.append("// Typedefs.\n")
generate_all_domains_code(Generator.type_builder_forwards, create_type_builder_caller(TypeBuilderPass.TYPEDEF))
Generator.type_builder_forwards.append("// End of typedefs.\n\n")
def flatten_list(input):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Only list instances are descended into; every other element (strings,
    tuples, numbers, ...) is appended as-is.
    """
    flat = []
    for item in input:
        if isinstance(item, list):
            flat.extend(flatten_list(item))
        else:
            flat.append(item)
    return flat
# A writer that only updates file if it actually changed to better support incremental build.
class SmartOutput:
    """File-like buffer that writes to disk on close(), but only when the
    (trailing-whitespace-normalized) content differs from what is already on
    disk, or when the module-level write_always flag forces a write."""

    def __init__(self, file_name):
        self.file_name_ = file_name
        self.output_ = ""

    def write(self, text):
        # Everything is buffered; the filesystem is untouched until close().
        self.output_ += text

    def close(self):
        # Normalize trailing whitespace to exactly one final newline.
        self.output_ = self.output_.rstrip() + "\n"
        text_changed = True
        try:
            # Context manager closes the handle even if read() raises
            # (the original closed it manually and leaked on error).
            with open(self.file_name_, "r") as read_file:
                text_changed = read_file.read() != self.output_
        except (IOError, OSError):
            # Missing/unreadable file: treat as changed and (over)write it.
            # Narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt and programming errors.
            pass
        if text_changed or write_always:
            with open(self.file_name_, "w") as out_file:
                out_file.write(self.output_)
Generator.go()

# Every output goes through SmartOutput so that unchanged files keep their
# mtime (better incremental builds).
backend_h_file = SmartOutput(output_header_dirname + "/InspectorBackendDispatcher.h")
backend_cpp_file = SmartOutput(output_cpp_dirname + "/InspectorBackendDispatcher.cpp")
frontend_h_file = SmartOutput(output_header_dirname + "/InspectorFrontend.h")
frontend_cpp_file = SmartOutput(output_cpp_dirname + "/InspectorFrontend.cpp")
typebuilder_h_file = SmartOutput(output_header_dirname + "/InspectorTypeBuilder.h")
typebuilder_cpp_file = SmartOutput(output_cpp_dirname + "/InspectorTypeBuilder.cpp")
backend_js_file = SmartOutput(output_js_dirname + "/InspectorBackendCommands.js")

# Fill each template with the code fragments accumulated on Generator.
backend_h_file.write(Templates.backend_h.substitute(None,
    virtualSetters="\n".join(Generator.backend_virtual_setters_list),
    agentInterfaces="".join(flatten_list(Generator.backend_agent_interface_list)),
    methodNamesEnumContent="\n".join(Generator.method_name_enum_list)))

backend_cpp_file.write(Templates.backend_cpp.substitute(None,
    constructorInit="\n".join(Generator.backend_constructor_init_list),
    setters="\n".join(Generator.backend_setters_list),
    fieldDeclarations="\n".join(Generator.backend_field_list),
    methodNameDeclarations="\n".join(Generator.backend_method_name_declaration_list),
    methods="\n".join(Generator.backend_method_implementation_list),
    methodDeclarations="\n".join(Generator.backend_method_declaration_list),
    messageHandlers="\n".join(Generator.method_handler_list)))

frontend_h_file.write(Templates.frontend_h.substitute(None,
    fieldDeclarations="".join(Generator.frontend_class_field_lines),
    domainClassList="".join(Generator.frontend_domain_class_lines)))

frontend_cpp_file.write(Templates.frontend_cpp.substitute(None,
    constructorInit="".join(Generator.frontend_constructor_init_list),
    methods="\n".join(Generator.frontend_method_list)))

typebuilder_h_file.write(Templates.typebuilder_h.substitute(None,
    typeBuilders="".join(flatten_list(Generator.type_builder_fragments)),
    forwards="".join(Generator.type_builder_forwards),
    validatorIfdefName=VALIDATOR_IFDEF_NAME))

typebuilder_cpp_file.write(Templates.typebuilder_cpp.substitute(None,
    enumConstantValues=EnumConstants.get_enum_constant_code(),
    implCode="".join(flatten_list(Generator.type_builder_impl_list)),
    validatorCode="".join(flatten_list(Generator.validator_impl_list)),
    validatorIfdefName=VALIDATOR_IFDEF_NAME))

backend_js_file.write(Templates.backend_js.substitute(None,
    domainInitializers="".join(Generator.backend_js_domain_initializer_list)))

# close() is what actually writes the files (only if their content changed).
backend_h_file.close()
backend_cpp_file.close()
frontend_h_file.close()
frontend_cpp_file.close()
typebuilder_h_file.close()
typebuilder_cpp_file.close()
backend_js_file.close()
| klim-iv/phantomjs-qt5 | src/webkit/Source/WebCore/inspector/CodeGeneratorInspector.py | Python | bsd-3-clause | 98,207 |
from datetime import datetime
from django.db import models
from uuidfield.fields import UUIDField
from access import acl
import amo.models
from translations.fields import save_signal
from mkt.constants import comm as const
class CommunicationPermissionModel(amo.models.ModelBase):
    """Abstract base adding per-audience read-permission flags to comm models.

    NOTE(review): the BooleanFields declare no explicit default -- confirm the
    intended default against the migration that created these columns.
    """
    # Read permissions imply write permissions as well.
    read_permission_public = models.BooleanField()
    read_permission_developer = models.BooleanField()
    read_permission_reviewer = models.BooleanField()
    read_permission_senior_reviewer = models.BooleanField()
    read_permission_mozilla_contact = models.BooleanField()
    read_permission_staff = models.BooleanField()

    class Meta:
        abstract = True
def check_acls(user, obj, acl_type):
    """Check whether ``user`` passes the ACL identified by ``acl_type``.

    Args:
        user: the user profile being checked (must expose ``email`` for the
            'moz_contact' lookup).
        obj: a note or thread; only used for the 'moz_contact' lookup.
        acl_type: one of 'moz_contact', 'admin', 'reviewer', 'senior_reviewer'.

    Returns:
        bool: whether the user passes the check.

    Raises:
        Exception: if ``acl_type`` is not a known lookup.
    """
    if acl_type == 'moz_contact':
        # `obj` may be a thread (with .addon) or a note (with .thread.addon);
        # fall back from one shape to the other.
        try:
            return user.email in obj.addon.get_mozilla_contacts()
        except AttributeError:
            return user.email in obj.thread.addon.get_mozilla_contacts()
    if acl_type == 'admin':
        return acl.action_allowed_user(user, 'Admin', '%')
    elif acl_type == 'reviewer':
        return acl.action_allowed_user(user, 'Apps', 'Review')
    elif acl_type == 'senior_reviewer':
        return acl.action_allowed_user(user, 'Apps', 'ReviewEscalated')
    else:
        # Removed the unreachable `return False` that followed this raise.
        raise Exception('Invalid ACL lookup.')
def check_acls_comm_obj(obj, profile):
    """Cross-reference a note/thread's permission flags against the ACLs the
    profile actually holds; public objects are readable by anyone."""
    if obj.read_permission_public:
        return True
    # (permission flag on the object, ACL lookup that must also pass)
    required_checks = (
        ('read_permission_reviewer', 'reviewer'),
        ('read_permission_senior_reviewer', 'senior_reviewer'),
        ('read_permission_mozilla_contact', 'moz_contact'),
        ('read_permission_staff', 'admin'),
    )
    for flag, acl_type in required_checks:
        if getattr(obj, flag) and check_acls(profile, obj, acl_type):
            return True
    return False
def user_has_perm_thread(thread, profile):
    """Check read/write access to ``thread`` for ``profile``.

    Access is granted to users who posted to the thread, users on its CC
    list, developers of the thread's add-on (when the thread allows
    developer reads), and finally anyone passing the object's ACL checks.
    """
    # Participants: posted a note, or CC'd on the thread.
    if (CommunicationNote.objects.filter(author=profile, thread=thread).exists()
            or CommunicationThreadCC.objects.filter(user=profile, thread=thread).exists()):
        return True
    # Developer of the add-on, when developer reads are allowed.
    if thread.read_permission_developer:
        if profile.addons.filter(pk=thread.addon_id).exists():
            return True
    return check_acls_comm_obj(thread, profile)
def user_has_perm_note(note, profile):
    """Check read/write access to ``note`` for ``profile``.

    The note's author always has access; developers of the note's add-on get
    access when the note allows developer reads; otherwise the object's ACL
    checks decide.
    """
    # Authors can always see their own notes.
    if note.author.id == profile.id:
        return True
    # Developer of the add-on, when developer reads are allowed.
    if note.read_permission_developer:
        if profile.addons.filter(pk=note.thread.addon_id).exists():
            return True
    return check_acls_comm_obj(note, profile)
class CommunicationThread(CommunicationPermissionModel):
    """A thread of communication notes attached to an add-on."""
    addon = models.ForeignKey('addons.Addon', related_name='threads')
    # Optional: a thread may concern one specific version of the add-on.
    version = models.ForeignKey('versions.Version', related_name='threads',
                                null=True)

    class Meta:
        db_table = 'comm_threads'
class CommunicationThreadCC(amo.models.ModelBase):
    """Join table marking a user as CC'd on a thread (one row per pair)."""
    thread = models.ForeignKey(CommunicationThread,
                               related_name='thread_cc')
    user = models.ForeignKey('users.UserProfile',
                             related_name='comm_thread_cc')

    class Meta:
        db_table = 'comm_thread_cc'
        # A user can only be CC'd once per thread.
        unique_together = ('user', 'thread',)
class CommunicationNoteManager(models.Manager):
    """Manager exposing permission-aware querysets for notes."""

    def with_perms(self, profile, thread):
        """Return the notes of ``thread`` that ``profile`` may read."""
        visible_ids = []
        for note in self.filter(thread=thread):
            if user_has_perm_note(note, profile):
                visible_ids.append(note.id)
        return self.filter(id__in=visible_ids)
class CommunicationNote(CommunicationPermissionModel):
    """A single message within a CommunicationThread."""
    thread = models.ForeignKey(CommunicationThread, related_name='notes')
    author = models.ForeignKey('users.UserProfile', related_name='comm_notes')
    # NOTE(review): integer code; presumably one of mkt.constants.comm note
    # types -- confirm against `const`.
    note_type = models.IntegerField()
    body = models.TextField(null=True)
    # Self-reference: a note may be a reply to another note.
    reply_to = models.ForeignKey('self', related_name='replies', null=True,
                                 blank=True)
    read_by_users = models.ManyToManyField('users.UserProfile',
                                           through='CommunicationNoteRead')

    objects = CommunicationNoteManager()

    class Meta:
        db_table = 'comm_thread_notes'

    def save(self, *args, **kwargs):
        """Persist the note, then bump the parent thread's modified time."""
        super(CommunicationNote, self).save(*args, **kwargs)
        # Keep the thread's `modified` in sync with its latest note.
        self.thread.modified = self.created
        self.thread.save()
class CommunicationNoteRead(models.Model):
    """Through table recording that a user has read a note."""
    user = models.ForeignKey('users.UserProfile')
    note = models.ForeignKey(CommunicationNote)

    class Meta:
        db_table = 'comm_notes_read'
class CommunicationThreadToken(amo.models.ModelBase):
    """Per-(thread, user) access token with an expiry window and use cap."""
    thread = models.ForeignKey(CommunicationThread, related_name='token')
    user = models.ForeignKey('users.UserProfile',
                             related_name='comm_thread_tokens')
    uuid = UUIDField(unique=True, auto=True)
    use_count = models.IntegerField(default=0,
        help_text='Stores the number of times the token has been used')

    class Meta:
        db_table = 'comm_thread_tokens'
        unique_together = ('thread', 'user')

    def is_valid(self):
        """Return True while the token is within its age and use limits."""
        # TODO: Confirm the expiration and max use count values.
        # NOTE(review): measured from `modified`, not creation -- assumes
        # ModelBase refreshes `modified` on save; confirm.
        timedelta = datetime.now() - self.modified
        return (timedelta.days <= const.THREAD_TOKEN_EXPIRY and
                self.use_count < const.MAX_TOKEN_USE_COUNT)

    def reset_uuid(self):
        """Replace this token's UUID in memory (does not save the model)."""
        # Generate a new UUID.
        self.uuid = UUIDField()._create_uuid().hex
# Hook the translations app's save_signal into note saves; dispatch_uid keeps
# the connection from being registered twice.
models.signals.pre_save.connect(save_signal, sender=CommunicationNote,
                                dispatch_uid='comm_thread_notes_translations')
| Joergen/zamboni | apps/comm/models.py | Python | bsd-3-clause | 6,495 |
__author__ = 'Robbert Harms'
__date__ = "2015-04-23"
__maintainer__ = "Robbert Harms"
__email__ = "robbert.harms@maastrichtuniversity.nl"
class DVS(object):
    """A DVS file: header comments followed by a list of direction tables."""

    def __init__(self, comments, dvs_tables):
        """Create a new DVS object.

        Args:
            comments (str): the comment block from the top of the file
            dvs_tables (list of DVSDirectionTable): the direction tables

        Attributes:
            comments (str): the comment block from the top of the file
            dvs_tables (list of DVSDirectionTable): the direction tables
        """
        self.comments = comments
        self.dvs_tables = dvs_tables

    def get_file_string(self, windows_line_endings=True):
        """Serialize the whole DVS to a string.

        Args:
            windows_line_endings (boolean): if True, prefix every \\n with \\r
        """
        body = "\n".join(table.get_file_string(windows_line_endings=False)
                         for table in self.dvs_tables)
        result = self.comments + "\n" + body
        if windows_line_endings:
            result = result.replace("\n", "\r\n")
        return result

    def get_overview_representation(self):
        """Return a short summary of the tables contained in this DVS."""
        lines = ['Nmr tables: {}'.format(len(self.dvs_tables))]
        for index, table in enumerate(self.dvs_tables):
            lines.append('Table {}: {} directions'.format(index, table.table.shape[0]))
        return "\n".join(lines) + "\n"
class DVSDirectionTable(object):
    """A single direction table of a DVS file."""

    def __init__(self, table, comments='', coordinate_system='xyz', normalisation='none'):
        """Create a direction table.

        Args:
            table (ndarray): the direction vectors, one row per direction
            comments (str): the comment lines above this table
            coordinate_system (str): the coordinate system (for example 'xyz')
            normalisation (str): the normalisation definition (normally 'none')

        Attributes:
            table (ndarray): the direction vectors, one row per direction
            comments (str): the comment lines above this table
            coordinate_system (str): the coordinate system (for example 'xyz')
            normalisation (str): the normalisation definition (normally 'none')
        """
        self.table = table
        self.comments = comments
        self.coordinate_system = coordinate_system
        self.normalisation = normalisation

    def get_file_string(self, windows_line_endings=True):
        """Serialize this direction table to a string.

        Args:
            windows_line_endings (boolean): if True, prefix every \\n with \\r
        """
        pieces = [self.comments,
                  '[directions={}]\n'.format(self.table.shape[0]),
                  'CoordinateSystem = {}\n'.format(self.coordinate_system),
                  'Normalisation = {}\n'.format(self.normalisation)]
        for index in range(self.table.shape[0]):
            pieces.append('Vector[{0}] = ( {1}, {2}, {3} )\n'.format(index, *self.table[index, :]))
        result = ''.join(pieces)
        if windows_line_endings:
            result = result.replace("\n", "\r\n")
        return result
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# https://en.wikipedia.org/w/index.php?title=List_of_computing_and_IT_abbreviations&action=edit
import re, urllib2
from collections import defaultdict
from BeautifulSoup import BeautifulSoup
pull = lambda url: urllib2.urlopen(urllib2.Request(url))
wikip = lambda article: pull('https://en.wikipedia.org/w/index.php?title=%s&action=edit' % article)
# todo: List_of_file_formats
def stock():
    """Load the stock acronym lists from the local 'acronyms' and
    'acronyms.comp' files.

    Lines without a tab are skipped; each kept line is 'ACRONYM<TAB>definition'.

    Returns:
        defaultdict mapping acronym -> list of definitions (in file order,
        'acronyms' first).
    """
    ad = defaultdict(list)
    # Both files share the same format; the original duplicated this loop.
    for source in ('acronyms', 'acronyms.comp'):
        # 'with' closes the handles the original left open.
        with open(source) as handle:
            for line in handle:
                if '\t' not in line:
                    continue
                a, d = line.strip().split('\t')
                ad[a].append(d)
    return ad
def exists(key, value, lut):
    """Return True when ``value`` is already listed under ``key`` in ``lut``
    (both compared case-insensitively; keys in ``lut`` are upper-case)."""
    entries = lut.get(key.upper())
    if entries is None:
        return False
    target = value.upper()
    return any(entry.upper() == target for entry in entries)
def computing_abbrev():
    """Parse Wikipedia's computing/IT abbreviation list into a defaultdict of
    acronym -> list of definitions.

    This parser is very brittle, but the input is very well formed.
    """
    # NOTE(review): this rebind is ACTIVE, so the function reads a local file
    # named 'List_of_computing_and_IT_abbreviations' instead of fetching from
    # Wikipedia -- confirm whether that is still intended before shipping.
    wikip = open # uncomment for local debug
    html = wikip('List_of_computing_and_IT_abbreviations').read()
    soup = BeautifulSoup(html)
    text = soup.textarea.contents[0]
    ad = defaultdict(list)
    # Raw string: the pattern contains regex escapes (\* \[\[).
    for pair in re.findall(r'\* \[\[.*—.*', str(text)):
        try:
            a, _, d = pair.partition('—')
            a = a[4:].rpartition('|')[-1].replace(']]', '')
            d = d.replace('[[', '').replace(']]', '').replace('—', ' - ')
            ad[a].append(d.strip())
        except Exception:
            # Skip malformed entries without aborting the whole parse.
            # Narrowed from a bare 'except', which would also swallow
            # KeyboardInterrupt/SystemExit.
            continue
    return ad
def main():
    "build all the new lists"
    # okay, there is just the one for now
    ad = computing_abbrev()
    stk = stock()
    tech = open('acronyms.computing', 'w')
    tech.write('$ArchLinux: wikipedia computer abbrevs 2018-05-31\n\n')
    # Emit only entries not already present in the stock lists.
    for a,ds in sorted(ad.items()):
        for d in ds:
            if exists(a, d, stk):
                continue
            tech.write('%s\t%s\n'% (a.upper(), d))
    tech.close()

if __name__ == '__main__':
    main()
| keenerd/wtf | wikipedia.py | Python | bsd-3-clause | 2,140 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
from .models import User
class MyUserChangeForm(UserChangeForm):
    """Stock UserChangeForm bound to this project's custom User model."""

    class Meta(UserChangeForm.Meta):
        model = User
class MyUserCreationForm(UserCreationForm):
    """User creation form for the custom User model that rejects duplicate
    usernames."""

    # Extend the parent's error messages instead of mutating them in place.
    # The previous `error_message = UserCreationForm.error_messages.update({...})`
    # bound None to the attribute (dict.update() returns None) and mutated the
    # *shared* base-class dict as a side effect.
    error_messages = dict(
        UserCreationForm.error_messages,
        duplicate_username=_("This username has already been taken."),
    )

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Raise a validation error when the username is already registered."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
    """Admin for the custom User model, wired to the custom change/creation
    forms above the stock auth UserAdmin behaviour."""
    form = MyUserChangeForm
    add_form = MyUserCreationForm
## @package csnStandardModuleProject
# Definition of the methods used for project configuration.
# This should be the only CSnake import in a project configuration.
import csnUtility
import csnProject
import csnBuild
import os.path
import inspect
from csnProject import GenericProject
class StandardModuleProject(GenericProject):
    """ GenericProject with applications and modules in specific folders. """

    def __init__(self, _name, _type, _sourceRootFolder = None, _categories = None):
        # Default the source root to the folder of the calling config file.
        if _sourceRootFolder is None:
            filename = csnProject.FindFilename(1)
            dirname = os.path.dirname(filename)
            _sourceRootFolder = csnUtility.NormalizePath(dirname, _correctCase = False)
        GenericProject.__init__(self, _name=_name, _type=_type, _sourceRootFolder=_sourceRootFolder, _categories=_categories, _context=csnProject.globalCurrentContext)
        # Lazily created container project for applications (see AddApplications).
        self.applicationsProject = None

    def AddLibraryModules(self, _libModules):
        """
        Adds source files (anything matching *.c??) and public include folders to self, using a set of libmodules.
        It is assumed that the root folder of self has a subfolder called libmodules. The subfolders of libmodules should
        contain a subfolder called src (e.g. for mymodule, this would be libmodules/mymodule/src).
        If the src folder has a subfolder called 'stub', it is also added to the source tree.
        _libModules - a list of subfolders of the libmodules folder that should be 'added' to self.
        """
        # add sources
        sourceRootFolder = self.GetSourceRootFolder()
        includeFileExtensions = csnUtility.GetIncludeFileExtensions()
        sourceFileExtensions = csnUtility.GetSourceFileExtensions()
        # First pass: src folders (and their optional stub subfolder).
        for libModule in _libModules:
            for stub in ("/stub", ""):
                srcFolder = "libmodules/%s/src%s" % (libModule, stub)
                srcFolderAbs = "%s/%s" % (sourceRootFolder, srcFolder)
                if( os.path.exists(srcFolderAbs) ):
                    self.AddIncludeFolders([srcFolder])
                    for extension in sourceFileExtensions:
                        self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
                    for extension in includeFileExtensions:
                        self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
        # Second pass: include folders (and their optional stub subfolder).
        for libModule in _libModules:
            for stub in ("/stub", ""):
                includeFolder = "libmodules/%s/include%s" % (libModule, stub)
                includeFolderAbs = "%s/%s" % (sourceRootFolder, includeFolder)
                if( os.path.exists(includeFolderAbs) ):
                    self.AddIncludeFolders([includeFolder])
                    for extension in includeFileExtensions:
                        self.AddSources(["%s/*.%s" % (includeFolder, extension)], _checkExists = 0)

    def AddApplications(self, _modules, _pch="", _applicationDependenciesList=None, _holderName=None, _properties = []):
        """
        Creates extra CSnake projects, each project building one application in the 'Applications' subfolder of the current project.
        _modules - List of the subfolders within the 'Applications' subfolder that must be scanned for applications.
        _pch - If not "", this is the include file used to generate a precompiled header for each application.
        """
        # NOTE(review): mutable default argument `_properties = []` is shared
        # across calls -- confirm it is never mutated by callees.
        dependencies = [self]
        if not _applicationDependenciesList is None:
            dependencies.extend(_applicationDependenciesList)
        if _holderName is None:
            _holderName = "%sApplications" % self.name
        csnProject.globalCurrentContext.SetSuperSubCategory("Applications", _holderName)
        # Create the container project only once per StandardModuleProject.
        if self.applicationsProject is None:
            self.applicationsProject = csnBuild.Project(self.name + "Applications", "container", _sourceRootFolder = self.GetSourceRootFolder(), _categories = [_holderName])
            #self.applicationsProject.AddSources([csnUtility.GetDummyCppFilename()], _sourceGroup = "CSnakeGeneratedFiles")
            self.applicationsProject.AddProjects([self])
            self.AddProjects([self.applicationsProject], _dependency = 0)
        # look for an 'applications' or 'Applications' folder
        _modulesFolder = "%s/applications" % self.GetSourceRootFolder()
        if not os.path.exists(_modulesFolder):
            _modulesFolder = "%s/Applications" % self.GetSourceRootFolder()
        self.__AddApplications(self.applicationsProject, dependencies, _modules, _modulesFolder, _pch, _holderName, _properties)

    def __AddApplications(self, _holderProject, _applicationDependenciesList, _modules, _modulesFolder, _pch = "", _holderName=None, _properties = []):
        """
        Creates application projects and adds them to _holderProject (using _holderProject.AddProject). The holder
        project does not depend on these application projects.
        It is assumed that _modules is a list containing subfolders of _modulesFolder.
        Each subfolder in _modules should contain source files (.cpp, .cxx or .cc), where each source file corresponds to a single application.
        Hence, each source file is used to create a new application project. For example, assuming that the _modulesFolder
        is called 'Applications', the file 'Applications/Small/Tiny.cpp' will be used to build the 'Tiny' application.
        _applicationDependenciesList - List of projects that each new application project is dependent on.
        _modulesFolder - Folder containing subfolders with applications.
        _modules = List of subfolders of _modulesFolder that should be processed.
        _pch - If not "", this is the C++ include file which is used for building a precompiled header file for each application.
        """
        for module in _modules:
            moduleFolder = "%s/%s" % (_modulesFolder, module)
            sourceFiles = []
            headerFiles = []
            for extension in csnUtility.GetSourceFileExtensions():
                sourceFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
            for extension in csnUtility.GetIncludeFileExtensions():
                headerFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
            # One executable project per source file found.
            for sourceFile in sourceFiles:
                if os.path.isdir(sourceFile):
                    continue
                name = os.path.splitext( os.path.basename(sourceFile) )[0]
                name = name.replace(' ', '_')
                if _holderName is None:
                    _holderName = _holderProject.name
                app = csnBuild.Project("%s_%s" % (_holderName, name), "executable", _sourceRootFolder = _holderProject.GetSourceRootFolder())
                app.AddIncludeFolders([moduleFolder])
                app.AddProjects(_applicationDependenciesList)
                app.AddSources([sourceFile])
                app.AddProperties( _properties )
                # add header files so that they appear in visual studio
                app.AddSources(headerFiles)
                if( _pch != "" ):
                    app.SetPrecompiledHeader(_pch)
                _holderProject.AddProjects([app])
| csnake-org/CSnake | src/csnStandardModuleProject.py | Python | bsd-3-clause | 7,313 |
# -*- coding: utf-8 -*
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import requests
import os
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.table import Table, Column
from astropy.io.votable import parse
from astroquery import log
from astroquery.casda import Casda
try:
from unittest.mock import Mock, patch, MagicMock
except ImportError:
pytest.skip("Install mock for the casda tests.", allow_module_level=True)
DATA_FILES = {'CIRCLE': 'cone.xml', 'RANGE': 'box.xml', 'DATALINK': 'datalink.xml', 'RUN_JOB': 'run_job.xml',
'COMPLETED_JOB': 'completed_job.xml', 'DATALINK_NOACCESS': 'datalink_noaccess.xml'}
class MockResponse:
    """Lightweight stand-in for requests.Response used by the mocked session."""

    def __init__(self, content):
        self.content = content
        self.text = self.content.decode()

    def raise_for_status(self):
        # The mock always represents a successful response.
        return None
first_job_pass = True
def get_mockreturn(self, method, url, data=None, timeout=10,
                   files=None, params=None, headers=None, **kwargs):
    """Replacement for requests.Session.request: serve canned XML fixtures.

    ``self`` is the Session instance (this function is monkeypatched in as a
    method); job-progress state is stored on it as ``first_job_pass``.
    """
    log.debug("get_mockreturn url:{} params:{} kwargs:{}".format(url, params, kwargs))
    # Only the user/password pair is accepted; anything else gets a 401 mock.
    if kwargs and 'auth' in kwargs:
        auth = kwargs['auth']
        if auth and (auth[0] != 'user' or auth[1] != 'password'):
            log.debug("Rejecting credentials")
            return create_auth_failure_response()
    if 'data/async' in str(url):
        # Responses for an asynchronous SODA job
        if str(url).endswith('data/async'):
            # Job creation: the first status poll should report RUN_JOB.
            self.first_job_pass = True
            return create_soda_create_response('111-000-111-000')
        elif str(url).endswith('/phase') and method == 'POST':
            key = "RUN_JOB"
        elif str(url).endswith('111-000-111-000') and method == 'GET':
            # First poll shows the job running, subsequent polls completed.
            key = "RUN_JOB" if self.first_job_pass else "COMPLETED_JOB"
            self.first_job_pass = False
        else:
            raise ValueError("Unexpected SODA async {} call to url {}".format(method, url))
    elif 'datalink' in str(url):
        # Only cube-244 is accessible in the fixtures.
        if 'cube-244' in str(url):
            key = 'DATALINK'
        else:
            key = 'DATALINK_NOACCESS'
    else:
        # Cone/box queries: the POS shape name (CIRCLE/RANGE) picks the file.
        key = params['POS'].split()[0] if params['POS'] else None
    filename = data_path(DATA_FILES[key])
    log.debug('providing ' + filename)
    content = open(filename, 'rb').read()
    return MockResponse(content)
def create_soda_create_response(jobid):
    """Build a mock 303 'job created' response whose location header points at
    the async job URL for ``jobid``."""
    location = 'https://casda.csiro.au/casda_data_access/data/async/' + jobid
    response = Mock(spec=requests.Response)
    response.configure_mock(status_code=303, message='OK',
                            headers=[['location', location]], url=location)
    return response
def create_auth_failure_response():
    """Build a mock 401 response whose raise_for_status() raises HTTPError."""
    challenge_headers = [['WWW-Authenticate', 'Basic realm="ATNF OPAL Login"']]
    response = MagicMock(spec=requests.Response)
    response.configure_mock(status_code=401, message='OK',
                            headers=challenge_headers,
                            **{'raise_for_status.side_effect': requests.exceptions.HTTPError()})
    return response
@pytest.fixture
def patch_get(request):
    """Fixture routing all requests.Session traffic through get_mockreturn."""
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(requests.Session, 'request', get_mockreturn)
    return mp
def data_path(filename):
    """Return the path of ``filename`` inside this test module's data folder."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
def isclose(value1, value2, abs_tol=1e-09):
    """Return True when the two values differ by less than ``abs_tol``."""
    difference = value1 - value2
    return -abs_tol < difference < abs_tol
def test_query_region_text_radius(patch_get):
    """A cone search given as a text coordinate builds a CIRCLE POS payload
    and parses the three rows served from the cone.xml fixture."""
    ra = 333.9092
    dec = -45.8418
    radius = 0.5
    query_payload = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=radius * u.deg, cache=False,
                                       get_query_payload=True)
    assert isinstance(query_payload, dict)
    assert 'POS' in query_payload
    assert query_payload['POS'].startswith('CIRCLE 333')
    pos_parts = query_payload['POS'].split(' ')
    assert pos_parts[0] == 'CIRCLE'
    assert isclose(float(pos_parts[1]), ra, abs_tol=1e-4)
    assert isclose(float(pos_parts[2]), dec, abs_tol=1e-4)
    assert isclose(float(pos_parts[3]), radius)
    assert len(pos_parts) == 4
    responses = Casda.query_region('22h15m38.2s -45d50m30.5s', radius=0.5 * u.deg, cache=False)
    assert isinstance(responses, Table)
    assert len(responses) == 3
def test_query_region_radius(patch_get):
    """A cone search given as a SkyCoord builds a CIRCLE POS payload and
    parses the three rows served from the cone.xml fixture."""
    ra = 333.9092
    dec = -45.8418
    radius = 0.5
    centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
    query_payload = Casda.query_region(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
    assert isinstance(query_payload, dict)
    assert 'POS' in query_payload
    assert query_payload['POS'].startswith('CIRCLE 333')
    pos_parts = query_payload['POS'].split(' ')
    assert pos_parts[0] == 'CIRCLE'
    assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
    assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
    assert isclose(float(pos_parts[3]), radius)
    assert len(pos_parts) == 4
    responses = Casda.query_region(centre, radius=0.5 * u.deg, cache=False)
    assert isinstance(responses, Table)
    assert len(responses) == 3
def test_query_region_async_radius(patch_get):
    """The async cone search builds the same CIRCLE payload but returns the
    raw (mocked) HTTP response instead of a parsed table."""
    ra = 333.9092
    dec = -45.8418
    radius = 0.5
    centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
    query_payload = Casda.query_region_async(centre, radius=radius * u.deg, cache=False, get_query_payload=True)
    assert isinstance(query_payload, dict)
    assert 'POS' in query_payload
    assert query_payload['POS'].startswith('CIRCLE 333')
    pos_parts = query_payload['POS'].split(' ')
    assert pos_parts[0] == 'CIRCLE'
    assert isclose(float(pos_parts[1]), ra, abs_tol=1e-5)
    assert isclose(float(pos_parts[2]), dec, abs_tol=1e-5)
    assert isclose(float(pos_parts[3]), radius)
    assert len(pos_parts) == 4
    responses = Casda.query_region_async(centre, radius=0.5 * u.deg, cache=False)
    assert isinstance(responses, MockResponse)
def test_query_region_box(patch_get):
    """A width/height search builds a RANGE POS payload (min/max RA then
    min/max Dec) and parses the two rows served from the box.xml fixture."""
    ra = 333.9092
    dec = -45.8418
    width = 0.5
    height = 0.2
    centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
    query_payload = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False,
                                       get_query_payload=True)
    assert isinstance(query_payload, dict)
    assert 'POS' in query_payload
    assert query_payload['POS'].startswith('RANGE 333')
    pos_parts = query_payload['POS'].split(' ')
    assert pos_parts[0] == 'RANGE'
    assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
    assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
    assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
    assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
    assert len(pos_parts) == 5
    responses = Casda.query_region(centre, width=width * u.deg, height=height * u.deg, cache=False)
    assert isinstance(responses, Table)
    assert len(responses) == 2
def test_query_region_async_box(patch_get):
    """The async box search builds the same RANGE payload but returns the raw
    (mocked) HTTP response instead of a parsed table."""
    ra = 333.9092
    dec = -45.8418
    width = 0.5
    height = 0.2
    centre = SkyCoord(ra, dec, unit=('deg', 'deg'))
    query_payload = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False,
                                             get_query_payload=True)
    assert isinstance(query_payload, dict)
    assert 'POS' in query_payload
    assert query_payload['POS'].startswith('RANGE 333')
    pos_parts = query_payload['POS'].split(' ')
    assert pos_parts[0] == 'RANGE'
    assert isclose(float(pos_parts[1]), ra - width / 2, abs_tol=1e-5)
    assert isclose(float(pos_parts[2]), ra + width / 2, abs_tol=1e-5)
    assert isclose(float(pos_parts[3]), dec - height / 2, abs_tol=1e-5)
    assert isclose(float(pos_parts[4]), dec + height / 2, abs_tol=1e-5)
    assert len(pos_parts) == 5
    responses = Casda.query_region_async(centre, width=width * u.deg, height=height * u.deg, cache=False)
    assert isinstance(responses, MockResponse)
def test_filter_out_unreleased():
    """filter_out_unreleased drops rows with a future or empty release date."""
    all_records = parse(data_path('partial_unreleased.xml'), verify='warn').get_first_table().to_table()
    assert all_records[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
    assert all_records[1]['obs_release_date'] == '2218-01-02T16:51:00.728Z'
    assert all_records[2]['obs_release_date'] == ''
    assert len(all_records) == 3
    # This should filter out the rows with either a future obs_release_date or no obs_release_date
    filtered = Casda.filter_out_unreleased(all_records)
    assert filtered[0]['obs_release_date'] == '2017-08-02T03:51:19.728Z'
    assert filtered[0]['obs_publisher_did'] == 'cube-502'
    assert len(filtered) == 1
def test_stage_data_unauthorised(patch_get):
    """stage_data without credentials raises a ValueError."""
    table = Table()
    with pytest.raises(ValueError) as excinfo:
        Casda.stage_data(table)
    assert "Credentials must be supplied" in str(excinfo.value)
def test_stage_data_empty(patch_get):
    """Staging an empty result table yields an empty URL list."""
    table = Table()
    casda = Casda('user', 'password')
    urls = casda.stage_data(table)
    assert urls == []
def test_stage_data_invalid_credentials(patch_get):
    """The mocked 401 auth-failure response surfaces as an HTTPError."""
    prefix = 'https://somewhere/casda/datalink/links?'
    access_urls = [prefix + 'cube-220']
    table = Table([Column(data=access_urls, name='access_url')])
    # get_mockreturn rejects any password other than 'password'.
    casda = Casda('user', 'notthepassword')
    with pytest.raises(requests.exceptions.HTTPError) as excinfo:
        casda.stage_data(table)
def test_stage_data_no_link(patch_get):
    """A datalink response without accessible files raises a ValueError."""
    prefix = 'https://somewhere/casda/datalink/links?'
    # cube-240 is served the DATALINK_NOACCESS fixture by get_mockreturn.
    access_urls = [prefix + 'cube-240']
    table = Table([Column(data=access_urls, name='access_url')])
    casda = Casda('user', 'password')
    # Speed up the job-status polling loop for the test.
    casda.POLL_INTERVAL = 1
    with pytest.raises(ValueError) as excinfo:
        casda.stage_data(table)
    assert "You do not have access to any of the requested data files." in str(excinfo.value)
def test_stage_data(patch_get):
    """Successful staging returns the staged data URL plus its checksum URL."""
    prefix = 'https://somewhere/casda/datalink/links?'
    # cube-244 is the one accessible product in the fixtures.
    access_urls = [prefix + 'cube-244']
    table = Table([Column(data=access_urls, name='access_url')])
    casda = Casda('user', 'password')
    # Speed up the job-status polling loop for the test.
    casda.POLL_INTERVAL = 1
    urls = casda.stage_data(table, verbose=True)
    assert urls == ['http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits.checksum',
                    'http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits']
| ceb8/astroquery | astroquery/casda/tests/test_casda.py | Python | bsd-3-clause | 10,415 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test QiPackage """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import pytest
import qitoolchain.qipackage
import qisys.archive
from qisys.test.conftest import skip_on_win
def test_equality():
    """ Packages with equal name/version compare equal; versions order them. """
    foo1 = qitoolchain.qipackage.QiPackage("foo", "1.2")
    foo2 = qitoolchain.qipackage.QiPackage("foo", "1.2")
    foo3 = qitoolchain.qipackage.QiPackage("foo", "1.3")
    bar1 = qitoolchain.qipackage.QiPackage("bar", "1.2")
    assert foo1 == foo2
    assert foo2 < foo3
    assert foo1 != bar1
def test_from_archive(tmpdir):
    """ A compressed package containing package.xml yields name and version. """
    foo1 = tmpdir.mkdir("foo")
    foo_xml = foo1.join("package.xml")
    foo_xml.write("""<package name="foo" version="0.1"/>""")
    archive = qisys.archive.compress(foo1.strpath, flat=True)
    package = qitoolchain.qipackage.from_archive(archive)
    assert package.name == "foo"
    assert package.version == "0.1"
def test_skip_package_xml(tmpdir):
    """ Installing a package copies the payload but not package.xml itself. """
    foo1 = tmpdir.mkdir("foo")
    foo_xml = foo1.join("package.xml")
    foo_xml.write("""<package name="foo" version="0.1"/>""")
    foo1.ensure("include", "foo.h", file=True)
    foo1.ensure("lib", "libfoo.so", file=True)
    package = qitoolchain.qipackage.QiPackage("foo", path=foo1.strpath)
    dest = tmpdir.join("dest")
    package.install(dest.strpath)
    assert dest.join("include", "foo.h").check(file=True)
    assert dest.join("lib", "libfoo.so").check(file=True)
    # The package metadata must not be installed alongside the payload.
    assert not dest.join("package.xml").check(file=True)
def test_reads_runtime_manifest(tmpdir):
    """ Runtime install only copies files listed in install_manifest_runtime.txt. """
    boost_path = tmpdir.mkdir("boost")
    boost_path.ensure("include", "boost.h", file=True)
    boost_path.ensure("lib", "libboost.so", file=True)
    runtime_manifest = boost_path.ensure("install_manifest_runtime.txt", file=True)
    runtime_manifest.write(b"""lib/libboost.so\n""")
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    dest = tmpdir.join("dest")
    installed = package.install(dest.strpath, components=["runtime"])
    # Headers are not in the runtime manifest, so they must be skipped.
    assert not dest.join("include", "boost.h").check(file=True)
    libbost_so = dest.join("lib", "libboost.so")
    assert libbost_so.check(file=True)
    assert installed == ["lib/libboost.so"]
def test_backward_compat_runtime_install(tmpdir):
    """ Test Backward Compat Runtime """
    pkg_root = tmpdir.mkdir("boost")
    # No install_manifest_runtime.txt here, only a package.xml:
    # the runtime component must still be computed.
    for relpath in (("include", "boost.h"), ("lib", "libboost.so"), ("package.xml",)):
        pkg_root.ensure(*relpath, file=True)
    pkg = qitoolchain.qipackage.QiPackage("boost", path=pkg_root.strpath)
    dest = tmpdir.join("dest")
    copied = pkg.install(dest.strpath, components=["runtime"])
    assert not dest.join("include", "boost.h").check(file=True)
    assert dest.join("lib", "libboost.so").check(file=True)
    assert copied == ["lib/libboost.so"]
def test_reads_release_mask(tmpdir):
    """ Test Reads Release Mask """
    qt_root = tmpdir.mkdir("qt")
    for relpath in (("include", "qt.h"),
                    ("lib", "QtCore4.lib"),
                    ("lib", "QtCored4.lib"),
                    ("bin", "QtCore4.dll"),
                    ("bin", "QtCored4.dll")):
        qt_root.ensure(*relpath, file=True)
    qt_root.ensure("runtime.mask", file=True).write(
        b"""\n# headers\nexclude include/.*\n\n# .lib\nexclude lib/.*\\.lib\n""")
    qt_root.ensure("release.mask", file=True).write(
        b"""\nexclude bin/QtCored4.dll\n""")
    pkg = qitoolchain.qipackage.QiPackage("qt", path=qt_root.strpath)
    dest = tmpdir.join("dest")
    pkg.install(dest.strpath, release=True, components=["runtime"])
    # Runtime mask drops headers and .lib files; release mask drops the debug dll.
    assert dest.join("bin", "QtCore4.dll").check(file=True)
    assert not dest.join("lib", "QtCored4.lib").check(file=True)
def test_include_in_mask(tmpdir):
    """ Test Include In Mask """
    qt_path = tmpdir.mkdir("qt")
    # Fixture: four executables; the mask below excludes every .exe and
    # then re-includes lrelease and lupdate.
    qt_path.ensure("bin", "assistant.exe")  # fixed: was misspelled "assitant.exe"
    qt_path.ensure("bin", "moc.exe")
    qt_path.ensure("bin", "lrelease.exe")
    qt_path.ensure("bin", "lupdate.exe")
    runtime_mask = qt_path.ensure("runtime.mask", file=True)
    runtime_mask.write(b"""\nexclude bin/.*\\.exe\ninclude bin/lrelease.exe\ninclude bin/lupdate.exe\n""")
    dest = tmpdir.join("dest")
    package = qitoolchain.qipackage.QiPackage("qt", path=qt_path.strpath)
    package.install(dest.strpath, release=True, components=["runtime"])
    # "include" rules win over the broad "exclude" rule:
    assert dest.join("bin", "lrelease.exe").check(file=True)
    assert not dest.join("bin", "moc.exe").check(file=True)
def test_load_deps(tmpdir):
    """ Test Load Dependencies """
    pkg_dir = tmpdir.mkdir("libqi")
    pkg_dir.ensure("package.xml").write(b"""
    <package name="libqi">
    <depends testtime="true" names="gtest" />
    <depends runtime="true" names="boost python" />
    </package>
    """)
    pkg = qitoolchain.qipackage.QiPackage("libqi", path=pkg_dir.strpath)
    pkg.load_deps()
    # testtime -> test_depends, runtime -> run_depends; nothing at build time.
    assert pkg.build_depends == set()
    assert pkg.run_depends == {"boost", "python"}
    assert pkg.test_depends == {"gtest"}
def test_extract_legacy_bad_top_dir(tmpdir):
    """ Test Extract Legacy Bad Top Dir """
    work = tmpdir.mkdir("src")
    boost_dir = work.mkdir("boost")
    boost_dir.ensure("lib", "libboost.so", file=True)
    archive = qisys.archive.compress(boost_dir.strpath)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    # Top directory of the archive ("boost") does not match the destination
    # name; extraction must still end up directly under dest.
    qitoolchain.qipackage.extract(archive, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_extract_legacy_ok_top_dir(tmpdir):
    """ Test Extract Legacy Ok Top Dir """
    work = tmpdir.mkdir("src")
    boost_dir = work.mkdir("boost-1.55")
    boost_dir.ensure("lib", "libboost.so", file=True)
    archive = qisys.archive.compress(boost_dir.strpath)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    # Here the archive's top directory matches the destination name.
    qitoolchain.qipackage.extract(archive, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_extract_modern(tmpdir):
    """ Test Extract Modern """
    src = tmpdir.mkdir("src")
    for relpath in (("package.xml",), ("lib", "libboost.so")):
        src.ensure(*relpath, file=True)
    archive_path = tmpdir.join("boost.zip")
    # "Modern" packages are flat archives carrying a package.xml at top level.
    archive = qisys.archive.compress(src.strpath, output=archive_path.strpath, flat=True)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    qitoolchain.qipackage.extract(archive, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_installing_test_component(tmpdir):
    """ Test Installing Test Component """
    pkg_root = tmpdir.mkdir("boost")
    for relpath in (("include", "boost.h"), ("lib", "libboost.so"), ("package.xml",)):
        pkg_root.ensure(*relpath, file=True)
    pkg = qitoolchain.qipackage.QiPackage("boost", path=pkg_root.strpath)
    dest = tmpdir.join("dest")
    pkg.install(dest.strpath, components=["test", "runtime"])
    # Headers are not part of the "test" or "runtime" components.
    assert not dest.join("include", "boost.h").check(file=True)
def test_get_set_license(tmpdir):
    """ Test Get Set License """
    pkg_root = tmpdir.mkdir("boost")
    pkg_root.join("package.xml").write("""\n<package name="boost" version="1.58" />\n""")
    first = qitoolchain.qipackage.QiPackage("boost", path=pkg_root.strpath)
    assert first.license is None
    first.license = "BSD"
    # A fresh instance sees the value again (presumably persisted in
    # package.xml by the license setter — behavior defined in qipackage).
    second = qitoolchain.qipackage.QiPackage("boost", path=pkg_root.strpath)
    assert second.license == "BSD"
def test_post_add_noop(tmpdir):
    """ Test Post Add Noop """
    pkg_root = tmpdir.mkdir("boost")
    pkg_root.join("package.xml").write("""\n<package name="boost" version="1.58" />\n""")
    pkg = qitoolchain.qipackage.QiPackage("boost", path=pkg_root.strpath)
    # No post-add attribute in package.xml: the call must do nothing.
    pkg.post_add()
def test_post_add_does_not_exist(tmpdir):
    """ Test Post Add Does Not Exist """
    boost_path = tmpdir.mkdir("boost")
    boost_path.join("package.xml").write(
        b"""\n<package name="boost" version="1.58" post-add="asdf" />\n"""
    )
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_path.strpath)
    package.load_package_xml()
    # "asdf" is not an executable in $PATH, so running the post-add hook
    # must raise NotInPath.  qisys.command is now imported explicitly at
    # the top of this file instead of relying on a transitive import.
    with pytest.raises(qisys.command.NotInPath):
        package.post_add()
def test_version_str_to_int():
    """ Test version converter """
    # (fixed: dropped the unused `tmpdir` fixture parameter, which forced
    # pytest to create a temporary directory for no reason)
    # Dots are dropped and the digits concatenated; a trailing
    # non-numeric suffix such as "-r152" is ignored.
    assert qitoolchain.qipackage.version_str_to_int("1") == 1
    assert qitoolchain.qipackage.version_str_to_int("1.0") == 10
    assert qitoolchain.qipackage.version_str_to_int("1.0.2") == 102
    assert qitoolchain.qipackage.version_str_to_int("1.5.4") == 154
    assert qitoolchain.qipackage.version_str_to_int("1.5.0-r152") == 150
@skip_on_win
def test_post_add(tmpdir):
    """ Test Post Add """
    pkg_root = tmpdir.mkdir("boost")
    pkg_root.join("package.xml").write(
        b"""\n<package name="boost" version="1.58" post-add="post-add.sh hello world" />\n"""
    )
    hook = pkg_root.join("post-add.sh")
    hook.write('#!/bin/sh\necho $@ > foobar\n')
    os.chmod(hook.strpath, 0o755)
    pkg = qitoolchain.qipackage.QiPackage("boost", path=pkg_root.strpath)
    pkg.load_package_xml()
    pkg.post_add()
    # The hook received its arguments and wrote them into "foobar".
    output = pkg_root.join('foobar').read()
    assert "hello world" in output
| aldebaran/qibuild | python/qitoolchain/test/test_qipackage.py | Python | bsd-3-clause | 9,535 |
# Auto-generated test driver: run the generic classification test for the
# "SVC_sigmoid" model on the "BinaryClass_100" dataset against the "sqlite"
# backend.  NOTE(review): what test_model does is defined in
# sklearn2sql_heroku — not visible from this file.
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SVC_sigmoid" , "BinaryClass_100" , "sqlite")
| antoinecarme/sklearn2sql_heroku | tests/classification/BinaryClass_100/ws_BinaryClass_100_SVC_sigmoid_sqlite_code_gen.py | Python | bsd-3-clause | 142 |
# -*- coding: utf-8 -*-
# Package version as a 5-tuple: (major, minor, micro, release level, serial),
# in the style of sys.version_info.
__version__ = (0, 1, 0, 'final', 0)
| niwinz/django-greenqueue | version.py | Python | bsd-3-clause | 60 |
import json
from psycopg2.extras import Json
from django.contrib.postgres import forms, lookups
from django.core import exceptions
from django.db.models import Field, Transform
from django.utils.translation import ugettext_lazy as _
__all__ = ['JSONField']
class JSONField(Field):
    """Model field that stores arbitrary JSON data in a PostgreSQL ``jsonb`` column."""
    empty_strings_allowed = False
    description = _('A JSON object')
    default_error_messages = {
        'invalid': _("Value must be valid JSON."),
    }
    def db_type(self, connection):
        """Return the database column type (always PostgreSQL ``jsonb``)."""
        return 'jsonb'
    def get_transform(self, name):
        """Resolve registered transforms first; fall back to JSON key access.

        Any transform name that is not registered is treated as a key
        lookup, so ``field__foo`` accesses key ``"foo"`` of the stored value.
        """
        transform = super(JSONField, self).get_transform(name)
        if transform:
            return transform
        return KeyTransformFactory(name)
    def get_prep_value(self, value):
        # Wrap the value in psycopg2's Json adapter so the driver serializes
        # it; None passes through unchanged (becomes SQL NULL).
        if value is not None:
            return Json(value)
        return value
    def get_prep_lookup(self, lookup_type, value):
        # Key-existence lookups take plain strings / lists of strings, so
        # they must not be wrapped as JSON values.
        if lookup_type in ('has_key', 'has_keys', 'has_any_keys'):
            return value
        if isinstance(value, (dict, list)):
            return Json(value)
        return super(JSONField, self).get_prep_lookup(lookup_type, value)
    def validate(self, value, model_instance):
        """Raise ValidationError if ``value`` is not JSON-serializable."""
        super(JSONField, self).validate(value, model_instance)
        try:
            json.dumps(value)
        except TypeError:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )
    def value_to_string(self, obj):
        # Serialization hook: return the raw Python value as-is.
        value = self.value_from_object(obj)
        return value
    def formfield(self, **kwargs):
        # Default to the JSON-aware form field unless the caller overrides it.
        defaults = {'form_class': forms.JSONField}
        defaults.update(kwargs)
        return super(JSONField, self).formfield(**defaults)
# Register the postgres containment and key-existence lookups on JSONField
# (lookup classes are defined in django.contrib.postgres.lookups).
JSONField.register_lookup(lookups.DataContains)
JSONField.register_lookup(lookups.ContainedBy)
JSONField.register_lookup(lookups.HasKey)
JSONField.register_lookup(lookups.HasKeys)
JSONField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
    """Transform compiling JSON key/index access to ``->`` or ``#>`` SQL."""
    def __init__(self, key_name, *args, **kwargs):
        super(KeyTransform, self).__init__(*args, **kwargs)
        # The JSON key (or array index, as a string) this transform accesses.
        self.key_name = key_name
    def as_sql(self, compiler, connection):
        # Collapse a chain of nested KeyTransforms into a single path so
        # e.g. "a__b__c" compiles to one "#>" path operation instead of
        # chained "->" operators.
        key_transforms = [self.key_name]
        previous = self.lhs
        while isinstance(previous, KeyTransform):
            key_transforms.insert(0, previous.key_name)
            previous = previous.lhs
        lhs, params = compiler.compile(previous)
        if len(key_transforms) > 1:
            return "{} #> %s".format(lhs), [key_transforms] + params
        # Single key: a purely numeric name is emitted unquoted (array
        # index), anything else as a quoted object key.
        try:
            int(self.key_name)
        except ValueError:
            lookup = "'%s'" % self.key_name
        else:
            lookup = "%s" % self.key_name
        return "%s -> %s" % (lhs, lookup), params
class KeyTransformFactory(object):
    """Callable that builds KeyTransform instances bound to a fixed key name."""
    def __init__(self, key_name):
        self.key_name = key_name
    def __call__(self, *args, **kwargs):
        # Invoked by the lookup machinery like a transform class.
        return KeyTransform(self.key_name, *args, **kwargs)
| yephper/django | django/contrib/postgres/fields/jsonb.py | Python | bsd-3-clause | 3,093 |
# VHDL sources declared by this manifest — presumably consumed by a
# hdlmake-style build flow (TODO confirm against the build setup).
files = [ "cocotb_prio3.vhd",
          "cocotb_prio2.vhd",
          "cocotb_wb_loopback.vhd",
        ]
| mkreider/cocotb2 | examples/wb/hdl/Manifest.py | Python | bsd-3-clause | 96 |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields
import cybox.bindings.win_volume_object as win_volume_binding
from cybox.objects.volume_object import Volume
from cybox.common import BaseProperty, String
class WindowsDrive(BaseProperty):
    """CybOX property holding a Windows drive type value."""
    _binding = win_volume_binding
    _binding_class = win_volume_binding.WindowsDriveType
    _namespace = "http://cybox.mitre.org/objects#WinVolumeObject-2"
    # Controlled vocabulary of drive types (names mirror the Win32
    # GetDriveType() constants — presumably by design; confirm in the schema).
    TYPE_DRIVE_UNKNOWN = "DRIVE_UNKNOWN"
    TYPE_DRIVE_NO_ROOT_DIR = "DRIVE_NO_ROOT_DIR"
    TYPE_DRIVE_REMOVABLE = "DRIVE_REMOVABLE"
    TYPE_DRIVE_FIXED = "DRIVE_FIXED"
    TYPE_DRIVE_REMOTE = "DRIVE_REMOTE"
    TYPE_DRIVE_CDROM = "DRIVE_CDROM"
    TYPE_DRIVE_RAMDISK = "DRIVE_RAMDISK"
class WindowsVolumeAttribute(BaseProperty):
    """CybOX property for a single Windows volume attribute flag."""
    _binding = win_volume_binding
    _binding_class = win_volume_binding.WindowsVolumeAttributeType
    _namespace = "http://cybox.mitre.org/objects#WinVolumeObject-2"
    # Controlled vocabulary of attribute values.
    TYPE_READ_ONLY = "ReadOnly"
    TYPE_HIDDEN = "Hidden"
    TYPE_NO_DEFAULT_DRIVE_LETTER = "NoDefaultDriveLetter"
    TYPE_SHADOW_COPY = "ShadowCopy"
class WindowsVolumeAttributesList(entities.EntityList):
    """List of WindowsVolumeAttribute entries (``Attribute`` child elements)."""
    _binding = win_volume_binding
    _binding_class = win_volume_binding.WindowsVolumeAttributesListType
    _namespace = "http://cybox.mitre.org/objects#WinVolumeObject-2"
    attribute = fields.TypedField("Attribute", WindowsVolumeAttribute, multiple=True)
class WinVolume(Volume):
    """CybOX object for a Windows volume; extends the generic Volume object."""
    _binding = win_volume_binding
    _binding_class = win_volume_binding.WindowsVolumeObjectType
    _namespace = "http://cybox.mitre.org/objects#WinVolumeObject-2"
    _XSI_NS = "WinVolumeObj"
    _XSI_TYPE = "WindowsVolumeObjectType"
    # Windows-specific fields on top of the base Volume properties.
    attributes_list = fields.TypedField("Attributes_List", WindowsVolumeAttributesList)
    drive_letter = fields.TypedField("Drive_Letter", String)
    drive_type = fields.TypedField("Drive_Type", WindowsDrive)
| CybOXProject/python-cybox | cybox/objects/win_volume_object.py | Python | bsd-3-clause | 1,947 |
#! /usr/bin/env python
def Test():
    """Print and return a small demo string (Python 2 syntax)."""
    # NOTE(review): there is no space between the two parts, so the result
    # is "hi fromcall " — possibly intentional for the AIML demo; confirm.
    text ='hi from'
    k = text + "call "
    print k
    return k
def euro():
    # Demo helper: just prints "high" (Python 2 print statement).
    print "high"
| peterheim1/robbie_ros | robbie_ai/nodes/aiml/know.py | Python | bsd-3-clause | 147 |
#! /usr/bin/env python
#----------------------------------------------------------------
# Author: Jason Gors <jasonDOTgorsATgmail>
# Creation Date: 07-30-2013
# Purpose: this is where the program is called into action.
#----------------------------------------------------------------
import argparse
import os
from os.path import join
import sys
import copy
from collections import OrderedDict
from Bep.core import usage
from Bep.core.release_info import __version__, name
from Bep.core import utils
from Bep.cmds import install, list_packages, remove_packages, turn_off, turn_on, update_packages
usr_home_dir = os.path.expanduser('~') # specifies the user's home dir
#top_level_dir = join(options['top_level_dir'], '.{}'.format(name))
# All state lives under a hidden directory in the user's home, "~/.<name>".
top_level_dir = join(usr_home_dir, '.{}'.format(name))
installed_pkgs_dir = join(top_level_dir, 'installed_pkgs')
install_logs_dir = join(top_level_dir, '.install_logs')
install_dirs = dict(installed_pkgs_dir=installed_pkgs_dir, install_logs_dir=install_logs_dir)
#installation_db = 'installation_db.json'
#installation_db_path = join(top_level_dir, installation_db)
# Optional user-maintained packages file in the home dir: "~/.<name>_packages".
packages_file = '.{}_packages'.format(name)
packages_file_path = join(usr_home_dir, packages_file)
# Repository kinds that can be installed from; "packages" triggers a batch
# install from the packages file ('remote' / 'stable' are not implemented).
repo_choices = ['github', 'bitbucket', 'local'] # 'remote'
other_choices = ['packages'] # 'stable'
possible_choices = repo_choices + other_choices
def main(): # needs to be done as a main func for setuptools to work correctly in creating an executable
    """Command-line entry point for bep.

    Builds the argparse command tree (install / list / update / remove /
    turn_on / turn_off), special-cases the ``--all`` flag and the
    "how to run" display by inspecting sys.argv before argparse runs, then
    dispatches to the matching command module.  NOTE(review): the flow is
    heavily order-dependent — it mixes raw sys.argv checks with
    parse_known_args(), so edits must preserve statement order.
    """
    # for the approach i am taking here using nested subparsers:
    # https://mail.python.org/pipermail/python-list/2010-August/585617.html
    # nargs options:
    # (default): by not specifying nargs at all, you just get a string of 1 item
    # = N  where N is some specified number of args
    # = '?' makes a string of one item, and if no args are given, then default is used.
    # = '*' makes a list of all args passed after command and if no args given, then default is used.
    # = '+' makes list of all args passed after command, but requires at least one arg
    top_parser = argparse.ArgumentParser(description=name.upper(),
                                         formatter_class=argparse.RawDescriptionHelpFormatter,
                                         #formatter_class=argparse.RawTextHelpFormatter,
                                         #add_help=False,
                                         epilog=usage.epilog_use)
    #################################
    ### this goes at the top level
    top_parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
    top_parser.add_argument('-l', '--language', nargs='?', default='python', help=usage.lang_use)
    group = top_parser.add_mutually_exclusive_group()
    group.add_argument("-v", "--verbose", action="store_true", help=usage.verbose_use)
    group.add_argument("-q", "--quiet", action="store_true", help=usage.quiet_use)
    #################################
    def check_for_all_error(cmd_arg):
        # Guard against common misspellings of the --all flag.
        if cmd_arg in ['all', 'All', 'ALL', '--All', '--ALL']:
            raise SystemExit("\nError: Did you mean to specifiy --all instead?")
    # If --all is passed in:
    # Skip stuff below if '--all' is specified w/ one of these accepted cmds
    # (this is some seriously hacky brute force shit!)
    build_up_subparsers = True
    additional_args = []
    cmds_that_accept_all_arg = ['update', 'remove', 'turn_off']
    for cmd in cmds_that_accept_all_arg:
        if cmd in sys.argv:
            for i in sys.argv: # test for misspecified '--all' command
                check_for_all_error(i)
            if '--all' in sys.argv:
                #print(sys.argv)
                build_up_subparsers = False
                # TODO add help page for all
                top_parser.add_argument('--all', action='store_true', help=usage.all_use) #metavar="arg")
                args = top_parser.parse_known_args()
                args, additional_args = args
                if len(additional_args) > 1: # this makes it so that it could only be len(additional_args)==1
                    error_all_arg = "--all can only be called with one of the following args:\n\t"
                    error_all_arg = error_all_arg + '{update, remove, turn_off}'
                    top_parser.error(error_all_arg)
                #else:
                    #additional_args = additional_args[0]
    # To display how to run a command:
    # look at all pkgs and check that passed in package name is one that's already installed
    everything_already_installed = utils.all_pkgs_and_branches_for_all_pkg_types_already_installed(installed_pkgs_dir)
    any_of_this_pkg_already_installed = lambda pkg_to_process: utils.lang_and_pkg_type_and_pkg_and_branches_tuple(
        pkg_to_process, everything_already_installed)
    cmds_that_can_display_how_to = cmds_that_accept_all_arg + ['turn_on']
    for cmd in cmds_that_can_display_how_to: # everything except install i think
        if (cmd in sys.argv) and ('--all' not in sys.argv):
            if ('-h' not in sys.argv) and ('--help' not in sys.argv):
                args = top_parser.parse_known_args()
                args, additional_args = args
                if len(additional_args) == 2:
                    additional_args_copy = copy.copy(additional_args)
                    additional_args_copy.remove(cmd) # 2 things in here, one equal to cmd, the other is what we want to see if it's alreay installed
                    potential_pkg_to_proc = additional_args_copy[0]
                    #print any_of_this_pkg_already_installed(potential_pkg_to_proc)
                    if any_of_this_pkg_already_installed(potential_pkg_to_proc):
                        # should i make a function call out of this instead of relying on the command to be handled below?
                        print(" **** This is how to {} {} ****".format(cmd, potential_pkg_to_proc))
                        build_up_subparsers = False
                    elif potential_pkg_to_proc not in possible_choices: # else if the other arg/package name passed in is not a pkg_already_installed (& not one of the next possible cmd options)
                        #print an error say that whatever is passed in cannot be updated/turned_on/etc
                        #b/c it's not currently installed.
                        error_msg = "cannot {} {}: not a currently installed package.\n".format(cmd, potential_pkg_to_proc)
                        error_msg = error_msg + "[Execute `{} list` to see installed packages.]".format(name)
                        top_parser.error(error_msg)
                    #else: # want this instead b/c otherwise the above hides the help pages
                        #additional_args = [] # set back to empty to avoid the flag at the end of argparse stuff
                #else:
                    #error_msg = "An already installed package name must be passed in with {}".format(cmd)
                    #top_parser.error(error_msg)
                else:
                    additional_args = [] # set back to empty to avoid the flag at the end of argparse stuff
    if build_up_subparsers:
        top_subparser = top_parser.add_subparsers(title='Commands',
                                                  description='[ These are the commands that can be passed to %(prog)s ]',
                                                  #help=usage.subparser_use)
                                                  help='[ Command specific help info ]')
        ### create parser for the "list" command
        # maybe make it so that it can list all branches installed for a specific pkg,
        parser_list = top_subparser.add_parser('list', help=usage.list_use)
        parser_list.add_argument('list_arg', action="store_true", help=usage.list_sub_use) #metavar="arg")
        class CheckIfCanBeInstalled(argparse.Action):
            ''' makes sure a repo to install has both a user_name and repo_name:
                    eg. ipython/ipython
                or is an actual path to a repo on the local filesystem'''
            def __call__(self, parser, namespace, arg_value, option_string=None):
                # parser.prog ends with the pkg-type subcommand name
                # (github / bitbucket / local).
                pkg_type = parser.prog.split(' ')[-1]
                if utils.check_if_valid_pkg_to_install(arg_value, pkg_type):
                    setattr(namespace, self.dest, arg_value)
                else:
                    if pkg_type == 'local':
                        error_msg = "\n\tIs not a path that exists on local filesystem."
                        raise parser.error(arg_value + error_msg)
                    else:
                        error_msg = '\nneed to make sure a username and repo_name are specified, like so:\n\tusername/repo_name'
                        raise parser.error(arg_value + error_msg)
        ##################################################
        cmd_help = vars(usage.cmd_help)
        for cmd in ['install', 'update', 'remove', 'turn_off', 'turn_on']:
            if cmd == 'install':
                install_parser = top_subparser.add_parser(cmd, help=usage.install_use.format(packages_file),
                                                          formatter_class=argparse.RawTextHelpFormatter)
                install_parser.set_defaults(top_subparser=cmd)
                install_subparser = install_parser.add_subparsers(dest='pkg_type', help=usage.install_sub_use.format(packages_file))
                for c in repo_choices:
                    pkg_type_to_install = install_subparser.add_parser(c)
                    # pkg_type_to_install.set_defaults(pkg_type_to_install=c) # is the same as 'pkg_type' dest above
                    pkg_type_to_install.add_argument('pkg_to_install', # like ipython/ipython
                                                     action=CheckIfCanBeInstalled) # actions here to make sure it's legit
                    # local repos don't get to have a branch specified; a branch would need to be checked out first, then installed.
                    #if c != 'local':
                        #pkg_type_to_install.add_argument('-b', '--branch', dest='branch', default=None)#, action=CheckBranch) # the branch bit is filled out below
                    if c == 'github':
                        pkg_type_to_install.add_argument('repo_type', default='git', nargs='?')
                    elif c == 'bitbucket':
                        pkg_type_to_install.add_argument('repo_type', choices=['git', 'hg'])
                    # elif c == 'local': # just get the type of repo from the local filesystem so it doesn't have to be specified
                    #     pkg_type_to_install.add_argument('repo_type', choices=['git', 'hg', 'bzr'])
                    #elif c == 'remote': # TODO not implemented but would be specified like so
                        #pkg_type_to_install.add_argument('repo_type', choices=['git', 'hg', 'bzr'])
                    pkg_type_to_install.add_argument('-b', '--branch', dest='branch', default=None)#, action=CheckBranch) # the branch bit is filled out below
                for c in other_choices:
                    if c == 'packages':
                        pkg_type_to_install = install_subparser.add_parser(c, help=usage.packages_file_use.format(packages_file))
                    #elif c == 'stable': # TODO not implemented
                        #pkg_type_to_install = install_subparser.add_parser(c)
                        #pkg_type_to_install.add_argument('pkg_to_install') # like ipython
                        ##pkg_type_to_install.add_argument('--pversion') # TODO like 1.2.1 (add this in later to install different version of a stable pkg)
                # NOTE this seems like a better way to go in the future:
                # install_parser.set_defaults(func=run_install)
                # then run_install would be defined to run the install process (rather than having the conditionals below)
                # def run_install(args):
                #     install_arg = args.install_arg # would be a list of pkgs or a string of the packages file
                #     ...process the install_arg to decide what to install
                #     ...then do the install
            ##################################################
            else:
                subparser_parser = top_subparser.add_parser(cmd, help=cmd_help['{}_use'.format(cmd)],
                                                            formatter_class=argparse.RawTextHelpFormatter)
                subparser_parser.set_defaults(top_subparser=cmd)
                ### didn't work, not sure why yet
                #all_dest = '{}_ALL'.format(cmd)
                #subparser_parser.add_argument('--all',
                                    ##help=usage.remove_sub_use.format(name=name), # FIXME not sure why this wouldn't work
                                    ##action=CheckIfALL, action='store_true')
                #cur_args = vars(top_parser.parse_args())
                #print(cur_args)
                #if 'all' in cur_args:
                    #if cur_args['all']:
                        #break
                this_cmds_help = cmd_help['{}_sub_use'.format(cmd)].format(name=name)
                subparsers_subparser = subparser_parser.add_subparsers(dest='pkg_type', help=this_cmds_help)
                for c in repo_choices:
                    pkg_type_to_proc = subparsers_subparser.add_parser(c)
                    pkg_type_to_proc.add_argument('pkg_to_{}'.format(cmd)) # like ipython
                    pkg_type_to_proc.add_argument('-b', '--branch', dest='branch', default=None) # needs to be specified in script (for installs though it use default name if not specified)
                #for c in other_choices: #TODO
                    ##if c == 'packages': # packages args only used for installs
                        ##pkg_type_to_proc = subparsers_subparser.add_parser(c)
                    #if c == 'stable':
                        #pkg_type_to_proc = subparsers_subparser.add_parser(c)
                        #pkg_type_to_proc.add_argument('pkg_to_{}'.format(cmd)) # like ipython
                        #pkg_type_to_proc.add_argument('--pversion', help='package version') # like 1.2.1 (default should be the newest, but can specify older ones)
        ##################################################
        args = top_parser.parse_args()
        # handle branches here
        # (for installs, a missing branch falls back to the repo's default;
        # for local packages the checked-out branch is always used)
        if ('top_subparser' in args) and (args.top_subparser == 'install'):
            if ('branch' in args) and (args.branch == None):
                if args.pkg_type == 'local': # for local, grab the currently checked out branch from the repo and set that as the branch to install
                    branch, repo_type = utils.get_checked_out_local_branch(args.pkg_to_install)
                    args.repo_type = repo_type
                else:
                    branch = utils.get_default_branch(args.repo_type)
                args.branch = branch
            elif ('branch' in args) and (args.branch != None):
                if args.pkg_type == 'local': # for local, don't allow branch to be specified; just use currently checked out branch
                    error_msg = "for `local` packages a branch cannot be specified;\n"
                    error_msg = error_msg + "check out the desired branch from the repo itself, then install."
                    raise top_parser.error(error_msg)
        elif ('top_subparser' in args) and (args.top_subparser != 'install'):
            if ('branch' in args) and (args.branch == None):
                error_msg = 'need to make sure a branch is specified;\n'
                error_msg = error_msg + "[Execute `{} list` to see installed packages and branches.]".format(name)
                raise top_parser.error(error_msg)
    class noise(object):
        # Tiny holder so verbosity flags can be passed around as one object.
        verbose = args.verbose
        quiet = args.quiet
    """
    # REMOVE LATER...this just shows what we're dealing with here
    print('##########################################################')
    print(args)
    if additional_args:
        print(additional_args)
    print('##########################################################')
    #raise SystemExit
    """
    #--------------------------------------------------------------------------------------------------------------
    if noise.quiet:
        print('-'*60)
    #######################################################################################################################
    #### install pkg(s)
    kwargs = dict(packages_file=packages_file, packages_file_path=packages_file_path,
                  noise=noise, install_dirs=install_dirs, installed_pkgs_dir=installed_pkgs_dir)
    if ('top_subparser' in args) and (args.top_subparser == 'install'):
        any_pkgs_processed = install.install_cmd(args, **kwargs)
    #######################################################################################################################
    #######################################################################################################################
    #### if nothing is installed, then don't continue on to other commands (since they only process currently installed stuff)
    everything_already_installed = utils.all_pkgs_and_branches_for_all_pkg_types_already_installed(installed_pkgs_dir)
    if not everything_already_installed:
        raise SystemExit('\nNo packages installed.')
    #######################################################################################################################
    #######################################################################################################################
    #### list installed pkg(s) (by each package type)
    elif 'list_arg' in args:
        list_packages.list_cmd(everything_already_installed, noise)
    #######################################################################################################################
    #######################################################################################################################
    # for everything else (update, remove, turn_on/off)
    #elif args:
    #elif ((('top_subparser' in args) and (args.top_subparser in ['update', 'remove', 'turn_on', 'turn_off'])) or
            #(('update' in additional_args) or ('remove' in additional_args) or ('turn_off' in additional_args) or
            #('turn_on' in additional_args))):
    else: # FIXME not sure this is as good as it could be by just using else instead of something more specific
        actions_to_take = {}
        #top_level_any_pkgs_processed = False
        for lang_dir_name, pkg_type_dict in everything_already_installed.items():
            for pkg_type, pkgs_and_branches in pkg_type_dict.items():
                any_pkgs_processed = False
                #if pkgs_and_branches: # don't think i need this
                pkgs_status = utils.pkgs_and_branches_for_pkg_type_status(pkgs_and_branches)
                pkgs_and_branches_on = pkgs_status['pkg_branches_on']
                pkgs_and_branches_off = pkgs_status['pkg_branches_off']
                kwargs = dict(lang_dir_name=lang_dir_name, pkg_type=pkg_type, noise=noise, install_dirs=install_dirs,
                              pkgs_and_branches_on=pkgs_and_branches_on, pkgs_and_branches_off=pkgs_and_branches_off,
                              additional_args=additional_args, everything_already_installed=everything_already_installed)
                if ('pkg_to_update' in args) or ('update' in additional_args):
                    any_pkgs_processed = update_packages.update_cmd(args, **kwargs)
                elif ('pkg_to_remove' in args) or ('remove' in additional_args):
                    any_pkgs_processed = remove_packages.remove_cmd(args, **kwargs)
                elif ('pkg_to_turn_off' in args) or ('turn_off' in additional_args):
                    any_pkgs_processed = turn_off.turn_off_cmd(args, **kwargs)
                elif ('pkg_to_turn_on' in args) or ('turn_on' in additional_args):
                    any_pkgs_processed = turn_on.turn_on_cmd(args, **kwargs)
                if any_pkgs_processed:
                    #top_level_any_pkgs_processed = True #+= 1
                    if type(any_pkgs_processed) == dict: # it will be a dict when a pkg didn't actually get processed, but has commands to get processed
                        actions_to_take.update(any_pkgs_processed)
        #if not top_level_any_pkgs_processed: # NOTE KEEP for now, but i don't think this will ever get hit?
            #utils.when_not_quiet_mode('\n[ No action performed ]'.format(pkg_type), noise.quiet)
        # NOTE(review): the interactive prompts below use Python 2 idioms
        # (raw_input, dict.items()[0]) — this module predates Python 3.
        if actions_to_take:
            if len(actions_to_take) == 1:
                alert, cmd = actions_to_take.items()[0]
                option = '\n* {}\n{}\n'.format(alert, cmd)
                print(option)
                if not (cmd.startswith('****') and cmd.endswith('****')):
                    print('-'*60)
                    msg = "The above version is installed, would you like to run the\ncommand [y/N]? "
                    response = raw_input(msg)
                    if response:
                        response = response.lower()
                        if response in ['y', 'yes']:
                            utils.cmd_output(cmd)
                        elif response in ['n', 'no']:
                            print("\nBye then.")
                        else:
                            raise SystemExit("\nError: {}: not valid input".format(response))
                    else:
                        print("\nOk, bye then.")
            elif len(actions_to_take) > 1:
                actions_to_take_with_num_keys = {} # takes the alert, cmd (key, val) pairs from actions_to_take and makes them as a value tuple, w/ a num as each pair's key.
                for num, alert_key in enumerate(actions_to_take, start=1): # actions_to_take is a dict with alert, cmd (key, val) pairs
                    actions_to_take_with_num_keys[num] = (alert_key, actions_to_take[alert_key])
                actions_to_take_with_num_keys = OrderedDict(sorted(actions_to_take_with_num_keys.items(), key=lambda t: t[0])) # sorted by key (which are nums)
                for num_key, alert_and_cmd_tuple_val in actions_to_take_with_num_keys.items():
                    if num_key == 1:
                        print('')
                    alert, cmd = alert_and_cmd_tuple_val
                    option = '{}. {}\n{}\n'.format(num_key, alert, cmd)
                    print(option)
                print('-'*60)
                msg = "The versions above are installed. If you'd like to run the command\n"
                msg = msg + "for an item, enter the number (if not, then just hit enter to exit). "
                response = raw_input(msg)
                if response:
                    try:
                        response = int(response)
                    except ValueError:
                        raise SystemExit("\nError: invalid response: {}".format(response))
                    if response in range(1, len(actions_to_take_with_num_keys)+1):
                        #print response # now run the command
                        # Could either 1. open a subprocess and run from the command line -- easy way
                        # or 2. try to pass back into the the command that got us here -- better way
                        # Number 2 would involve something like this with updating the kwargs:
                        #kwargs = dict(lang_dir_name=lang_dir_name, pkg_type=pkg_type, noise=noise, install_dirs=install_dirs,
                                        #pkgs_and_branches_on=pkgs_and_branches_on, pkgs_and_branches_off=pkgs_and_branches_off,
                                        #additional_args=additional_args, everything_already_installed=everything_already_installed)
                        #actions.update_action(args, **kwargs)
                        # Doing number 1 above, just to get it working, though 2 would probably be better in long run.
                        cmd = actions_to_take_with_num_keys[response][1] # this gets the command from the alert, cmd tuple
                        if (cmd.startswith('****') and cmd.endswith('****')):
                            print("\nNo command to process,\n{}".format(cmd))
                        else:
                            utils.cmd_output(cmd)
                    else:
                        raise SystemExit("\nError: invalid response: {}".format(response))
                else:
                    print("\nOk, bye then.")
| b-e-p/bep | Bep/run.py | Python | bsd-3-clause | 24,761 |
from django.conf.urls.defaults import *
# URL routes for the registration app (legacy Django 1.x patterns() style,
# with views referenced by dotted string path).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'myblog.views.home', name='home'),
    url(r'^login/$', 'reg.views.loginView'),
    url(r'^logout/$', 'reg.views.logoutView'),
)
| theicfire/djangofun | reg/urls.py | Python | bsd-3-clause | 232 |
"""Test config for channels"""
import pytest
@pytest.fixture(autouse=True)
def mock_search_tasks(mocker):
    """Patch the helpers so they don't fire celery tasks"""
    # autouse: applied to every test in this package.  Uses pytest-mock's
    # ``mocker`` fixture; the patch object is returned so tests can make
    # assertions about calls if they need to.
    return mocker.patch("channels.api.search_task_helpers")
| mitodl/open-discussions | channels/conftest.py | Python | bsd-3-clause | 228 |
# Copyright (c) 2016 Nokia, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
class ChangeData(object):
    """Records the before/after values of attributes touched by an update."""

    def __init__(self):
        # values after the update, keyed by attribute name
        self.new = {}
        # previous values of attributes that were overwritten
        self.prev = {}

    def __str__(self):
        '''returns simple dict representation of the mapping'''
        return "new = {0}, prev = {1}".format(self.new, self.prev)
# Compatibility: the ABCs moved to ``collections.abc`` in Python 3.3 and the
# old aliases were removed from ``collections`` in Python 3.10.
try:
    from collections.abc import MutableMapping as _MutableMapping
except ImportError:  # Python 2 fallback
    from collections import MutableMapping as _MutableMapping


class ObjBase(_MutableMapping):
    '''
    Mapping that works like both a dict and a mutable object, i.e.
    d = ObjBase({'foo': 'bar'})
    and
    d.foo returns 'bar'
    '''
    # ``__init__`` method required to create instance from class.
    def __init__(self, attributes=None):
        '''Seed the instance dict with the given mapping, if any.'''
        if attributes is not None:
            self.__dict__.update(attributes)

    # The next five methods are requirements of the ABC.
    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __getitem__(self, key):
        return self.__dict__[key]

    def __delitem__(self, key):
        del self.__dict__[key]

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __str__(self):
        '''returns simple dict representation of the mapping'''
        return str(self.__dict__)

    def __repr__(self):
        '''echoes class, id, & reproducible representation in the REPL'''
        return '{}, {}'.format(super(ObjBase, self).__repr__(), self.__dict__)

    def update_attrs(self, new_attributes):
        '''Apply *new_attributes* and return a ChangeData describing what changed.

        Unchanged keys are not recorded; overwritten keys appear in both
        ``changes.prev`` (old value) and ``changes.new`` (new value); brand
        new keys appear only in ``changes.new``.
        '''
        changes = ChangeData()
        for key, value in new_attributes.items():
            if key in self.__dict__:
                if value == self.__dict__[key]:
                    continue  # unchanged: record nothing
                changes.prev[key] = self.__dict__[key]
            self.__dict__[key] = value
            changes.new[key] = value
        return changes
class Port(ObjBase):
    """A network port tracked by the shim; carries a hidden id and binding state."""

    def __init__(self, id, attributes=None):
        # bug fix: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; name the class explicitly instead.
        super(Port, self).__init__(attributes)
        self.__dict__["__id"] = id
        self.__dict__["__state"] = "Unbound"  # "Unbound", "Bound", "InUse"
class DataObj(ObjBase):
    """Generic data object identified by a hidden ``__id`` entry."""

    def __init__(self, id, attributes=None):
        # bug fix: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; name the class explicitly instead.
        super(DataObj, self).__init__(attributes)
        self.__dict__["__id"] = id
class Model(object):
    """Top-level container for the shim's in-memory state."""

    def __init__(self):
        # Port objects, keyed as supplied by the caller
        self.ports = {}
| txdev/nuage_shim | nuage_shim/model.py | Python | bsd-3-clause | 3,042 |
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from django.template.loader import render_to_string
from django.conf import settings
from preferences.models import UserPreferences
from summaries.models import Unseen
from django.contrib.sites.models import Site
from optparse import make_option
from django.core.mail import EmailMultiAlternatives
class Command(BaseCommand):
    """Build and send the periodic "unseen links" summary mail to every user
    whose preferences request the given period (daily/weekly/monthly)."""

    args = 'daily | weekly | monthly'
    help = 'Builds and sends summary mails for given period'
    option_list = BaseCommand.option_list + (
        make_option('--dry-run',
            action='store_true',
            dest='dry',
            default=False,
            help='Run without posting emails and writing them on stdout'),
    )

    def handle(self, *args, **options):
        """Validate the period argument, then send (or with --dry-run print)
        one summary per user who has unseen links.

        Raises CommandError when the period argument is missing or invalid.
        """
        if not len(args) == 1:
            raise CommandError("Give a period please")
        period = args[0]
        if period not in ("daily", "weekly", "monthly"):
            raise CommandError("Period must be daily, weekly or monthly.")
        users = [preference.user for preference in
                 UserPreferences.objects.filter(summary_mails=period)]
        for user in users:
            unseen_models = Unseen.objects.filter(user=user)
            unseen_links = [unseen.link for unseen in unseen_models]
            if not unseen_links:
                continue
            # Both templates render the same context; build it once.
            context = {
                "user": user,
                "links": unseen_links,
                "site": Site.objects.get_current()
            }
            email_title = "%s new links for you:" % len(unseen_links)
            email_body_txt = render_to_string("summaries/body.txt", context)
            email_body_html = render_to_string("summaries/body.html", context)
            if options['dry']:
                # Bug fix: --dry-run previously still sent the email and only
                # skipped the delete, contradicting its help text. Now it
                # prints what would be sent and leaves the Unseen rows alone.
                self.stdout.write("[dry-run] %s\n%s" % (email_title, email_body_txt))
                continue
            email = EmailMultiAlternatives(
                email_title,
                email_body_txt,
                "Linkfloyd %s" % settings.DEFAULT_FROM_EMAIL,
                [user.email, ])
            email.attach_alternative(email_body_html, "text/html")
            email.send()
            self.stdout.write("Summary email for %s sent\n" % user)
            # Mark links as seen only once the mail actually went out.
            unseen_models.delete()
| linkfloyd/linkfloyd | linkfloyd/summaries/management/commands/send_summary_mails.py | Python | bsd-3-clause | 2,371 |
from django.core.urlresolvers import reverse
from django.db import models
from midnight_main.models import BaseTree, Base, BreadCrumbsMixin, BaseComment
from ckeditor.fields import RichTextField
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail import ImageField
from mptt.fields import TreeManyToManyField
class Section(BreadCrumbsMixin, BaseTree):
    """
    News category model (an MPTT tree node).
    """
    # Human-readable name and unique URL fragment for the category.
    title = models.CharField(max_length=255, verbose_name=_('Title'))
    slug = models.SlugField(max_length=255, unique=True, verbose_name=_('Slug'))
    # Manual ordering weight; lower values sort first (see MPTTMeta below).
    sort = models.IntegerField(default=500, verbose_name=_('Sort'))
    # SEO meta fields.
    metatitle = models.CharField(max_length=2000, blank=True, verbose_name=_('Title'))
    keywords = models.CharField(max_length=2000, blank=True, verbose_name=_('Keywords'))
    description = models.CharField(max_length=2000, blank=True, verbose_name=_('Description'))

    def get_absolute_url(self):
        # Canonical URL: the news list filtered down to this section.
        return reverse('midnight_news:news_list', kwargs={'slug': self.slug})

    def __str__(self):
        return self.title

    class MPTTMeta:
        # Siblings are inserted ordered by the manual `sort` weight.
        order_insertion_by = ['sort']

    class Meta:
        verbose_name = _('NewsSection')
        verbose_name_plural = _('NewsSections')
class News(Base):
    """
    News item model.
    """
    title = models.CharField(max_length=255, verbose_name=_('Title'))
    slug = models.SlugField(max_length=255, unique=True, verbose_name=_('Slug'))
    date = models.DateField(verbose_name=_('Date'), blank=False)
    # An item may belong to several category-tree sections.
    sections = TreeManyToManyField(Section, verbose_name=_('Sections'))
    image = ImageField(upload_to='news', verbose_name=_('Image'), blank=True)
    # Short teaser text; full body is the rich-text `text` field.
    annotation = models.TextField(blank=True, verbose_name=_('Annotation'))
    text = RichTextField(blank=True, verbose_name=_('Text'))
    # Whether commenting is enabled for this item.
    comments = models.BooleanField(default=False, verbose_name=_('Comments'))
    # SEO meta fields.
    metatitle = models.CharField(max_length=2000, blank=True, verbose_name=_('Title'))
    keywords = models.CharField(max_length=2000, blank=True, verbose_name=_('Keywords'))
    description = models.CharField(max_length=2000, blank=True, verbose_name=_('Description'))

    def get_absolute_url(self):
        # NOTE(review): assumes the item belongs to at least one section;
        # sections.all()[0] raises IndexError otherwise — confirm with callers.
        return reverse('midnight_news:news_detail', kwargs={'section_slug': self.sections.all()[0].slug, 'slug': self.slug})

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = _('NewsItem')
        verbose_name_plural = _('News')
class NewsComment(BaseComment):
    """
    Comment attached to a news item.
    """
    # The news item this comment belongs to.
    obj = models.ForeignKey(News)

    class Meta:
        verbose_name = _('NewsComment')
        verbose_name_plural = _('NewsComments')
| webadmin87/midnight | midnight_news/models.py | Python | bsd-3-clause | 2,757 |
import datetime
import logging
from functools import reduce
from flask_babelpkg import lazy_gettext
from .filters import Filters
log = logging.getLogger(__name__)
class BaseInterface(object):
    """
    Base class for all data model interfaces.

    Sub class it to implement your own interface for some data engine.
    """
    obj = None

    filter_converter_class = None
    """ when sub classing override with your own custom filter converter """

    """ Messages to display on CRUD Events """
    add_row_message = lazy_gettext('Added Row')
    edit_row_message = lazy_gettext('Changed Row')
    delete_row_message = lazy_gettext('Deleted Row')
    delete_integrity_error_message = lazy_gettext('Associated data exists, please delete them first')
    add_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
    edit_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
    general_error_message = lazy_gettext('General Error')

    """ Tuple with message and text with severity type ex: ("Added Row", "info") """
    message = ()

    def __init__(self, obj):
        self.obj = obj

    def _get_attr_value(self, item, col):
        """Resolve *col* on *item*: supports dotted paths and callables."""
        if not hasattr(item, col):
            # it's an inner obj attr, e.g. "group.name"
            return reduce(getattr, col.split('.'), item)
        if hasattr(getattr(item, col), '__call__'):
            # its a function
            return getattr(item, col)()
        else:
            # its attribute
            return getattr(item, col)

    def get_filters(self, search_columns=None):
        """Build a Filters object over the given searchable columns."""
        search_columns = search_columns or []
        return Filters(self.filter_converter_class, self, search_columns)

    def get_values_item(self, item, show_columns):
        """Return the values of *show_columns* for a single item."""
        return [self._get_attr_value(item, col) for col in show_columns]

    def _get_values(self, lst, list_columns):
        """
        Get Values: formats values for list template.
        returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]

        :param lst:
            The list of item objects from query
        :param list_columns:
            The list of columns to include
        """
        retlst = []
        for item in lst:
            retdict = {}
            for col in list_columns:
                retdict[col] = self._get_attr_value(item, col)
            retlst.append(retdict)
        return retlst

    def get_values(self, lst, list_columns):
        """
        Get Values: formats values for list template (lazy generator form).
        returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]

        :param lst:
            The list of item objects from query
        :param list_columns:
            The list of columns to include
        """
        for item in lst:
            retdict = {}
            for col in list_columns:
                retdict[col] = self._get_attr_value(item, col)
            yield retdict

    def get_values_json(self, lst, list_columns):
        """
        Converts list of objects from query to JSON-serializable dicts.
        """
        result = []
        for item in self.get_values(lst, list_columns):
            for key, value in list(item.items()):
                # dates are not JSON serializable; emit ISO 8601 strings
                if isinstance(value, (datetime.datetime, datetime.date)):
                    value = value.isoformat()
                    item[key] = value
                if isinstance(value, list):
                    item[key] = [str(v) for v in value]
            result.append(item)
        return result

    """
    Returns the models class name
    useful for auto title on views
    """
    @property
    def model_name(self):
        return self.obj.__class__.__name__

    """
    Next methods must be overridden
    """
    def query(self, filters=None, order_column='', order_direction='',
              page=None, page_size=None):
        pass

    def is_image(self, col_name):
        return False

    def is_file(self, col_name):
        return False

    def is_gridfs_file(self, col_name):
        return False

    def is_gridfs_image(self, col_name):
        return False

    def is_string(self, col_name):
        return False

    def is_text(self, col_name):
        return False

    def is_integer(self, col_name):
        return False

    def is_float(self, col_name):
        return False

    def is_boolean(self, col_name):
        return False

    def is_date(self, col_name):
        return False

    def is_datetime(self, col_name):
        return False

    def is_relation(self, prop):
        return False

    def is_relation_col(self, col):
        return False

    def is_relation_many_to_one(self, prop):
        return False

    def is_relation_many_to_many(self, prop):
        return False

    def is_relation_one_to_one(self, prop):
        return False

    def is_relation_one_to_many(self, prop):
        return False

    def is_nullable(self, col_name):
        return True

    def is_unique(self, col_name):
        return False

    def is_pk(self, col_name):
        return False

    def is_fk(self, col_name):
        return False

    def get_max_length(self, col_name):
        return -1

    def get_min_length(self, col_name):
        return -1

    """
    -----------------------------------------
         FUNCTIONS FOR CRUD OPERATIONS
    -----------------------------------------
    """
    def add(self, item):
        """
        Adds object
        """
        raise NotImplementedError

    def edit(self, item):
        """
        Edit (change) object
        """
        raise NotImplementedError

    def delete(self, item):
        """
        Deletes object
        """
        raise NotImplementedError

    def get_col_default(self, col_name):
        pass

    def get_keys(self, lst):
        """
        return a list of pk values from object list
        """
        pk_name = self.get_pk_name()
        return [getattr(item, pk_name) for item in lst]

    def get_pk_name(self, item=None):
        """
        Returns the primary key name.

        Bug fix: *item* is now optional — :meth:`get_keys` and
        :meth:`get_pk_value` call this method with no argument, which
        previously raised TypeError against the mandatory parameter.
        Existing callers that pass an item keep working.
        """
        raise NotImplementedError

    def get_pk_value(self, item):
        return getattr(item, self.get_pk_name())

    def get(self, pk):
        """
        return the record from key
        """
        pass

    def get_related_model(self, prop):
        raise NotImplementedError

    def get_related_interface(self, col_name):
        """
        Returns a BaseInterface for the related model
        of column name.

        :param col_name: Column name with relation
        :return: BaseInterface
        """
        raise NotImplementedError

    def get_related_obj(self, col_name, value):
        raise NotImplementedError

    def get_related_fk(self, model):
        raise NotImplementedError

    def get_columns_list(self):
        """
        Returns a list of all the columns names
        """
        return []

    def get_user_columns_list(self):
        """
        Returns a list of user viewable columns names
        """
        return self.get_columns_list()

    def get_search_columns_list(self):
        """
        Returns a list of searchable columns names
        """
        return []

    def get_order_columns_list(self, list_columns=None):
        """
        Returns a list of order columns names
        """
        return []

    def get_relation_fk(self, prop):
        pass
| rpiotti/Flask-AppBuilder | flask_appbuilder/models/base.py | Python | bsd-3-clause | 7,479 |
# -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from cookie_flaskApp.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
    """Unit tests for the User model and its factory."""

    def test_get_by_id(self):
        """A saved user can be fetched back by primary key."""
        created = User('foo', 'foo@bar.com')
        created.save()
        assert User.get_by_id(created.id) == created

    def test_created_at_defaults_to_datetime(self):
        """Saving stamps created_at with a datetime automatically."""
        created = User(username='foo', email='foo@bar.com')
        created.save()
        assert bool(created.created_at)
        assert isinstance(created.created_at, dt.datetime)

    def test_password_is_nullable(self):
        """A user may be stored without any password set."""
        created = User(username='foo', email='foo@bar.com')
        created.save()
        assert created.password is None

    def test_factory(self):
        """The factory produces a complete, active, non-admin user."""
        made = UserFactory(password="myprecious")
        assert bool(made.username)
        assert bool(made.email)
        assert bool(made.created_at)
        assert made.is_admin is False
        assert made.active is True
        assert made.check_password('myprecious')

    def test_check_password(self):
        """check_password accepts the right secret and rejects others."""
        created = User.create(username="foo", email="foo@bar.com",
                              password="foobarbaz123")
        assert created.check_password('foobarbaz123') is True
        assert created.check_password("barfoobaz") is False

    def test_full_name(self):
        """full_name joins first and last names with a space."""
        made = UserFactory(first_name="Foo", last_name="Bar")
        assert made.full_name == "Foo Bar"

    def test_roles(self):
        """Roles appended to a user are persisted on save."""
        admin_role = Role(name='admin')
        admin_role.save()
        member = UserFactory()
        member.roles.append(admin_role)
        member.save()
        assert admin_role in member.roles
assert role in u.roles | sakhuja/cookie_lover | tests/test_models.py | Python | bsd-3-clause | 1,655 |
# -*- coding: utf-8 -*-
"""Test gui."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import os
import shutil
from ..state import GUIState, _gui_state_path, _get_default_state_path
from phylib.utils import Bunch, load_json, save_json
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Test GUI state
#------------------------------------------------------------------------------
class MyClass(object):
    """Dummy class whose module location anchors the default state path."""
    pass
def test_get_default_state_path():
    """The default state path sits under <module dir>/static/state.json."""
    expected_suffix = os.sep.join(('gui', 'tests', 'static', 'state.json'))
    assert str(_get_default_state_path(MyClass())).endswith(expected_suffix)
def test_gui_state_view_1(tempdir):
    """A view's state survives save/reload, including via a default file."""
    view = Bunch(name='MyView0')
    state_path = _gui_state_path('GUI', tempdir)

    state = GUIState(state_path)
    state.update_view_state(view, dict(hello='world'))
    # Only an exact view-name match retrieves the stored state.
    assert not state.get_view_state(Bunch(name='MyView'))
    assert not state.get_view_state(Bunch(name='MyView (1)'))
    assert state.get_view_state(view) == Bunch(hello='world')
    state.save()

    # Move the saved state.json into a "default" location.
    default_path = tempdir / 'state.json'
    shutil.copy(state._path, default_path)
    state._path.unlink()

    logger.info("Create new GUI state.")

    # The default state.json should be automatically copied and loaded.
    state = GUIState(state_path, default_state_path=default_path)
    assert state.MyView0.hello == 'world'
def test_gui_state_view_2(tempdir):
    """Local keys that match nothing leave the local file empty."""
    global_path = tempdir / 'global/state.json'
    local_path = tempdir / 'local/state.json'
    payload = {'a': {'b': 2, 'c': 3}}

    # 'a.d' does not exist in the payload, so everything stays global.
    state = GUIState(global_path, local_path=local_path, local_keys=('a.d',))
    state.update(payload)
    state.save()

    assert load_json(global_path) == payload
    assert load_json(local_path) == {}

    # Reloading reconstructs the full mapping from the global file.
    state = GUIState(global_path, local_path=local_path, local_keys=('a.d',))
    assert state == payload
def test_gui_state_view_3(tempdir):
    """Keys registered via add_local_keys are split out to the local file."""
    global_path = tempdir / 'global/state.json'
    local_path = tempdir / 'local/state.json'
    payload = {'a': {'b': 2, 'c': 3}}

    state = GUIState(global_path, local_path=local_path)
    state.add_local_keys(['a.b'])
    state.update(payload)
    state.save()

    # 'a.b' went to the local file; everything else to the global one.
    assert load_json(global_path) == {'a': {'c': 3}}
    assert load_json(local_path) == {'a': {'b': 2}}

    # A locally-edited value wins over the global one when reloading.
    save_json(local_path, {'a': {'b': 3}})
    state = GUIState(global_path, local_path=local_path, local_keys=('a.b',))
    assert state == {'a': {'b': 3, 'c': 3}}
    assert state._local_data == {'a': {'b': 3}}
| kwikteam/phy | phy/gui/tests/test_state.py | Python | bsd-3-clause | 2,810 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
C/C++ preprocessor for finding dependencies
Reasons for using the Waf preprocessor by default
#. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files)
#. Not all compilers provide .d files for obtaining the dependencies (portability)
#. A naive file scanner will not catch the constructs such as "#include foo()"
#. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything)
Regarding the speed concerns:
* the preprocessing is performed only when files must be compiled
* the macros are evaluated only for #if/#elif/#include
* system headers are not scanned by default
Now if you do not want the Waf preprocessor, the tool *gccdeps* uses the .d files produced
during the compilation to track the dependencies (useful when used with the boost libraries).
It only works with gcc >= 4.4 though.
A dumb preprocessor is also available in the tool *c_dumbpreproc*
"""
# TODO: more varargs, pragma once
import re, string, traceback
from waflib import Logs, Utils, Errors
from waflib.Logs import debug, error
class PreprocError(Errors.WafError):
	"""Raised when the preprocessor cannot parse or evaluate its input."""
	pass
POPFILE = '-'
"Constant representing a special token used in :py:meth:`waflib.Tools.c_preproc.c_parser.start` iteration to switch to a header read previously"

recursion_limit = 150
"Limit on the amount of files to read in the dependency scanner"

go_absolute = False
"Set to True to track headers on files in /usr/include, else absolute paths are ignored (but it becomes very slow)"

# Default system include path on POSIX; on Windows the compiler include
# directories are discovered elsewhere, so the list starts empty.
standard_includes = ['/usr/include']
if Utils.is_win32:
	standard_includes = []

use_trigraphs = 0
"""Apply trigraph rules (False by default)"""

strict_quotes = 0
"""Reserve the "#include <>" quotes for system includes (do not search for those includes). False by default."""

# C++ alternative operator spellings, mapped to their symbolic form.
g_optrans = {
'not':'!',
'and':'&&',
'bitand':'&',
'and_eq':'&=',
'or':'||',
'bitor':'|',
'or_eq':'|=',
'xor':'^',
'xor_eq':'^=',
'compl':'~',
}
"""Operators such as and/or/xor for c++. Set an empty dict to disable."""
# ignore #warning and #error
re_lines = re.compile(
'^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$',
re.IGNORECASE | re.MULTILINE)
"""Match #include lines"""
re_mac = re.compile("^[a-zA-Z_]\w*")
"""Match macro definitions"""
re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
"""Match macro functions"""
re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE)
"""Match #pragma once statements"""
re_nl = re.compile('\\\\\r*\n', re.MULTILINE)
"""Match newlines"""
re_cpp = re.compile(r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE )
"""Filter C/C++ comments"""
trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')]
"""Trigraph definitions"""
chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39}
"""Escape characters"""
NUM = 'i'
"""Number token"""
OP = 'O'
"""Operator token"""
IDENT = 'T'
"""Identifier token"""
STR = 's'
"""String token"""
CHAR = 'c'
"""Character token"""
tok_types = [NUM, STR, IDENT, OP]
"""Token types"""
exp_types = [
r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""",
r'L?"([^"\\]|\\.)*"',
r'[a-zA-Z_]\w*',
r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]',
]
"""Expression types"""
re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M)
"""Match expressions into tokens"""
accepted = 'a'
"""Parser state is *accepted*"""
ignored = 'i'
"""Parser state is *ignored*, for example preprocessor lines in an #if 0 block"""
undefined = 'u'
"""Parser state is *undefined* at the moment"""
skipped = 's'
"""Parser state is *skipped*, for example preprocessor lines in a #elif 0 block"""
def repl(m):
	"""Replacement callback used with :py:attr:`waflib.Tools.c_preproc.re_cpp`:
	comments collapse to a single space, string/char literals are kept."""
	txt = m.group(0)
	return ' ' if txt.startswith('/') else txt
def filter_comments(filename):
	"""
	Filter the comments from a c/h file, and return the preprocessor lines.
	The regexps :py:attr:`waflib.Tools.c_preproc.re_cpp`, :py:attr:`waflib.Tools.c_preproc.re_nl` and :py:attr:`waflib.Tools.c_preproc.re_lines` are used internally.

	:return: the preprocessor directives as a list of (keyword, line)
	:rtype: a list of string pairs
	"""
	# return a list of tuples : keyword, line
	code = Utils.readf(filename)
	if use_trigraphs:
		# bug fix: the old code did ``code.split(a).join(b)`` — str.split
		# returns a list, which has no join method (AttributeError).
		# The intent is to substitute each trigraph with its character.
		for (a, b) in trig_def:
			code = code.replace(a, b)
	code = re_nl.sub('', code)
	code = re_cpp.sub(repl, code)
	return [(m.group(2), m.group(3)) for m in re.finditer(re_lines, code)]
prec = {}
"""
Operator precedence rules required for parsing expressions of the form::
	#if 1 && 2 != 0
"""

# Lower index means the operator binds tighter (see get_term).
ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ',']
for x in range(len(ops)):
	syms = ops[x]
	for u in syms.split():
		prec[u] = x
def trimquotes(s):
	"""
	Strip trailing whitespace, then remove one pair of surrounding single quotes::

		trimquotes("'test'") == "test"

	:param s: expression to transform
	:type s: string
	:rtype: string
	"""
	if not s:
		return ''
	stripped = s.rstrip()
	quoted = stripped[0] == "'" and stripped[-1] == "'"
	return stripped[1:-1] if quoted else stripped
def reduce_nums(val_1, val_2, val_op):
	"""
	Apply arithmetic rules to compute a result

	:param val_1: left operand
	:type val_1: int or string convertible to int
	:param val_2: right operand
	:type val_2: int or string convertible to int
	:param val_op: C operator in *+*, */*, *-*, etc
	:type val_op: string
	:rtype: int
	"""
	# coerce both operands to numbers (macro expansion may leave strings)
	try: a = 0 + val_1
	except TypeError: a = int(val_1)
	try: b = 0 + val_2
	except TypeError: b = int(val_2)

	d = val_op
	if d == '%': c = a % b
	elif d == '+': c = a + b
	elif d == '-': c = a - b
	elif d == '*': c = a * b
	# bug fix: use floor division so Python 3 does not produce a float —
	# C preprocessor arithmetic is integral (matches the Python 2 int/int
	# behaviour this code was written for)
	elif d == '/': c = a // b
	elif d == '^': c = a ^ b
	elif d == '|': c = a | b
	elif d == '||': c = int(a or b)
	elif d == '&': c = a & b
	elif d == '&&': c = int(a and b)
	elif d == '==': c = int(a == b)
	elif d == '!=': c = int(a != b)
	elif d == '<=': c = int(a <= b)
	elif d == '<': c = int(a < b)
	elif d == '>': c = int(a > b)
	elif d == '>=': c = int(a >= b)
	elif d == '<<': c = a << b
	elif d == '>>': c = a >> b
	# note: a second, unreachable duplicate '^' branch was removed
	else: c = 0
	return c
def get_num(lst):
	"""
	Try to obtain a number from a list of tokens. The token types are defined in :py:attr:`waflib.Tools.ccroot.tok_types`.

	:param lst: list of preprocessor tokens
	:type lst: list of tuple (tokentype, value)
	:return: a pair containing the number and the rest of the list
	:rtype: tuple(value, list)
	"""
	if not lst: raise PreprocError("empty list for get_num")
	(p, v) = lst[0]
	if p == OP:
		if v == '(':
			# parenthesised sub-expression: locate the matching ')'
			count_par = 1
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
						if count_par == 0:
							break
					elif v == '(':
						count_par += 1
				i += 1
			# while/else: only runs if the loop never hit 'break',
			# i.e. no matching closing parenthesis was found
			else:
				raise PreprocError("rparen expected %r" % lst)

			# evaluate the inside of the parentheses, return the rest
			(num, _) = get_term(lst[1:i])
			return (num, lst[i+1:])

		elif v == '+':
			# unary plus is a no-op
			return get_num(lst[1:])
		elif v == '-':
			num, lst = get_num(lst[1:])
			return (reduce_nums('-1', num, '*'), lst)
		elif v == '!':
			num, lst = get_num(lst[1:])
			return (int(not int(num)), lst)
		elif v == '~':
			num, lst = get_num(lst[1:])
			return (~ int(num), lst)
		else:
			raise PreprocError("Invalid op token %r for get_num" % lst)
	elif p == NUM:
		return v, lst[1:]
	elif p == IDENT:
		# all macros should have been replaced, remaining identifiers eval to 0
		return 0, lst[1:]
	else:
		raise PreprocError("Invalid token %r for get_num" % lst)
def get_term(lst):
	"""
	Evaluate an expression recursively, for example::

		1+1+1 -> 2+1 -> 3

	:param lst: list of tokens
	:type lst: list of tuple(token, value)
	:return: the value and the remaining tokens
	:rtype: value, list
	"""

	if not lst: raise PreprocError("empty list for get_term")
	num, lst = get_num(lst)
	if not lst:
		return (num, [])
	(p, v) = lst[0]
	if p == OP:
		if v == ',':
			# skip
			return get_term(lst[1:])
		elif v == '?':
			# ternary: scan forward for the ':' at paren depth 0
			count_par = 0
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
					elif v == '(':
						count_par += 1
					elif v == ':':
						if count_par == 0:
							break
				i += 1
			# while/else: no ':' found for the '?' operator
			else:
				raise PreprocError("rparen expected %r" % lst)

			# evaluate only the selected branch of the ternary
			if int(num):
				return get_term(lst[1:i])
			else:
				return get_term(lst[i+1:])

		else:
			num2, lst = get_num(lst[1:])
			if not lst:
				# no more tokens to process
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)

			# operator precedence
			p2, v2 = lst[0]
			if p2 != OP:
				raise PreprocError("op expected %r" % lst)

			if prec[v2] >= prec[v]:
				# v binds at least as tightly: fold (num v num2) now
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)
			else:
				# v2 binds tighter: fold (num2 v2 num3) first
				num3, lst = get_num(lst[1:])
				num3 = reduce_nums(num2, num3, v2)
				return get_term([(NUM, num), (p, v), (NUM, num3)] + lst)

	raise PreprocError("cannot reduce %r" % lst)
def reduce_eval(lst):
	"""
	Fold a token list for an #if/#elif condition into a single numeric token.

	:param lst: a list of tokens
	:type lst: list of tuple(token, value)
	:return: a token
	:rtype: tuple(NUM, int)
	"""
	value, _ = get_term(lst)
	return (NUM, value)
def stringize(lst):
	"""
	Concatenate the values of a token list into a single string.

	:param lst: a list of tokens
	:type lst: list of tuple(token, value)
	:rtype: string
	"""
	return "".join(str(value) for (_, value) in lst)
def paste_tokens(t1, t2):
	"""
	Token pasting (the ``##`` operator) works between identifiers, particular
	operators, and identifiers and numbers::

		a ## b  ->  ab
		> ## =  ->  >=
		a ## 2  ->  a2

	:param t1: token
	:type t1: tuple(type, value)
	:param t2: token
	:type t2: tuple(type, value)
	"""
	kind = None
	if t1[0] == OP and t2[0] == OP:
		kind = OP
	elif t1[0] == IDENT and t2[0] in (IDENT, NUM):
		kind = IDENT
	elif t1[0] == NUM and t2[0] == NUM:
		kind = NUM
	if not kind:
		raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2))
	return (kind, t1[1] + t2[1])
def reduce_tokens(lst, defs, ban=[]):
	"""
	Replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied

	:param lst: list of tokens (modified in place)
	:type lst: list of tuple(token, value)
	:param defs: macro definitions
	:type defs: dict
	:param ban: macros that cannot be substituted (recursion is not allowed)
	:type ban: list of string
	:return: the new list of tokens
	:rtype: value, list
	"""
	i = 0
	while i < len(lst):
		(p, v) = lst[i]

		# handle "defined X" / "defined(X)" by replacing with 1 or 0
		if p == IDENT and v == "defined":
			del lst[i]
			if i < len(lst):
				(p2, v2) = lst[i]
				if p2 == IDENT:
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				elif p2 == OP and v2 == '(':
					del lst[i]
					(p2, v2) = lst[i]
					del lst[i] # remove the ident, and change the ) for the value
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				else:
					raise PreprocError("Invalid define expression %r" % lst)

		elif p == IDENT and v in defs:
			# lazy parsing: a still-raw string definition is tokenized on first use
			if isinstance(defs[v], str):
				a, b = extract_macro(defs[v])
				defs[v] = b
			macro_def = defs[v]
			to_add = macro_def[1]

			if isinstance(macro_def[0], list):
				# macro without arguments
				del lst[i]
				accu = to_add[:]
				reduce_tokens(accu, defs, ban+[v])
				for x in range(len(accu)):
					lst.insert(i, accu[x])
					i += 1
			else:
				# collect the arguments for the funcall
				args = []
				del lst[i]

				if i >= len(lst):
					raise PreprocError("expected '(' after %r (got nothing)" % v)

				(p2, v2) = lst[i]
				if p2 != OP or v2 != '(':
					raise PreprocError("expected '(' after %r" % v)

				del lst[i]

				one_param = []
				count_paren = 0
				# split the argument tokens on top-level commas
				while i < len(lst):
					p2, v2 = lst[i]

					del lst[i]
					if p2 == OP and count_paren == 0:
						if v2 == '(':
							one_param.append((p2, v2))
							count_paren += 1
						elif v2 == ')':
							if one_param: args.append(one_param)
							break
						elif v2 == ',':
							if not one_param: raise PreprocError("empty param in funcall %s" % v)
							args.append(one_param)
							one_param = []
						else:
							one_param.append((p2, v2))
					else:
						one_param.append((p2, v2))
						if v2 == '(': count_paren += 1
						elif v2 == ')': count_paren -= 1
				# while/else: ran out of tokens before the closing ')'
				else:
					raise PreprocError('malformed macro')

				# substitute the arguments within the define expression
				accu = []
				arg_table = macro_def[0]
				j = 0
				while j < len(to_add):
					(p2, v2) = to_add[j]

					if p2 == OP and v2 == '#':
						# stringize is for arguments only
						if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
							toks = args[arg_table[to_add[j+1][1]]]
							accu.append((STR, stringize(toks)))
							j += 1
						else:
							accu.append((p2, v2))
					elif p2 == OP and v2 == '##':
						# token pasting, how can man invent such a complicated system?
						if accu and j+1 < len(to_add):
							# we have at least two tokens

							t1 = accu[-1]

							if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
								toks = args[arg_table[to_add[j+1][1]]]

								if toks:
									accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1])
									accu.extend(toks[1:])
								else:
									# error, case "a##"
									accu.append((p2, v2))
									accu.extend(toks)
							elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__':
								# TODO not sure
								# first collect the tokens
								va_toks = []
								st = len(macro_def[0])
								pt = len(args)
								for x in args[pt-st+1:]:
									va_toks.extend(x)
									va_toks.append((OP, ','))
								if va_toks: va_toks.pop() # extra comma
								if len(accu)>1:
									(p3, v3) = accu[-1]
									(p4, v4) = accu[-2]
									if v3 == '##':
										# remove the token paste
										accu.pop()
										if v4 == ',' and pt < st:
											# remove the comma
											accu.pop()
								accu += va_toks
							else:
								accu[-1] = paste_tokens(t1, to_add[j+1])

							j += 1
						else:
							# Invalid paste, case    "##a" or "b##"
							accu.append((p2, v2))

					elif p2 == IDENT and v2 in arg_table:
						# plain parameter: expand its argument tokens recursively
						toks = args[arg_table[v2]]
						reduce_tokens(toks, defs, ban+[v])
						accu.extend(toks)
					else:
						accu.append((p2, v2))

					j += 1

				reduce_tokens(accu, defs, ban+[v])

				for x in range(len(accu)-1, -1, -1):
					lst.insert(i, accu[x])
					i += 1
		i += 1
def eval_macro(lst, defs):
	"""
	Expand macros in the token list with :py:func:`waflib.Tools.c_preproc.reduce_tokens`,
	then evaluate it to a 0/1 truth value via :py:func:`waflib.Tools.c_preproc.reduce_eval`.

	:param lst: list of tokens
	:type lst: list of tuple(token, value)
	:param defs: macro definitions
	:type defs: dict
	:rtype: int
	"""
	reduce_tokens(lst, defs, [])
	if not lst:
		raise PreprocError("missing tokens to evaluate")
	_, value = reduce_eval(lst)
	return int(value) != 0
def extract_macro(txt):
	"""
	Process a macro definition of the form::
		 #define f(x, y) x * y

	into a function or a simple macro without arguments

	:param txt: expression to exact a macro definition from
	:type txt: string
	:return: a tuple containing the name, the list of arguments and the replacement
	:rtype: tuple(string, [list, list])
	"""
	t = tokenize(txt)
	if re_fun.search(txt):
		# function-like macro: parse "name(p1, p2, ...)" with a tiny
		# state machine keyed on the previously-seen token
		p, name = t[0]

		p, v = t[1]
		if p != OP: raise PreprocError("expected open parenthesis")

		i = 1
		pindex = 0
		params = {}
		prev = '('

		while 1:
			i += 1
			p, v = t[i]

			if prev == '(':
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("unexpected token (3)")
			elif prev == IDENT:
				if p == OP and v == ',':
					prev = v
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("comma or ... expected")
			elif prev == ',':
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == '...':
					raise PreprocError("not implemented (1)")
				else:
					raise PreprocError("comma or ... expected (2)")
			elif prev == '...':
				raise PreprocError("not implemented (2)")
			else:
				raise PreprocError("unexpected else")

		#~ print (name, [params, t[i+1:]])
		return (name, [params, t[i+1:]])
	else:
		# object-like macro: name plus replacement tokens
		(p, v) = t[0]
		if len(t) > 1:
			return (v, [[], t[1:]])
		else:
			# empty define, assign an empty token
			return (v, [[], [('T','')]])
re_include = re.compile('^\s*(<(?P<a>.*)>|"(?P<b>.*)")')
def extract_include(txt, defs):
	"""
	Process a line in the form::

		#include foo

	:param txt: include line to process
	:type txt: string
	:param defs: macro definitions
	:type defs: dict
	:return: a tuple (kind, file name) where kind is '<' or '"'
	:rtype: tuple(string, string)
	"""
	m = re_include.search(txt)
	if m:
		# plain `#include <...>` or `#include "..."` line
		if m.group('a'): return '<', m.group('a')
		if m.group('b'): return '"', m.group('b')
	# perform preprocessing and look at the result, it must match an include
	toks = tokenize(txt)
	reduce_tokens(toks, defs, ['waf_include'])
	if not toks:
		raise PreprocError("could not parse include %s" % txt)
	if len(toks) == 1:
		if toks[0][0] == STR:
			return '"', toks[0][1]
	else:
		if toks[0][1] == '<' and toks[-1][1] == '>':
			# bug fix: return the same (kind, path) tuple as the other
			# branches; previously only the bare path string was returned,
			# which broke `(kind, inc) = extract_include(...)` in callers.
			return '<', stringize(toks).lstrip('<').rstrip('>')
	raise PreprocError("could not parse include %s." % txt)
def parse_char(txt):
	"""
	Parse a c character

	:param txt: character to parse
	:type txt: string
	:return: a character literal
	:rtype: string
	"""
	if not txt: raise PreprocError("attempted to parse a null char")
	if txt[0] != '\\':
		# not an escape sequence: a single plain character
		# NOTE(review): ord() requires len(txt) == 1; longer inputs raise
		# TypeError here -- confirm callers only pass single characters.
		return ord(txt)
	c = txt[1]
	if c == 'x':
		# hexadecimal escape '\xNN'
		# NOTE(review): both branches below compute the same value; the
		# guarded branch appears to be redundant.
		if len(txt) == 4 and txt[3] in string.hexdigits: return int(txt[2:], 16)
		return int(txt[2:], 16)
	elif c.isdigit():
		# octal escape '\0' .. '\NNN'
		if c == '0' and len(txt)==2: return 0
		for i in 3, 2, 1:
			if len(txt) > i and txt[1:1+i].isdigit():
				# NOTE(review): this branch returns a (length, value) tuple,
				# unlike the plain int returned elsewhere -- verify callers.
				return (1+i, int(txt[1:1+i], 8))
	else:
		# simple escapes such as '\n', '\t' via the chr_esc table
		try: return chr_esc[c]
		except KeyError: raise PreprocError("could not parse char literal '%s'" % txt)
def tokenize(s):
	"""
	Convert a string into a list of tokens (shlex.split does not apply to c/c++/d)

	:param s: input to tokenize
	:type s: string
	:return: a list of tokens
	:rtype: list of tuple(token, value)
	"""
	# tokenize_private memoizes its result (Utils.run_once); hand out a
	# fresh list so callers can mutate it without corrupting the cache.
	return list(tokenize_private(s))
@Utils.run_once
def tokenize_private(s):
	"""Tokenize *s* into (token, value) pairs; results are memoized per input."""
	ret = []
	for match in re_clexer.finditer(s):
		m = match.group
		for name in tok_types:
			v = m(name)
			if v:
				if name == IDENT:
					# identifiers may be alternative operator spellings
					# ('and', 'or', ...) or the c++ boolean literals
					try: v = g_optrans[v]; name = OP
					except KeyError:
						# c++ specific
						if v.lower() == "true":
							v = 1
							name = NUM
						elif v.lower() == "false":
							v = 0
							name = NUM
				elif name == NUM:
					# normalize numeric literals to python values
					if m('oct'): v = int(v, 8)
					elif m('hex'): v = int(m('hex'), 16)
					elif m('n0'): v = m('n0')
					else:
						v = m('char')
						if v: v = parse_char(v)
						else: v = m('n2') or m('n4')
				elif name == OP:
					# translate digraphs to their canonical form
					if v == '%:': v = '#'
					elif v == '%:%:': v = '##'
				elif name == STR:
					# remove the quotes around the string
					v = v[1:-1]
				ret.append((name, v))
				break
	return ret
@Utils.run_once
def define_name(line):
	"""
	Extract the macro name from a define line (memoized).

	:param line: define line
	:type line: string
	:rtype: string
	:return: the define name
	"""
	# re_mac matches the macro identifier at the start of the line; a line
	# that does not match raises AttributeError (caught by the caller).
	return re_mac.match(line).group(0)
class c_parser(object):
	"""
	Used by :py:func:`waflib.Tools.c_preproc.scan` to parse c/h files. Note that by default,
	only project headers are parsed.
	"""
	def __init__(self, nodepaths=None, defines=None):
		self.lines = []
		"""list of lines read"""
		if defines is None:
			self.defs = {}
		else:
			self.defs = dict(defines) # make a copy
		self.state = []
		self.count_files = 0
		self.currentnode_stack = []
		self.nodepaths = nodepaths or []
		"""Include paths"""
		self.nodes = []
		"""List of :py:class:`waflib.Node.Node` found so far"""
		self.names = []
		"""List of file names that could not be matched by any file"""
		self.curfile = ''
		"""Current file"""
		self.ban_includes = set([])
		"""Includes that must not be read (#pragma once)"""
	def cached_find_resource(self, node, filename):
		"""
		Find a file from the input directory

		:param node: directory
		:type node: :py:class:`waflib.Node.Node`
		:param filename: header to find
		:type filename: string
		:return: the node if found, or None
		:rtype: :py:class:`waflib.Node.Node`
		"""
		try:
			# per-build cache of (directory, filename) -> node lookups
			nd = node.ctx.cache_nd
		except AttributeError:
			nd = node.ctx.cache_nd = {}
		tup = (node, filename)
		try:
			return nd[tup]
		except KeyError:
			ret = node.find_resource(filename)
			if ret:
				if getattr(ret, 'children', None):
					# a directory matched, not a file
					ret = None
				elif ret.is_child_of(node.ctx.bldnode):
					# prefer the source-tree twin unless it is a directory
					tmp = node.ctx.srcnode.search_node(ret.path_from(node.ctx.bldnode))
					if tmp and getattr(tmp, 'children', None):
						ret = None
			nd[tup] = ret
			return ret
	def tryfind(self, filename):
		"""
		Try to obtain a node from the filename based from the include paths. Will add
		the node found to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` or the file name to
		:py:attr:`waflib.Tools.c_preproc.c_parser.names` if no corresponding file is found. Called by
		:py:attr:`waflib.Tools.c_preproc.c_parser.start`.

		:param filename: header to find
		:type filename: string
		:return: the node if found
		:rtype: :py:class:`waflib.Node.Node`
		"""
		self.curfile = filename
		# for msvc it should be a for loop on the whole stack
		found = self.cached_find_resource(self.currentnode_stack[-1], filename)
		for n in self.nodepaths:
			if found:
				break
			found = self.cached_find_resource(n, filename)
		if found and not found in self.ban_includes:
			# TODO the duplicates do not increase the no-op build times too much, but they may be worth removing
			self.nodes.append(found)
			if filename[-4:] != '.moc':
				self.addlines(found)
		else:
			if not filename in self.names:
				self.names.append(filename)
		return found
	def addlines(self, node):
		"""
		Add the lines from a header in the list of preprocessor lines to parse

		:param node: header
		:type node: :py:class:`waflib.Node.Node`
		"""
		self.currentnode_stack.append(node.parent)
		filepath = node.abspath()
		self.count_files += 1
		if self.count_files > recursion_limit:
			# issue #812
			raise PreprocError("recursion limit exceeded")
		# parse_cache is attached by start(); maps file path -> filtered lines
		pc = self.parse_cache
		debug('preproc: reading file %r', filepath)
		try:
			lns = pc[filepath]
		except KeyError:
			pass
		else:
			self.lines.extend(lns)
			return
		try:
			lines = filter_comments(filepath)
			# POPFILE marks the end of this file so start() can restore
			# the include-path context when it is reached
			lines.append((POPFILE, ''))
			lines.reverse()
			pc[filepath] = lines # cache the lines filtered
			self.lines.extend(lines)
		except IOError:
			raise PreprocError("could not read the file %s" % filepath)
		except Exception:
			if Logs.verbose > 0:
				error("parsing %s failed" % filepath)
				traceback.print_exc()
	def start(self, node, env):
		"""
		Preprocess a source file to obtain the dependencies, which are accumulated to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes`
		and :py:attr:`waflib.Tools.c_preproc.c_parser.names`.

		:param node: source file
		:type node: :py:class:`waflib.Node.Node`
		:param env: config set containing additional defines to take into account
		:type env: :py:class:`waflib.ConfigSet.ConfigSet`
		"""
		debug('preproc: scanning %s (in %s)', node.name, node.parent.name)
		bld = node.ctx
		try:
			self.parse_cache = bld.parse_cache
		except AttributeError:
			bld.parse_cache = {}
			self.parse_cache = bld.parse_cache
		self.current_file = node
		self.addlines(node)
		# macros may be defined on the command-line, so they must be parsed as if they were part of the file
		if env['DEFINES']:
			try:
				lst = ['%s %s' % (x[0], trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in env['DEFINES']]]
				lst.reverse()
				self.lines.extend([('define', x) for x in lst])
			except AttributeError:
				# if the defines are invalid the compiler will tell the user
				pass
		# self.lines is processed as a stack (pop from the end)
		while self.lines:
			(token, line) = self.lines.pop()
			if token == POPFILE:
				# end of an included file: restore the include-path context
				self.count_files -= 1
				self.currentnode_stack.pop()
				continue
			try:
				ve = Logs.verbose
				if ve: debug('preproc: line is %s - %s state is %s', token, line, self.state)
				state = self.state
				# make certain we define the state if we are about to enter in an if block
				if token[:2] == 'if':
					state.append(undefined)
				elif token == 'endif':
					state.pop()
				# skip lines when in a dead 'if' branch, wait for the endif
				if token[0] != 'e':
					if skipped in self.state or ignored in self.state:
						continue
				if token == 'if':
					ret = eval_macro(tokenize(line), self.defs)
					if ret: state[-1] = accepted
					else: state[-1] = ignored
				elif token == 'ifdef':
					m = re_mac.match(line)
					if m and m.group(0) in self.defs: state[-1] = accepted
					else: state[-1] = ignored
				elif token == 'ifndef':
					m = re_mac.match(line)
					if m and m.group(0) in self.defs: state[-1] = ignored
					else: state[-1] = accepted
				elif token == 'include' or token == 'import':
					(kind, inc) = extract_include(line, self.defs)
					if ve: debug('preproc: include found %s (%s) ', inc, kind)
					if kind == '"' or not strict_quotes:
						self.current_file = self.tryfind(inc)
						if token == 'import':
							self.ban_includes.add(self.current_file)
				elif token == 'elif':
					if state[-1] == accepted:
						state[-1] = skipped
					elif state[-1] == ignored:
						if eval_macro(tokenize(line), self.defs):
							state[-1] = accepted
				elif token == 'else':
					if state[-1] == accepted: state[-1] = skipped
					elif state[-1] == ignored: state[-1] = accepted
				elif token == 'define':
					try:
						self.defs[define_name(line)] = line
					except Exception:
						raise PreprocError("Invalid define line %s" % line)
				elif token == 'undef':
					m = re_mac.match(line)
					if m and m.group(0) in self.defs:
						self.defs.__delitem__(m.group(0))
						#print "undef %s" % name
				elif token == 'pragma':
					if re_pragma_once.match(line.lower()):
						self.ban_includes.add(self.current_file)
			except Exception as e:
				# a malformed line must not abort the dependency scan
				if Logs.verbose:
					debug('preproc: line parsing failed (%s): %s %s', e, line, Utils.ex_stack())
def scan(task):
	"""
	Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind::

		#include some_macro()

	This function is bound as a task method on :py:class:`waflib.Tools.c.c` and :py:class:`waflib.Tools.cxx.cxx` for example
	"""
	global go_absolute
	try:
		incn = task.generator.includes_nodes
	except AttributeError:
		raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % task.generator)
	if go_absolute:
		# also search the system include directories
		nodepaths = incn + [task.generator.bld.root.find_dir(x) for x in standard_includes]
	else:
		# restrict the scan to project directories (source and build trees)
		nodepaths = []
		for node in incn:
			if node.is_child_of(node.ctx.srcnode) or node.is_child_of(node.ctx.bldnode):
				nodepaths.append(node)
	parser = c_parser(nodepaths)
	parser.start(task.inputs[0], task.env)
	if Logs.verbose:
		debug('deps: deps for %r: %r; unresolved %r' % (task.inputs, parser.nodes, parser.names))
	return (parser.nodes, parser.names)
| RoyalTS/econ-python-environment | .mywaflib/waflib/Tools/c_preproc.py | Python | bsd-3-clause | 27,412 |
"""
The tests exercise the casting machinery in a more low-level manner.
The reason is mostly to test a new implementation of the casting machinery.
Unlike most tests in NumPy, these are closer to unit tests than
to integration tests.
"""
import pytest
import textwrap
import enum
import itertools
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
# Simple skips object, parametric and long double (unsupported by struct)
simple_dtypes = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
    # Remove l and L, the table was generated with 64bit linux in mind.
    simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
# Convert the type characters into the corresponding DType classes
# (instances are produced on demand by simple_dtype_instances()).
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
def simple_dtype_instances():
    """Yield pytest params for each simple dtype, in native byte order and,
    where the dtype has a byte order, also byte-swapped."""
    for dtype_class in simple_dtypes:
        native = dtype_class()
        yield pytest.param(native, id=str(native))
        if native.byteorder != "|":
            swapped = native.newbyteorder()
            yield pytest.param(swapped, id=str(swapped))
def get_expected_stringlength(dtype):
    """Returns the string length when casting the basic dtypes to strings.
    """
    if dtype == np.bool_:
        return 5  # "False" needs five characters
    if dtype.kind in "iu":
        # decimal digits required for the largest value of each item size
        digit_count = {1: 3, 2: 5, 4: 10, 8: 20}
        length = digit_count.get(dtype.itemsize)
        if length is None:
            raise AssertionError(f"did not find expected length for {dtype}")
        if dtype.kind == "i":
            length += 1  # adds one character for the sign
        return length
    # Note: Can't do dtype comparison for longdouble on windows
    if dtype.char == "g":
        return 48
    if dtype.char == "G":
        return 2 * 48
    if dtype.kind == "f":
        return 32  # also for half apparently.
    if dtype.kind == "c":
        return 2 * 32
    raise AssertionError(f"did not find expected length for {dtype}")
class Casting(enum.IntEnum):
    """Python-side mirror of the NPY_CASTING levels, ordered from most to
    least restrictive; ``cast_is_view`` is a flag bit OR'ed into the level."""
    no = 0
    equiv = 1
    safe = 2
    same_kind = 3
    unsafe = 4
    # flag bit: the cast can be performed as a view (no data conversion)
    cast_is_view = 1 << 16
def _get_cancast_table():
    """Build the expected cast-safety table: maps a source DType class to a
    dict of {target DType class: Casting level}.

    Table symbols: ``#`` equiv, ``=`` safe, ``~`` same_kind, ``.`` unsafe.
    The row/column order follows the dtype characters in the header line.
    """
    table = textwrap.dedent("""
        X ? b h i l q B H I L Q e f d g F D G S U V O M m
        ? # = = = = = = = = = = = = = = = = = = = = = . =
        b . # = = = = . . . . . = = = = = = = = = = = . =
        h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
        i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
        l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
        q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
        B . ~ = = = = # = = = = = = = = = = = = = = = . =
        H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
        I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
        L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
        Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
        e . . . . . . . . . . . # = = = = = = = = = = . .
        f . . . . . . . . . . . ~ # = = = = = = = = = . .
        d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
        g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
        F . . . . . . . . . . . . . . . # = = = = = = . .
        D . . . . . . . . . . . . . . . ~ # = = = = = . .
        G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
        S . . . . . . . . . . . . . . . . . . # = = = . .
        U . . . . . . . . . . . . . . . . . . . # = = . .
        V . . . . . . . . . . . . . . . . . . . . # = . .
        O . . . . . . . . . . . . . . . . . . . . = # . .
        M . . . . . . . . . . . . . . . . . . . . = = # .
        m . . . . . . . . . . . . . . . . . . . . = = . #
        """).strip().split("\n")
    # entries sit at every second character (columns 2, 4, ...)
    dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
    convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
                    "=": Casting.safe, "#": Casting.equiv,
                    " ": -1}
    cancast = {}
    for from_dt, row in zip(dtypes, table[1:]):
        cancast[from_dt] = {}
        for to_dt, c in zip(dtypes, row[2::2]):
            cancast[from_dt][to_dt] = convert_cast[c]
    return cancast

# expected casting levels, indexed as CAST_TABLE[from_DType][to_DType]
CAST_TABLE = _get_cancast_table()
class TestChanges:
    """
    These test cases exercise some behaviour changes
    """
    @pytest.mark.parametrize("string", ["S", "U"])
    @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
    def test_float_to_string(self, floating, string):
        assert np.can_cast(floating, string)
        # 100 is long enough to hold any formatted floating
        assert np.can_cast(floating, f"{string}100")

    def test_to_void(self):
        # But in general, we do consider these safe:
        assert np.can_cast("d", "V")
        assert np.can_cast("S20", "V")
        # Do not consider it a safe cast if the void is too small:
        assert not np.can_cast("d", "V1")
        assert not np.can_cast("S20", "V1")
        assert not np.can_cast("U1", "V1")
        # Structured to unstructured is just like any other:
        assert np.can_cast("d,i", "V", casting="same_kind")
        # Unstructured void to unstructured is actually no cast at all:
        assert np.can_cast("V3", "V", casting="no")
        assert np.can_cast("V0", "V", casting="no")
class TestCasting:
size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
def get_data(self, dtype1, dtype2):
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
length = self.size // dtype1.itemsize
else:
length = self.size // dtype2.itemsize
# Assume that the base array is well enough aligned for all inputs.
arr1 = np.empty(length, dtype=dtype1)
assert arr1.flags.c_contiguous
assert arr1.flags.aligned
values = [random.randrange(-128, 128) for _ in range(length)]
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr1[i] = value
if dtype2 is None:
if dtype1.char == "?":
values = [bool(v) for v in values]
return arr1, values
if dtype2.char == "?":
values = [bool(v) for v in values]
arr2 = np.empty(length, dtype=dtype2)
assert arr2.flags.c_contiguous
assert arr2.flags.aligned
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr2[i] = value
return arr1, arr2, values
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
"""
Returns a copy of arr1 that may be non-contiguous or unaligned, and a
matching array for arr2 (although not a copy).
"""
if contig:
stride1 = arr1.dtype.itemsize
stride2 = arr2.dtype.itemsize
elif aligned:
stride1 = 2 * arr1.dtype.itemsize
stride2 = 2 * arr2.dtype.itemsize
else:
stride1 = arr1.dtype.itemsize + 1
stride2 = arr2.dtype.itemsize + 1
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
from_bytes = np.zeros(max_size1, dtype=np.uint8)
to_bytes = np.zeros(max_size2, dtype=np.uint8)
# Sanity check that the above is large enough:
assert stride1 * len(arr1) <= from_bytes.nbytes
assert stride2 * len(arr2) <= to_bytes.nbytes
if aligned:
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
arr2.shape, (stride2,))
else:
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
arr2.shape, (stride2,))
new1[...] = arr1
if not contig:
# Ensure we did not overwrite bytes that should not be written:
offset = arr1.dtype.itemsize if aligned else 0
buf = from_bytes[offset::stride1].tobytes()
assert buf.count(b"\0") == len(buf)
if contig:
assert new1.flags.c_contiguous
assert new2.flags.c_contiguous
else:
assert not new1.flags.c_contiguous
assert not new2.flags.c_contiguous
if aligned:
assert new1.flags.aligned
assert new2.flags.aligned
else:
assert not new1.flags.aligned or new1.dtype.alignment == 1
assert not new2.flags.aligned or new2.dtype.alignment == 1
return new1, new2
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_simple_cancast(self, from_Dt):
for to_Dt in simple_dtypes:
cast = get_castingimpl(from_Dt, to_Dt)
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
default = cast._resolve_descriptors((from_dt, None))[1][1]
assert default == to_Dt()
del default
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, to_dt))
assert(type(from_res) == from_Dt)
assert(type(to_res) == to_Dt)
if casting & Casting.cast_is_view:
# If a view is acceptable, this is "no" casting
# and byte order must be matching.
assert casting == Casting.no | Casting.cast_is_view
# The above table lists this as "equivalent"
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
# Note that to_res may not be the same as from_dt
assert from_res.isnative == to_res.isnative
else:
if from_Dt == to_Dt:
# Note that to_res may not be the same as from_dt
assert from_res.isnative != to_res.isnative
assert casting == CAST_TABLE[from_Dt][to_Dt]
if from_Dt is to_Dt:
assert(from_dt is from_res)
assert(to_dt is to_res)
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
def test_simple_direct_casts(self, from_dt):
"""
This test checks numeric direct casts for dtypes supported also by the
struct module (plus complex). It tries to be test a wide range of
inputs, but skips over possibly undefined behaviour (e.g. int rollover).
Longdouble and CLongdouble are tested, but only using double precision.
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
for to_dt in simple_dtype_instances():
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
# Do not test this case, it is handled in multiple steps,
# each of which should is tested individually.
return
safe = (casting & ~Casting.cast_is_view) <= Casting.safe
del from_res, to_res, casting
arr1, arr2, values = self.get_data(from_dt, to_dt)
cast._simple_strided_call((arr1, arr2))
# Check via python list
assert arr2.tolist() == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Check if alignment makes a difference, but only if supported
# and only if the alignment can be wrong
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
not cast._supports_unaligned):
return
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
del arr1_o, arr2_o, cast
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_numeric_to_times(self, from_Dt):
# We currently only implement contiguous loops, so only need to
# test those.
from_dt = from_Dt()
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
for time_dt in time_dtypes:
cast = get_castingimpl(type(from_dt), type(time_dt))
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, time_dt))
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
assert(casting & CAST_TABLE[from_Dt][type(time_dt)])
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
arr2 = arr2.view(time_dt)
arr2[...] = np.datetime64("NaT")
if time_dt == np.dtype("M8"):
# This is a bit of a strange path, and could probably be removed
arr1[-1] = 0 # ensure at least one value is not NaT
# The cast currently succeeds, but the values are invalid:
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
return
cast._simple_strided_call((arr1, arr2))
assert [int(v) for v in arr2.tolist()] == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
["from_dt", "to_dt", "expected_casting", "nom", "denom"],
[("M8[ns]", None,
Casting.no | Casting.cast_is_view, 1, 1),
(str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
("M8[ms]", "M8", Casting.unsafe, 1, 1), # should be invalid cast
("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
("M8[4D]", "M8[1M]", Casting.same_kind, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, -1, 1314, -1315, 564442610]),
("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
(str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
("m8[ms]", "m8", Casting.unsafe, 1, 1), # should be invalid cast
("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
("m8[4D]", "m8[1M]", Casting.unsafe, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, 0, 1314, -1315, 564442610])])
def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
from_dt = np.dtype(from_dt)
if to_dt is not None:
to_dt = np.dtype(to_dt)
# Test a few values for casting (results generated with NumPy 1.19)
values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
assert values.dtype.byteorder == from_dt.byteorder
assert np.isnat(values.view(from_dt)[0])
DType = type(from_dt)
cast = get_castingimpl(DType, DType)
casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
assert from_res is from_dt
assert to_res is to_dt or to_dt is None
assert casting == expected_casting
if nom is not None:
expected_out = (values * nom // denom).view(to_res)
expected_out[0] = "NaT"
else:
expected_out = np.empty_like(values)
expected_out[...] = denom
expected_out = expected_out.view(to_dt)
orig_arr = values.view(from_dt)
orig_out = np.empty_like(expected_out)
if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
# Casting from non-generic to generic units is an error and should
# probably be reported as an invalid cast earlier.
with pytest.raises(ValueError):
cast._simple_strided_call((orig_arr, orig_out))
return
for aligned in [True, True]:
for contig in [True, True]:
arr, out = self.get_data_variation(
orig_arr, orig_out, aligned, contig)
out[...] = 0
cast._simple_strided_call((arr, out))
assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
fact = 1 if dtype.char == "S" else 4
length = dtype.itemsize // fact + change_length
return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
@pytest.mark.parametrize("other_DT", simple_dtypes)
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_cancast(self, other_DT, string_char):
fact = 1 if string_char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(other_DT, string_DT)
other_dt = other_DT()
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert safety == Casting.safe # we consider to string casts "safe"
assert isinstance(res_dt, string_DT)
# These casts currently implement changing the string length, so
# check the cast-safety for too long/fixed string lengths:
for change_length in [-1, 0, 1]:
if change_length >= 0:
expected_safety = Casting.safe
else:
expected_safety = Casting.same_kind
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
# The opposite direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
safety, _ = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
cast = get_castingimpl(string_DT, other_DT)
safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
assert safety == Casting.unsafe
assert other_dt is res_dt # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
"""
Tests casts from and to string by checking the roundtripping property.
The test also covers some string to string casts (but not all).
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
_, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
assert other_dt.byteorder != res_other_dt.byteorder
return
orig_arr, values = self.get_data(other_dt, None)
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
string_dt_short = self.string_with_modified_length(string_dt, -1)
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
string_dt_long = self.string_with_modified_length(string_dt, 1)
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
assert not cast._supports_unaligned # if support is added, should test
assert not cast_back._supports_unaligned
for contig in [True, False]:
other_arr, str_arr = self.get_data_variation(
orig_arr, str_arr, True, contig)
_, str_arr_short = self.get_data_variation(
orig_arr, str_arr_short.copy(), True, contig)
_, str_arr_long = self.get_data_variation(
orig_arr, str_arr_long, True, contig)
cast._simple_strided_call((other_arr, str_arr))
cast._simple_strided_call((other_arr, str_arr_short))
assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
cast._simple_strided_call((other_arr, str_arr_long))
assert_array_equal(str_arr, str_arr_long)
if other_dt.kind == "b":
# Booleans do not roundtrip
continue
other_arr[...] = 0
cast_back._simple_strided_call((str_arr, other_arr))
assert_array_equal(orig_arr, other_arr)
other_arr[...] = 0
cast_back._simple_strided_call((str_arr_long, other_arr))
assert_array_equal(orig_arr, other_arr)
@pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_to_string_cancast(self, other_dt, string_char):
other_dt = np.dtype(other_dt)
fact = 1 if string_char == "S" else 4
div = 1 if other_dt.char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
if other_dt.char == string_char:
if other_dt.isnative:
expected_safety = Casting.no | Casting.cast_is_view
else:
expected_safety = Casting.equiv
elif string_char == "U":
expected_safety = Casting.safe
else:
expected_safety = Casting.unsafe
assert expected_safety == safety
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
assert res_dt is to_dt
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
assert safety == Casting.same_kind
elif change_length == 0:
assert safety == expected_safety
elif change_length > 0:
assert safety == Casting.safe
@pytest.mark.parametrize("order1", [">", "<"])
@pytest.mark.parametrize("order2", [">", "<"])
def test_unicode_byteswapped_cast(self, order1, order2):
# Very specific tests (not using the castingimpl directly)
# that tests unicode bytedwaps including for unaligned array data.
dtype1 = np.dtype(f"{order1}U30")
dtype2 = np.dtype(f"{order2}U30")
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
if dtype1.alignment != 1:
# alignment should always be >1, but skip the check if not
assert not data1.flags.aligned
assert not data2.flags.aligned
element = "this is a ünicode string‽"
data1[()] = element
# Test both `data1` and `data1.copy()` (which should be aligned)
for data in [data1, data1.copy()]:
data2[...] = data1
assert data2[()] == element
assert data2.copy()[()] == element
def test_void_to_string_special_case(self):
# Cover a small special case in void to string casting that could
# probably just as well be turned into an error (compare
# `test_object_to_parametric_internal_error` below).
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
def test_object_to_parametric_internal_error(self):
# We reject casting from object to a parametric type, without
# figuring out the correct instance first.
object_dtype = type(np.dtype(object))
other_dtype = type(np.dtype(str))
cast = get_castingimpl(object_dtype, other_dtype)
with pytest.raises(TypeError,
match="casting from object to the parametric DType"):
cast._resolve_descriptors((np.dtype("O"), None))
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_object_and_simple_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(object_dtype, type(dtype))
safety, (_, res_dt) = cast._resolve_descriptors((np.dtype("O"), dtype))
assert safety == Casting.unsafe
assert res_dt is dtype
safety, (_, res_dt) = cast._resolve_descriptors((np.dtype("O"), None))
assert safety == Casting.unsafe
assert res_dt == dtype.newbyteorder("=")
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_simple_to_object_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(type(dtype), object_dtype)
safety, (_, res_dt) = cast._resolve_descriptors((dtype, None))
assert safety == Casting.safe
assert res_dt is np.dtype("O")
@pytest.mark.parametrize("casting", ["no", "unsafe"])
def test_void_and_structured_with_subarray(self, casting):
    """Regression test for gh-19325 (structured dtype with subarray field)."""
    struct_dt = np.dtype([("foo", "<f4", (3, 2))])
    # Only unsafe casting may convert between plain void and this structure.
    allowed = casting == "unsafe"
    assert np.can_cast("V4", struct_dt, casting=casting) == allowed
    assert np.can_cast(struct_dt, "V4", casting=casting) == allowed
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_object_casts_NULL_None_equivalence(self, dtype):
    """A NULL-filled object array must cast exactly like a None-filled one.

    Casting None to another dtype may succeed or fail, but whichever it
    is, an array of NULL object pointers has to behave identically.
    """
    filled_with_none = np.array([None] * 5)
    filled_with_nulls = np.empty_like([None] * 5)
    # Sanity check; if this fails (maybe it should) the test loses its
    # purpose:
    assert filled_with_nulls.tobytes() == b"\x00" * filled_with_nulls.nbytes

    try:
        expected = filled_with_none.astype(dtype)
    except TypeError:
        with pytest.raises(TypeError):
            filled_with_nulls.astype(dtype)
    else:
        assert_array_equal(expected, filled_with_nulls.astype(dtype))
def test_float_to_bool(self):
# test case corresponding to gh-19514
# simple test for casting bool_ to float16
res = np.array([0, 3, -7], dtype=np.int8).view(bool)
expected = [0, 1, 1]
assert_array_equal(res, expected)
| simongibbons/numpy | numpy/core/tests/test_casting_unittests.py | Python | bsd-3-clause | 29,168 |
"""Convenience functions for padding
.. versionadded:: 0.1.4
"""
from __future__ import division, print_function
import numpy as np
def _get_pad_left_right(small, large):
"""Compute left and right padding values.
Here we use the convention that if the padding
size is odd, we pad the odd part to the right
and the even part to the left.
Parameters
----------
small: int
Old size of original 1D array
large: int
New size off padded 1D array
Returns
-------
(padleft, padright) : tuple
The proposed padding sizes.
"""
assert small < large, "Can only pad when new size larger than old size"
padsize = large - small
if padsize % 2 != 0:
leftpad = (padsize - 1)/2
else:
leftpad = padsize/2
rightpad = padsize-leftpad
return int(leftpad), int(rightpad)
def pad_add(av, size=None, stlen=10):
    """Perform linear padding for complex array

    The input array `av` is padded with a linear ramp starting at the
    edges and going outwards to an average value computed from a band
    of thickness `stlen` at the outer boundary of the array.

    Pads will only be appended, not prepended to the array.
    If the input array is complex, pads will be complex numbers.
    The average is computed for phase and amplitude separately.

    Parameters
    ----------
    av: complex 1D or 2D ndarray
        The array that will be padded.
    size: int or tuple of length 1 (1D) or tuple of length 2 (2D), optional
        The final size of the padded array. Defaults to double the size
        of the input array.
    stlen: int, optional
        The thickness of the frame within `av` that will be used to
        compute an average value for padding.

    Returns
    -------
    pv: complex 1D or 2D ndarray
        Padded array `av` with pads appended to right and bottom.
    """
    if size is None:
        # Default: double every axis.
        size = [int(2 * s) for s in av.shape]
    elif not hasattr(size, "__len__"):
        # Allow a plain integer for the 1D case.
        size = [size]
    ndim = len(av.shape)
    assert ndim in [1, 2], "Only 1D and 2D arrays!"
    assert ndim == len(size), "`size` must have same length as `av.shape`!"
    # Dispatch to the dimension-specific implementation.
    if ndim == 2:
        return _pad_add_2d(av, size, stlen)
    else:
        return _pad_add_1d(av, size, stlen)
def _pad_add_1d(av, size, stlen):
    """1D component of `pad_add`"""
    assert len(size) == 1
    lr = _get_pad_left_right(av.shape[0], size[0])
    # Select a border band of thickness `stlen` at both ends.
    inner = np.zeros(av.shape, dtype=bool)
    inner[stlen:-stlen] = True
    border = av[~inner]
    if av.dtype.name.count("complex"):
        # Average amplitude and phase separately for complex data.
        padval = (np.average(np.abs(border))
                  * np.exp(1j * np.average(np.angle(border))))
    else:
        padval = np.average(border)
    # Very old numpy releases (1.7-1.9) expected one (before, after)
    # pair per axis for `end_values`.
    if np.__version__[:3] in ["1.7", "1.8", "1.9"]:
        end_values = ((padval, padval),)
    else:
        end_values = (padval,)
    padded = np.pad(av,
                    lr,
                    mode="linear_ramp",
                    end_values=end_values)
    # Roll the array so that all padding ends up on the right.
    return np.roll(padded, -lr[0], 0)
def _pad_add_2d(av, size, stlen):
    """2D component of `pad_add`"""
    assert len(size) == 2
    lrx = _get_pad_left_right(av.shape[0], size[0])
    lry = _get_pad_left_right(av.shape[1], size[1])
    # Select a border frame of thickness `stlen` around the array.
    inner = np.zeros(av.shape, dtype=bool)
    inner[stlen:-stlen, stlen:-stlen] = True
    border = av[~inner]
    if av.dtype.name.count("complex"):
        # Average amplitude and phase separately for complex data.
        padval = (np.average(np.abs(border))
                  * np.exp(1j * np.average(np.angle(border))))
    else:
        padval = np.average(border)
    # Very old numpy releases (1.7-1.9) expected one (before, after)
    # pair per axis for `end_values`.
    if np.__version__[:3] in ["1.7", "1.8", "1.9"]:
        end_values = ((padval, padval), (padval, padval))
    else:
        end_values = (padval,)
    padded = np.pad(av,
                    (lrx, lry),
                    mode="linear_ramp",
                    end_values=end_values)
    # Roll both axes so that all padding ends up to the right/bottom.
    padded = np.roll(padded, -lrx[0], 0)
    return np.roll(padded, -lry[0], 1)
def pad_rem(pv, size=None):
    """Removes linear padding from array

    This is a convenience function that does the opposite
    of `pad_add`.

    Parameters
    ----------
    pv: 1D or 2D ndarray
        The array from which the padding will be removed.
    size: tuple of length 1 (1D) or 2 (2D), optional
        The final size of the un-padded array. Defaults to half the size
        of the input array.

    Returns
    -------
    av: 1D or 2D ndarray
        The un-padded data, i.e. the upper-left corner of `pv`.
    """
    if size is None:
        # Halve every axis; this only works for even input sizes.
        size = []
        for length in pv.shape:
            assert length % 2 == 0, \
                "Uneven size; specify correct size of output!"
            size.append(length // 2)
    elif not hasattr(size, "__len__"):
        # Allow a plain integer for the 1D case.
        size = [size]
    assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!"
    assert len(pv.shape) == len(
        size), "`size` must have same length as `av.shape`!"
    # Pads were appended to the right/bottom, so the original data is
    # the upper-left corner.
    if len(pv.shape) == 2:
        return pv[:size[0], :size[1]]
    else:
        return pv[:size[0]]
| RI-imaging/nrefocus | nrefocus/pad.py | Python | bsd-3-clause | 5,111 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2008 Matt Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import time
from trac.core import TracError
from trac.db.util import with_transaction
from trac.util import hex_entropy
from trac.util.html import Markup
UPDATE_INTERVAL = 3600 * 24 # Update session last_visit time stamp after 1 day
PURGE_AGE = 3600 * 24 * 90 # Purge session after 90 days idle
COOKIE_KEY = 'trac_session'
class DetachedSession(dict):
    """Session state persisted in the database, detached from any request.

    Behaves as a ``dict`` of session attributes keyed by name; values
    are stored as unicode strings.  Call `save` to persist changes to
    the ``session`` and ``session_attribute`` tables.
    """

    def __init__(self, env, sid):
        # When `sid` is given, load the *authenticated* session with that
        # id; otherwise start as an empty, unsaved anonymous session.
        dict.__init__(self)
        self.env = env
        self.sid = None
        self.last_visit = 0
        self._new = True  # True until a matching DB row is found/created
        self._old = {}  # snapshot of persisted attributes, for dirty checks
        if sid:
            self.get_session(sid, authenticated=True)
        else:
            self.authenticated = False

    def __setitem__(self, key, value):
        # All attribute values are coerced to unicode before storage.
        dict.__setitem__(self, key, unicode(value))

    def get_session(self, sid, authenticated=False):
        """Populate this session from the database row for `sid`.

        If no matching row exists, the session stays marked as new and
        keeps its (empty) in-memory state.
        """
        self.env.log.debug('Retrieving session for ID %r', sid)
        db = self.env.get_db_cnx()
        cursor = db.cursor()

        self.sid = sid
        self.authenticated = authenticated
        cursor.execute("SELECT last_visit FROM session "
                       "WHERE sid=%s AND authenticated=%s",
                       (sid, int(authenticated)))
        row = cursor.fetchone()
        if not row:
            return
        self._new = False
        self.last_visit = int(row[0] or 0)

        cursor.execute("SELECT name,value FROM session_attribute "
                       "WHERE sid=%s and authenticated=%s",
                       (sid, int(authenticated)))
        for name, value in cursor:
            self[name] = value
        # Remember what is currently persisted so `save` can detect changes.
        self._old.update(self)

    def save(self):
        """Persist session changes and opportunistically purge old sessions.

        Does nothing for a session that never had any data.  All the
        database work happens inside a single transaction.
        """
        if not self._old and not self.items():
            # The session doesn't have associated data, so there's no need to
            # persist it
            return

        authenticated = int(self.authenticated)
        now = int(time.time())

        @with_transaction(self.env)
        def delete_session_cookie(db):
            # NOTE(review): despite its name, this transaction body also
            # inserts and updates session rows, not only deletes them.
            cursor = db.cursor()
            if self._new:
                self.last_visit = now
                self._new = False
                # The session might already exist even if _new is True since
                # it could have been created by a concurrent request (#3563).
                try:
                    cursor.execute("INSERT INTO session "
                                   " (sid,last_visit,authenticated)"
                                   " VALUES (%s,%s,%s)",
                                   (self.sid, self.last_visit, authenticated))
                except Exception:
                    db.rollback()
                    self.env.log.warning('Session %s already exists', self.sid)
            if self._old != self:
                # Attributes changed: rewrite the full attribute set.
                attrs = [(self.sid, authenticated, k, v)
                         for k, v in self.items()]
                cursor.execute("DELETE FROM session_attribute WHERE sid=%s",
                               (self.sid,))
                self._old = dict(self.items())
                if attrs:
                    # The session variables might already have been updated
                    # by a concurrent request.
                    try:
                        cursor.executemany("INSERT INTO session_attribute "
                                           " (sid,authenticated,name,value) "
                                           " VALUES (%s,%s,%s,%s)", attrs)
                    except Exception:
                        db.rollback()
                        self.env.log.warning('Attributes for session %s '
                                             'already updated', self.sid)
                elif not authenticated:
                    # No need to keep around empty unauthenticated sessions
                    cursor.execute("DELETE FROM session "
                                   "WHERE sid=%s AND authenticated=0",
                                   (self.sid,))
                    return
            # Update the session last visit time if it is over an hour old,
            # so that session doesn't get purged
            if now - self.last_visit > UPDATE_INTERVAL:
                self.last_visit = now
                self.env.log.info("Refreshing session %s", self.sid)
                cursor.execute('UPDATE session SET last_visit=%s '
                               'WHERE sid=%s AND authenticated=%s',
                               (self.last_visit, self.sid, authenticated))
                # Purge expired sessions. We do this only when the session was
                # changed as to minimize the purging.
                mintime = now - PURGE_AGE
                self.env.log.debug('Purging old, expired, sessions.')
                cursor.execute("DELETE FROM session_attribute "
                               "WHERE authenticated=0 AND sid "
                               "IN (SELECT sid FROM session WHERE "
                               "authenticated=0 AND last_visit < %s)",
                               (mintime,))
                cursor.execute("DELETE FROM session WHERE "
                               "authenticated=0 AND last_visit < %s",
                               (mintime,))
class Session(DetachedSession):
    """Basic session handling and per-session storage.

    Bound to a live request `req`; manages the session cookie and the
    promotion of anonymous sessions once a user authenticates.
    """

    def __init__(self, env, req):
        super(Session, self).__init__(env, None)
        self.req = req
        if req.authname == 'anonymous':
            # Anonymous users are tracked via a random session cookie.
            if not req.incookie.has_key(COOKIE_KEY):
                self.sid = hex_entropy(24)
                self.bake_cookie()
            else:
                sid = req.incookie[COOKIE_KEY].value
                self.get_session(sid)
        else:
            # Authenticated users: adopt any pending anonymous session
            # data first, then use the user name itself as session id.
            if req.incookie.has_key(COOKIE_KEY):
                sid = req.incookie[COOKIE_KEY].value
                self.promote_session(sid)
            self.get_session(req.authname, authenticated=True)

    def bake_cookie(self, expires=PURGE_AGE):
        """Set the session cookie on the outgoing response."""
        assert self.sid, 'Session ID not set'
        self.req.outcookie[COOKIE_KEY] = self.sid
        self.req.outcookie[COOKIE_KEY]['path'] = self.req.base_path or '/'
        self.req.outcookie[COOKIE_KEY]['expires'] = expires
        if self.env.secure_cookies:
            self.req.outcookie[COOKIE_KEY]['secure'] = True

    def get_session(self, sid, authenticated=False):
        """Load session `sid`, re-baking the cookie when it went stale."""
        # Re-bake when the session id changed under us...
        refresh_cookie = False
        if self.sid and sid != self.sid:
            refresh_cookie = True

        super(Session, self).get_session(sid, authenticated)
        # ...or when the last visit is older than UPDATE_INTERVAL.
        if self.last_visit and time.time() - self.last_visit > UPDATE_INTERVAL:
            refresh_cookie = True

        # Refresh the session cookie if this is the first visit since over a day
        if not authenticated and refresh_cookie:
            self.bake_cookie()

    def change_sid(self, new_sid):
        """Rename the current anonymous session to `new_sid`.

        Raises `TracError` if a session named `new_sid` already exists.
        """
        assert self.req.authname == 'anonymous', \
               'Cannot change ID of authenticated session'
        assert new_sid, 'Session ID cannot be empty'
        if new_sid == self.sid:
            return
        cursor = self.env.get_db_cnx().cursor()
        cursor.execute("SELECT sid FROM session WHERE sid=%s", (new_sid,))
        if cursor.fetchone():
            raise TracError(Markup('Session "%s" already exists.<br />'
                                   'Please choose a different session ID.')
                            % new_sid, 'Error renaming session')
        self.env.log.debug('Changing session ID %s to %s', self.sid, new_sid)

        @with_transaction(self.env)
        def update_session_id(db):
            # Rename both the session row and its attributes atomically.
            cursor = db.cursor()
            cursor.execute("UPDATE session SET sid=%s WHERE sid=%s "
                           "AND authenticated=0", (new_sid, self.sid))
            cursor.execute("UPDATE session_attribute SET sid=%s "
                           "WHERE sid=%s and authenticated=0",
                           (new_sid, self.sid))
        self.sid = new_sid
        self.bake_cookie()

    def promote_session(self, sid):
        """Promotes an anonymous session to an authenticated session, if there
        is no preexisting session data for that user name.
        """
        assert self.req.authname != 'anonymous', \
               'Cannot promote session of anonymous user'

        @with_transaction(self.env)
        def update_session_id(db):
            cursor = db.cursor()
            # Fetch the authenticated flags of both candidate rows: the
            # anonymous `sid` and the user-named session (if either exists).
            cursor.execute("SELECT authenticated FROM session "
                           "WHERE sid=%s OR sid=%s ", (sid, self.req.authname))
            authenticated_flags = [row[0] for row in cursor.fetchall()]

            if len(authenticated_flags) == 2:
                # There's already an authenticated session for the user,
                # we simply delete the anonymous session
                cursor.execute("DELETE FROM session WHERE sid=%s "
                               "AND authenticated=0", (sid,))
                cursor.execute("DELETE FROM session_attribute WHERE sid=%s "
                               "AND authenticated=0", (sid,))
            elif len(authenticated_flags) == 1:
                if not authenticated_flags[0]:
                    # Update the anomymous session records so the session ID
                    # becomes the user name, and set the authenticated flag.
                    self.env.log.debug('Promoting anonymous session %s to '
                                       'authenticated session for user %s',
                                       sid, self.req.authname)
                    cursor.execute("UPDATE session SET sid=%s,authenticated=1 "
                                   "WHERE sid=%s AND authenticated=0",
                                   (self.req.authname, sid))
                    cursor.execute("UPDATE session_attribute "
                                   "SET sid=%s,authenticated=1 WHERE sid=%s",
                                   (self.req.authname, sid))
            else:
                # we didn't have an anonymous session for this sid
                cursor.execute("INSERT INTO session "
                               "(sid,last_visit,authenticated)"
                               " VALUES(%s,%s,1)",
                               (self.req.authname, int(time.time())))
        self._new = False

        self.sid = sid
        self.bake_cookie(0) # expire the cookie
| dokipen/trac | trac/web/session.py | Python | bsd-3-clause | 11,069 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Signature) on 2017-03-22.
# 2017, SMART Health IT.
from . import element
class Signature(element.Element):
    """ A digital Signature - XML DigSig, JWT, Graphical image of signature, etc..

    A digital signature along with supporting context. The signature may be
    electronic/cryptographic in nature, or a graphical image representing a
    hand-written signature, or a signature process. Different signature
    approaches have different utilities.
    """

    # FHIR resource/element type name used during (de)serialization.
    resource_type = "Signature"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.blob = None
        """ The actual signature content (XML DigSig. JWT, picture, etc.).
        Type `str`. """

        self.contentType = None
        """ The technical format of the signature.
        Type `str`. """

        self.onBehalfOfReference = None
        """ The party represented.
        Type `FHIRReference` referencing `Practitioner, RelatedPerson, Patient, Device, Organization` (represented as `dict` in JSON). """

        self.onBehalfOfUri = None
        """ The party represented.
        Type `str`. """

        self.type = None
        """ Indication of the reason the entity signed the object(s).
        List of `Coding` items (represented as `dict` in JSON). """

        self.when = None
        """ When the signature was created.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.whoReference = None
        """ Who signed.
        Type `FHIRReference` referencing `Practitioner, RelatedPerson, Patient, Device, Organization` (represented as `dict` in JSON). """

        self.whoUri = None
        """ Who signed.
        Type `str`. """

        # Let the base class apply `jsondict` against the declared properties.
        super(Signature, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the property definitions used for JSON (de)serialization.

        Each tuple is (attribute name, JSON name, type, is_list,
        "one of many" group, is_required).  The referenced model classes
        (coding, fhirdate, fhirreference) are imported at module bottom.
        """
        js = super(Signature, self).elementProperties()
        js.extend([
            ("blob", "blob", str, False, None, False),
            ("contentType", "contentType", str, False, None, False),
            ("onBehalfOfReference", "onBehalfOfReference", fhirreference.FHIRReference, False, "onBehalfOf", False),
            ("onBehalfOfUri", "onBehalfOfUri", str, False, "onBehalfOf", False),
            ("type", "type", coding.Coding, True, None, True),
            ("when", "when", fhirdate.FHIRDate, False, None, True),
            ("whoReference", "whoReference", fhirreference.FHIRReference, False, "who", True),
            ("whoUri", "whoUri", str, False, "who", True),
        ])
        return js
import sys
# The generated model modules can import each other circularly.  When a
# plain relative import fails, the sibling module has in practice already
# been loaded as part of the package, so fall back to fetching it
# directly from ``sys.modules``.
try:
    from . import coding
except ImportError:
    coding = sys.modules[__package__ + '.coding']
try:
    from . import fhirdate
except ImportError:
    fhirdate = sys.modules[__package__ + '.fhirdate']
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']