repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
EIREXE/SpaceDock | refs/heads/dev | alembic/versions/46d0c3d3104_update_mod_media_schema.py | 2 | """Update mod/media schema
Revision ID: 46d0c3d3104
Revises: 2ef141f5132
Create Date: 2014-06-06 00:34:39.866314
"""
# revision identifiers, used by Alembic.
revision = '46d0c3d3104'        # this migration's ID
down_revision = '2ef141f5132'   # parent migration in the chain
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable 'background' column to the 'mod' table."""
    ### commands auto generated by Alembic - please adjust! ###
    background_column = sa.Column('background', sa.String(length=32),
                                  nullable=True)
    op.add_column('mod', background_column)
    ### end Alembic commands ###
def downgrade():
    """Drop the 'background' column added by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('mod', 'background')
    ### end Alembic commands ###
|
Eelviny/nxt-python | refs/heads/master | nxt/ipsock.py | 3 | # nxt.ipsock module -- Server socket communication with LEGO Minstorms NXT
# Copyright (C) 2011 zonedaobne, Marcus Wanner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import socket
from nxt.brick import Brick
class IpSock(object):
    """Transport that talks to an NXT brick through a TCP relay server.

    Mirrors the interface of the other nxt transport classes: connect()
    performs the handshake and returns a Brick bound to this socket, and
    send()/recv() move raw command payloads.
    """

    def __init__(self, host, port):
        # Connection is deferred until connect(); until then sock is None.
        self.host = host
        self.port = port
        self.sock = None
        self.debug = False

    def __str__(self):
        return 'Server (%s)' % self.host

    def connect(self):
        """Open the TCP connection, handshake, and return a Brick."""
        if self.debug:
            print('Connecting via Server...')
        sock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        #TODO: sasl authentication here?
        self.sock = sock
        if self.debug:
            print('Connected.')
        # '\x98' is the protocol "hello" byte; the server replies with the
        # underlying transport type, recorded as e.g. 'ip' + reply.
        # NOTE(review): on Python 3, socket.send() requires bytes but these
        # payloads are str, and recv() returns bytes that are concatenated
        # with 'ip' -- confirm which Python version this file targets.
        self.send('\x98')
        self.type = 'ip'+self.recv()
        return Brick(self)

    def close(self):
        """Send the protocol "goodbye" byte (0x99) and close the socket."""
        if self.debug:
            print('Closing Server connection...')
        self.sock.send('\x99')
        self.sock.close()
        if self.debug:
            print('Server connection closed.')

    def send(self, data):
        # Hex-dump the outgoing payload when debugging, then transmit it.
        if self.debug:
            print('Send:', end=' ')
            print(':'.join('%02x' % ord(c) for c in data))
        self.sock.send(data)

    def recv(self):
        """Receive up to 1024 bytes from the server and return them."""
        data = self.sock.recv(1024)
        if self.debug:
            print('Recv:', end=' ')
            print(':'.join('%02x' % ord(c) for c in data))
        return data
#TODO: add a find_bricks method and a corresponding broadcast system to nxt_server?
|
nevrenato/HetsAlloy | refs/heads/master | FreeCAD/BrepConversion/src/brep_reader.py | 6 | from OCC.Display.SimpleGui import *
from OCC import BRep
from OCC.BRepTools import *
from OCC import TopoDS
from OCC.Message import Message_PrinterOStream
import sys
# Read a BREP file (default 'input.brp', or the first CLI argument) into a
# TopoDS shape using OpenCASCADE's BRepTools reader.
fileName = 'input.brp'
if len(sys.argv) > 1:
    fileName = sys.argv[1]

reader = BRepTools()
shape = TopoDS.TopoDS_Shape()
builder = BRep.BRep_Builder()
printer_stream = Message_PrinterOStream()
message_stream = printer_stream.GetStream()
reader.Read(shape, str(fileName), builder)
|
apyrgio/synnefo | refs/heads/release-0.16 | snf-cyclades-app/synnefo/logic/servers.py | 1 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from datetime import datetime
from socket import getfqdn
from django import dispatch
from synnefo.db import transaction
from django.utils import simplejson as json
from snf_django.lib.api import faults
from django.conf import settings
from synnefo.api import util
from synnefo.logic import backend, ips, utils
from synnefo.logic.backend_allocator import BackendAllocator
from synnefo.db.models import (NetworkInterface, VirtualMachine,
VirtualMachineMetadata, IPAddressLog, Network,
Image, pooled_rapi_client)
from vncauthproxy.client import request_forwarding as request_vnc_forwarding
from synnefo.logic import rapi
from synnefo.volume.volumes import _create_volume
from synnefo.volume.util import get_volume
from synnefo.logic import commands
from synnefo import quotas
# Module-level logger for all server lifecycle operations.
log = logging.getLogger(__name__)

# server creation signal
# Fired once a VM row exists; listeners (e.g. vmapi) receive the parameters
# needed to configure the new instance.
server_created = dispatch.Signal(providing_args=["created_vm_params"])
@transaction.commit_on_success
def create(userid, name, password, flavor, image_id, metadata={},
           personality=[], networks=None, use_backend=None, project=None,
           volumes=None):
    """Create a new VirtualMachine for `userid` and submit it to Ganeti.

    Validates the server name, resolves the boot image (either `image_id`
    or the source of the first volume), creates the DB row, the ports, the
    volumes and the metadata, then enqueues the OP_INSTANCE_CREATE job via
    create_server(). Returns the VirtualMachine instance.

    Raises faults.BadRequest on any validation failure.

    NOTE(review): `metadata={}` and `personality=[]` are mutable defaults;
    they are only read here, never mutated, so the classic shared-state bug
    is latent — but worth confirming before anyone adds a mutation.
    """
    utils.check_name_length(name, VirtualMachine.VIRTUAL_MACHINE_NAME_LENGTH,
                            "Server name is too long")

    # Get the image, if any, that is used for the first volume
    vol_image_id = None
    if volumes:
        vol = volumes[0]
        if vol["source_type"] in ["image", "snapshot"]:
            vol_image_id = vol["source_uuid"]

    # Check conflict between server's and volume's image
    if image_id and vol_image_id and image_id != vol_image_id:
        raise faults.BadRequest("The specified server's image is different"
                                " from the the source of the first volume.")
    elif vol_image_id and not image_id:
        image_id = vol_image_id
    elif not image_id:
        raise faults.BadRequest("You need to specify either an image or a"
                                " block device mapping.")

    if len(metadata) > settings.CYCLADES_VM_MAX_METADATA:
        raise faults.BadRequest("Virtual Machines cannot have more than %s "
                                "metadata items" %
                                settings.CYCLADES_VM_MAX_METADATA)
    # Get image info
    image = util.get_image_dict(image_id, userid)

    if not volumes:
        # If no volumes are specified, we automatically create a volume with
        # the size of the flavor and filled with the specified image.
        volumes = [{"source_type": "image",
                    "source_uuid": image_id,
                    "size": flavor.disk,
                    "delete_on_termination": True}]

    assert(len(volumes) > 0), "Cannot create server without volumes"

    if volumes[0]["source_type"] == "blank":
        raise faults.BadRequest("Root volume cannot be blank")

    # Best-effort: cache the image's descriptive info in the local DB so
    # listings don't have to hit the image service again.
    try:
        is_system = (image["owner"] == settings.SYSTEM_IMAGES_OWNER)
        img, created = Image.objects.get_or_create(uuid=image["id"],
                                                   version=image["version"])
        if created:
            img.owner = image["owner"]
            img.name = image["name"]
            img.location = image["location"]
            img.mapfile = image["mapfile"]
            img.is_public = image["is_public"]
            img.is_snapshot = image["is_snapshot"]
            img.is_system = is_system
            img.os = image["metadata"].get("OS", "unknown")
            img.osfamily = image["metadata"].get("OSFAMILY", "unknown")
            img.save()
    except Exception as e:
        # Image info is not critical. Continue if it fails for any reason
        log.warning("Failed to store image info: %s", e)

    if use_backend is None:
        # Allocate server to a Ganeti backend
        use_backend = allocate_new_server(userid, flavor)

    # Create the ports for the server
    ports = create_instance_ports(userid, networks)

    if project is None:
        project = userid

    # We must save the VM instance now, so that it gets a valid
    # vm.backend_vm_id.
    vm = VirtualMachine.objects.create(name=name,
                                       backend=use_backend,
                                       userid=userid,
                                       project=project,
                                       imageid=image["id"],
                                       image_version=image["version"],
                                       flavor=flavor,
                                       operstate="BUILD")
    log.info("Created entry in DB for VM '%s'", vm)

    # Associate the ports with the server
    for index, port in enumerate(ports):
        associate_port_with_machine(port, vm)
        port.index = index
        port.save()

    # Create instance volumes
    server_vtype = flavor.volume_type
    server_volumes = []
    for index, vol_info in enumerate(volumes):
        if vol_info["source_type"] == "volume":
            # Attach an existing volume: it must match the flavor's volume
            # type and be free (AVAILABLE).
            uuid = vol_info["source_uuid"]
            v = get_volume(userid, uuid, for_update=True, non_deleted=True,
                           exception=faults.BadRequest)
            if v.volume_type_id != server_vtype.id:
                msg = ("Volume '%s' has type '%s' while flavor's volume type"
                       " is '%s'" % (v.id, v.volume_type_id, server_vtype.id))
                raise faults.BadRequest(msg)
            if v.status != "AVAILABLE":
                raise faults.BadRequest("Cannot use volume while it is in %s"
                                        " status" % v.status)
            v.delete_on_termination = vol_info["delete_on_termination"]
            v.machine = vm
            v.index = index
            v.save()
        else:
            v = _create_volume(server=vm, user_id=userid,
                               volume_type=server_vtype, project=project,
                               index=index, **vol_info)
        server_volumes.append(v)

    # Create instance metadata
    for key, val in metadata.items():
        utils.check_name_length(key, VirtualMachineMetadata.KEY_LENGTH,
                                "Metadata key is too long")
        utils.check_name_length(val, VirtualMachineMetadata.VALUE_LENGTH,
                                "Metadata value is too long")
        VirtualMachineMetadata.objects.create(
            meta_key=key,
            meta_value=val,
            vm=vm)

    # Create the server in Ganeti.
    vm = create_server(vm, ports, server_volumes, flavor, image, personality,
                       password)

    return vm
@transaction.commit_on_success
def allocate_new_server(userid, flavor):
    """Allocate a new server to a Ganeti backend.

    Allocation is performed based on the owner of the server and the
    specified flavor; backends without a public IPv4 address are excluded.
    Runs inside its own transaction so the commit releases all locks taken
    while choosing a backend.
    """
    allocator = BackendAllocator()
    chosen_backend = allocator.allocate(userid, flavor)
    if chosen_backend is None:
        log.error("No available backend for VM with flavor %s", flavor)
        raise faults.ServiceUnavailable("No available backends")
    return chosen_backend
@commands.server_command("BUILD")
def create_server(vm, nics, volumes, flavor, image, personality, password):
    """Submit the OP_INSTANCE_CREATE job for `vm` to its Ganeti backend.

    Emits the `server_created` signal first (needed to trigger the 'vmapi',
    which enriches the vm object with the 'config_url' attribute that must
    be passed to the Ganeti job), then enqueues the creation job. On failure
    the VM, its NICs and its volumes are marked ERROR in the DB.

    Returns the Ganeti job ID, or None if submission failed.
    """
    # If the root volume has a provider, then inform snf-image to not fill
    # the volume with data
    image_id = image["pithosmap"]
    root_volume = volumes[0]
    if root_volume.volume_type.provider in settings.GANETI_CLONE_PROVIDERS:
        image_id = "null"

    server_created.send(sender=vm, created_vm_params={
        'img_id': image_id,
        'img_passwd': password,
        'img_format': str(image['format']),
        'img_personality': json.dumps(personality),
        'img_properties': json.dumps(image['metadata']),
    })

    # send job to Ganeti
    try:
        jobID = backend.create_instance(vm, nics, volumes, flavor, image)
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrowed to Exception so only real errors are
        # converted into the ERROR state.
        log.exception("Failed create instance '%s'", vm)
        jobID = None
        vm.operstate = "ERROR"
        vm.backendlogmsg = "Failed to send job to Ganeti."
        vm.save()
        vm.nics.all().update(state="ERROR")
        vm.volumes.all().update(status="ERROR")

    # At this point the job is enqueued in the Ganeti backend
    vm.backendopcode = "OP_INSTANCE_CREATE"
    vm.backendjobid = jobID
    vm.save()
    log.info("User %s created VM %s, NICs %s, Backend %s, JobID %s",
             vm.userid, vm, nics, vm.backend, str(jobID))
    return jobID
@commands.server_command("DESTROY")
def destroy(vm, shutdown_timeout=None):
    """Enqueue an OP_INSTANCE_REMOVE job to delete `vm` in Ganeti.

    `shutdown_timeout` is forwarded to the backend as the grace period
    before the instance is forcefully removed.
    """
    # XXX: Workaround for race where OP_INSTANCE_REMOVE starts executing on
    # Ganeti before OP_INSTANCE_CREATE. This will be fixed when
    # OP_INSTANCE_REMOVE supports the 'depends' request attribute.
    if (vm.backendopcode == "OP_INSTANCE_CREATE" and
            vm.backendjobstatus not in rapi.JOB_STATUS_FINALIZED and
            backend.job_is_still_running(vm) and
            not backend.vm_exists_in_backend(vm)):
        # NOTE(review): message typo "being build" kept as-is; clients may
        # match on the exact text.
        raise faults.BuildInProgress("Server is being build")
    log.info("Deleting VM %s", vm)
    return backend.delete_instance(vm, shutdown_timeout=shutdown_timeout)
@commands.server_command("START")
def start(vm):
    """Boot an existing, stopped virtual machine."""
    log.info("Starting VM %s", vm)
    job_id = backend.startup_instance(vm)
    return job_id
@commands.server_command("STOP")
def stop(vm, shutdown_timeout=None):
    """Shut down a running virtual machine."""
    log.info("Stopping VM %s", vm)
    job_id = backend.shutdown_instance(vm, shutdown_timeout=shutdown_timeout)
    return job_id
@commands.server_command("REBOOT")
def reboot(vm, reboot_type, shutdown_timeout=None):
    """Reboot a virtual machine; `reboot_type` must be "SOFT" or "HARD"."""
    valid_types = ("SOFT", "HARD")
    if reboot_type not in valid_types:
        raise faults.BadRequest("Malformed request. Invalid reboot"
                                " type %s" % reboot_type)
    log.info("Rebooting VM %s. Type %s", vm, reboot_type)
    return backend.reboot_instance(vm, reboot_type.lower(),
                                   shutdown_timeout=shutdown_timeout)
def resize(vm, flavor):
    """Resize `vm` to `flavor` (CPU/RAM only) via a RESIZE server command."""
    beparams = {"vcpus": flavor.cpu, "maxmem": flavor.ram}
    decorator = commands.server_command("RESIZE",
                                        action_fields={"beparams": beparams})
    return decorator(_resize)(vm, flavor)
def _resize(vm, flavor):
    """Validate and submit the actual resize of `vm` to `flavor`.

    Only CPU and RAM may change; disk size and volume type must match the
    current flavor. Raises faults.BadRequest on any violation.
    """
    old_flavor = vm.flavor
    # User requested the same flavor
    if old_flavor.id == flavor.id:
        raise faults.BadRequest("Server '%s' flavor is already '%s'."
                                % (vm, flavor))
    # Check that resize can be performed
    if old_flavor.disk != flavor.disk:
        raise faults.BadRequest("Cannot change instance's disk size.")
    if old_flavor.volume_type_id != flavor.volume_type_id:
        raise faults.BadRequest("Cannot change instance's volume type.")
    # BUGFIX: the log message had an unbalanced quote ("to '%s").
    log.info("Resizing VM from flavor '%s' to '%s'", old_flavor, flavor)
    return backend.resize_instance(vm, vcpus=flavor.cpu, memory=flavor.ram)
@transaction.commit_on_success
def reassign(vm, project):
    """Move `vm` (and its root volume) to another project.

    Order matters: the action is validated first, the DB rows are updated,
    and only then is the quota commission issued and accepted with both the
    old and new project recorded in `action_fields`.
    """
    commands.validate_server_action(vm, "REASSIGN")

    action_fields = {"to_project": project, "from_project": vm.project}
    log.info("Reassigning VM %s from project %s to %s",
             vm, vm.project, project)

    vm.project = project
    vm.save()
    # Only the root volume (index=0) follows the server to the new project.
    vm.volumes.filter(index=0, deleted=False).update(project=project)

    quotas.issue_and_accept_commission(vm, action="REASSIGN",
                                       action_fields=action_fields)
@commands.server_command("SET_FIREWALL_PROFILE")
def set_firewall_profile(vm, profile, nic):
    """Apply a firewall profile to one NIC of a server."""
    log.info("Setting VM %s, NIC %s, firewall %s", vm, nic, profile)
    allowed_profiles = [choice[0]
                        for choice in NetworkInterface.FIREWALL_PROFILES]
    if profile not in allowed_profiles:
        raise faults.BadRequest("Unsupported firewall profile")
    backend.set_firewall_profile(vm, profile=profile, nic=nic)
    return None
@commands.server_command("CONNECT")
def connect(vm, network, port=None):
    """Attach a server to a network, creating a port when none is given."""
    if port is None:
        port = _create_port(vm.userid, network)

    associate_port_with_machine(port, vm)
    log.info("Creating NIC %s with IPv4 Address %s", port, port.ipv4_address)
    job = backend.connect_to_network(vm, port)
    return job
@commands.server_command("DISCONNECT")
def disconnect(vm, nic):
    """Detach a NIC from a server by submitting the job to Ganeti."""
    log.info("Removing NIC %s from VM %s", nic, vm)
    job = backend.disconnect_from_network(vm, nic)
    return job
def console(vm, console_type):
    """Arrange for an OOB console of the specified type

    This method arranges for an OOB console of the specified type.
    Only consoles of type "vnc" are supported for now.

    It uses a running instance of vncauthproxy to setup proper
    VNC forwarding with a random password, then returns the necessary
    VNC connection info to the caller.
    """
    log.info("Get console VM %s, type %s", vm, console_type)

    if vm.operstate != "STARTED":
        raise faults.BadRequest('Server not in ACTIVE state.')

    # Use RAPI to get VNC console information for this instance
    # RAPI GetInstanceConsole() returns endpoints to the vnc_bind_address,
    # which is a cluster-wide setting, either 0.0.0.0 or 127.0.0.1, and pretty
    # useless (see #783).
    #
    # Until this is fixed on the Ganeti side, construct a console info reply
    # directly.
    #
    # WARNING: This assumes that VNC runs on port network_port on
    #          the instance's primary node, and is probably
    #          hypervisor-specific.
    def get_console_data(i):
        # Build the console endpoint from the Ganeti instance description.
        return {"kind": "vnc",
                "host": i["pnode"],
                "port": i["network_port"]}

    with pooled_rapi_client(vm) as c:
        i = c.GetInstance(vm.backend_vm_id)

    console_data = get_console_data(i)

    # A KVM serial console would grab the network_port, so it must be off.
    if vm.backend.hypervisor == "kvm" and i['hvparams']['serial_console']:
        raise Exception("hv parameter serial_console cannot be true")

    # Check that the instance is really running
    if not i["oper_state"]:
        log.warning("VM '%s' is marked as '%s' in DB while DOWN in Ganeti",
                    vm.id, vm.operstate)
        # Instance is not running. Mock a shutdown job to sync DB
        backend.process_op_status(vm, etime=datetime.now(), jobid=0,
                                  opcode="OP_INSTANCE_SHUTDOWN",
                                  status="success",
                                  logmsg="Reconciliation simulated event")
        raise faults.BadRequest('Server not in ACTIVE state.')

    # Let vncauthproxy decide on the source port.
    # The alternative: static allocation, e.g.
    # sport = console_data['port'] - 1000
    sport = 0
    daddr = console_data['host']
    dport = console_data['port']
    password = util.random_password()

    vnc_extra_opts = settings.CYCLADES_VNCAUTHPROXY_OPTS
    fwd = request_vnc_forwarding(sport, daddr, dport, password,
                                 console_type=console_type, **vnc_extra_opts)

    if fwd['status'] != "OK":
        log.error("vncauthproxy returned error status: '%s'" % fwd)
        raise faults.ServiceUnavailable('vncauthproxy returned error status')

    # Verify that the VNC server settings haven't changed
    # (re-fetch the instance and compare: the forwarding already points at
    # the old host/port, so a migration in between would break it).
    with pooled_rapi_client(vm) as c:
        i = c.GetInstance(vm.backend_vm_id)
    if get_console_data(i) != console_data:
        raise faults.ServiceUnavailable('VNC Server settings changed.')

    console = {
        'type': console_type,
        'host': getfqdn(),
        'port': fwd['source_port'],
        'password': password}
    return console
def rename(server, new_name):
    """Rename a VirtualMachine, enforcing the name-length limit."""
    utils.check_name_length(new_name,
                            VirtualMachine.VIRTUAL_MACHINE_NAME_LENGTH,
                            "Server name is too long")
    previous_name = server.name
    server.name = new_name
    server.save()
    log.info("Renamed server '%s' from '%s' to '%s'", server, previous_name,
             new_name)
    return server
@transaction.commit_on_success
def create_port(*args, **kwargs):
    """Public wrapper around _create_port that enforces the per-server
    NIC limit before creating the port."""
    machine = kwargs.get("machine", None)
    if machine is None and len(args) >= 3:
        # Positional form: (userid, network, machine, ...)
        machine = args[2]
    if machine is not None:
        if machine.nics.count() == settings.GANETI_MAX_NICS_PER_INSTANCE:
            raise faults.BadRequest("Maximum ports per server limit reached")
    return _create_port(*args, **kwargs)
def _create_port(userid, network, machine=None, use_ipaddress=None,
                 address=None, name="", security_groups=None,
                 device_owner=None):
    """Create a new port on the specified network.

    Create a new Port(NetworkInterface model) on the specified Network. If
    'machine' is specified, the machine will be connected to the network using
    this port. If 'use_ipaddress' argument is specified, the port will be
    assigned this IPAddress. Otherwise, an IPv4 address from the IPv4 subnet
    will be allocated.

    Raises faults.Conflict if the network cannot accept a new port and
    faults.BadRequest for invalid addresses.
    """
    if network.state != "ACTIVE":
        raise faults.Conflict("Cannot create port while network '%s' is in"
                              " '%s' status" % (network.id, network.state))
    elif network.action == "DESTROY":
        msg = "Cannot create port. Network %s is being deleted."
        raise faults.Conflict(msg % network.id)

    utils.check_name_length(name, NetworkInterface.NETWORK_IFACE_NAME_LENGTH,
                            "Port name is too long")

    ipaddress = None
    if use_ipaddress is not None:
        # Use an existing IPAddress object.
        ipaddress = use_ipaddress
        if ipaddress and (ipaddress.network_id != network.id):
            msg = "IP Address %s does not belong to network %s"
            raise faults.Conflict(msg % (ipaddress.address, network.id))
    else:
        # Do not allow allocation of new IPs if the network is drained
        if network.drained:
            raise faults.Conflict("Cannot create port while network %s is in"
                                  " 'SNF:DRAINED' status" % network.id)
        # If network has IPv4 subnets, try to allocate the address that the
        # the user specified or a random one.
        if network.subnets.filter(ipversion=4).exists():
            ipaddress = ips.allocate_ip(network, userid=userid,
                                        address=address)
        elif address is not None:
            raise faults.BadRequest("Address %s is not a valid IP for the"
                                    " defined network subnets" % address)

    if ipaddress is not None and ipaddress.nic is not None:
        raise faults.Conflict("IP address '%s' is already in use" %
                              ipaddress.address)

    # BUGFIX: the 'device_owner' argument was previously ignored and the
    # port was always created with device_owner=None; pass it through.
    port = NetworkInterface.objects.create(network=network,
                                           state="DOWN",
                                           userid=userid,
                                           device_owner=device_owner,
                                           public=network.public,
                                           name=name)

    # add the security groups if any
    if security_groups:
        port.security_groups.add(*security_groups)

    if ipaddress is not None:
        # Associate IPAddress with the Port
        ipaddress.nic = port
        ipaddress.save()

    if machine is not None:
        # Connect port to the instance.
        machine = connect(machine, network, port)
        jobID = machine.task_job_id
        log.info("Created Port %s with IP %s. Ganeti Job: %s",
                 port, ipaddress, jobID)
    else:
        log.info("Created Port %s with IP %s not attached to any instance",
                 port, ipaddress)

    return port
def associate_port_with_machine(port, machine):
    """Associate a Port with a VirtualMachine.

    Binds the port to the machine (state BUILD, device_owner "vm") and, when
    the port carries a public IPv4 address, records an IPAddressLog entry.
    """
    if port.machine is not None:
        raise faults.Conflict("Port %s is already in use." % port.id)

    public_ipv4 = port.ipv4_address if port.network.public else None
    if public_ipv4 is not None:
        entry = IPAddressLog.objects.create(server_id=machine.id,
                                            network_id=port.network_id,
                                            address=public_ipv4,
                                            active=True)
        log.debug("Created IP log entry %s", entry)

    port.machine = machine
    port.state = "BUILD"
    port.device_owner = "vm"
    port.save()
    return port
@transaction.commit_on_success
def delete_port(port):
    """Delete a port by removing the NIC card from the instance.

    For a port attached to a live server, a Ganeti job is submitted and the
    port (plus its IPv4 addresses) is released when the job succeeds. For a
    detached or dead server the port is removed from the DB immediately.
    """
    machine = port.machine
    if machine is not None and not machine.deleted:
        machine = disconnect(port.machine, port)
        log.info("Removing port %s, Job: %s", port, machine.task_job_id)
    else:
        backend.remove_nic_ips(port)
        port.delete()
        log.info("Removed port %s", port)
    return port
def create_instance_ports(user_id, networks=None):
    """Build the full set of ports for a new server.

    Admin-forced networks always come first; the user's requested networks
    (or the admin-defined defaults when none were requested) follow.
    """
    # First connect the instance to the networks defined by the admin
    forced_ports = create_ports_for_setting(user_id, category="admin")
    if networks is None:
        # If the user did not asked for any networks, connect instance to
        # default networks as defined by the admin
        user_ports = create_ports_for_setting(user_id, category="default")
    else:
        # Else just connect to the networks that the user defined
        user_ports = create_ports_for_request(user_id, networks)
    total_ports = forced_ports + user_ports
    if len(total_ports) > settings.GANETI_MAX_NICS_PER_INSTANCE:
        raise faults.BadRequest("Maximum ports per server limit reached")
    return total_ports
def create_ports_for_setting(user_id, category):
    """Create ports for the networks defined in a Django setting.

    `category` selects the setting and the failure semantics:
    "admin"   -> CYCLADES_FORCED_SERVER_NETWORKS, failure is 503
    "default" -> CYCLADES_DEFAULT_SERVER_NETWORKS, failure is 409

    Each entry in the setting is either one network ID or a group
    (list/tuple) of IDs; within a group the first network that accepts a
    port wins and the rest are skipped.
    """
    if category == "admin":
        network_setting = settings.CYCLADES_FORCED_SERVER_NETWORKS
        exception = faults.ServiceUnavailable
    elif category == "default":
        network_setting = settings.CYCLADES_DEFAULT_SERVER_NETWORKS
        exception = faults.Conflict
    else:
        raise ValueError("Unknown category: %s" % category)

    ports = []
    for network_ids in network_setting:
        # Treat even simple network IDs as group of networks with one network
        if type(network_ids) not in (list, tuple):
            network_ids = [network_ids]

        error_msgs = []
        for network_id in network_ids:
            success = False
            try:
                ports.append(_port_from_setting(user_id, network_id, category))
                # Port successfully created in one of the networks. Skip the
                # the rest.
                success = True
                break
            except faults.Conflict as e:
                # For a single-network entry the failure is fatal right away;
                # for a group, remember the message and try the next network.
                if len(network_ids) == 1:
                    raise exception(e.message)
                else:
                    error_msgs.append(e.message)

        if not success:
            if category == "admin":
                log.error("Cannot connect server to forced networks '%s': %s",
                          network_ids, error_msgs)
                raise exception("Cannot connect server to forced server"
                                " networks.")
            else:
                log.debug("Cannot connect server to default networks '%s': %s",
                          network_ids, error_msgs)
                raise exception("Cannot connect server to default server"
                                " networks.")

    return ports
def _port_from_setting(user_id, network_id, category):
    """Create one port for a network entry coming from settings.

    `network_id` is either a real network ID or one of the symbolic values
    SNF:ANY_PUBLIC_IPV4 / SNF:ANY_PUBLIC_IPV6 / SNF:ANY_PUBLIC.
    """
    # TODO: Fix this..you need only IPv4 and only IPv6 network
    if network_id == "SNF:ANY_PUBLIC_IPV4":
        return create_public_ipv4_port(user_id, category=category)
    if network_id == "SNF:ANY_PUBLIC_IPV6":
        return create_public_ipv6_port(user_id, category=category)
    if network_id == "SNF:ANY_PUBLIC":
        # Prefer IPv4; fall back to IPv6 only if IPv4 is unavailable.
        try:
            return create_public_ipv4_port(user_id, category=category)
        except faults.Conflict as e1:
            try:
                return create_public_ipv6_port(user_id, category=category)
            except faults.Conflict as e2:
                log.error("Failed to connect server to a public IPv4 or IPv6"
                          " network. IPv4: %s, IPv6: %s", e1, e2)
                msg = ("Cannot connect server to a public IPv4 or IPv6"
                       " network.")
                raise faults.Conflict(msg)

    # Plain network ID.
    if category in ["user", "default"]:
        return _port_for_request(user_id, {"uuid": network_id})
    elif category == "admin":
        network = util.get_network(network_id, user_id, non_deleted=True)
        return _create_port(user_id, network)
    else:
        raise ValueError("Unknown category: %s" % category)
def create_public_ipv4_port(user_id, network=None, address=None,
                            category="user"):
    """Create a port in a public IPv4 network.

    Create a port in a public IPv4 network (that may also have an IPv6
    subnet). If the category is 'user' or 'default' this will try to use
    one of the users floating IPs. If the category is 'admin' will
    create a port to the public network (without floating IPs or quotas).
    """
    if category in ["user", "default"]:
        # Floating-IP path: reuse a free one, or look up the requested one.
        if address is None:
            ipaddress = ips.get_free_floating_ip(user_id, network)
        else:
            ipaddress = util.get_floating_ip_by_address(user_id, address,
                                                        for_update=True)
    elif category == "admin":
        # Admin path: allocate directly, bypassing floating IPs and quotas.
        ipaddress = (ips.allocate_public_ip(user_id) if network is None
                     else ips.allocate_ip(network, user_id))
    else:
        raise ValueError("Unknown category: %s" % category)

    if network is None:
        network = ipaddress.network
    return _create_port(user_id, network, use_ipaddress=ipaddress)
def create_public_ipv6_port(user_id, category=None):
    """Create a port in a public IPv6 only network."""
    candidates = (Network.objects
                  .filter(public=True, deleted=False, drained=False,
                          subnets__ipversion=6)
                  .exclude(subnets__ipversion=4))
    if not candidates:
        msg = "No available IPv6 only network!"
        log.error(msg)
        raise faults.Conflict(msg)
    return _create_port(user_id, candidates[0])
def create_ports_for_request(user_id, networks):
    """Create the server ports requested by the user.

    Create the ports for the new servers as requested in the 'networks'
    attribute. The networks attribute contains either a list of network IDs
    ('uuid') or a list of ports IDs ('port'). In case of network IDs, the user
    can also specify an IPv4 address ('fixed_ip'). In order to connect to a
    public network, the 'fixed_ip' attribute must contain the IPv4 address of
    a floating IP. If the network is public but the 'fixed_ip' attribute is
    not specified, the system will automatically reserve one of the users
    floating IPs.
    """
    if not isinstance(networks, list):
        raise faults.BadRequest("Malformed request. Invalid 'networks' field")
    ports = []
    for network in networks:
        ports.append(_port_for_request(user_id, network))
    return ports
def _port_for_request(user_id, network_dict):
    """Create or fetch a port for one entry of the 'networks' request list.

    The entry is a dict holding either an existing 'port' ID or a network
    'uuid' (optionally with a 'fixed_ip' IPv4 address). Raises
    faults.BadRequest for malformed entries and faults.Forbidden for
    IPv6-only public networks.
    """
    if not isinstance(network_dict, dict):
        raise faults.BadRequest("Malformed request. Invalid 'networks' field")
    port_id = network_dict.get("port")
    network_id = network_dict.get("uuid")
    if port_id is not None:
        return util.get_port(port_id, user_id, for_update=True)
    elif network_id is not None:
        address = network_dict.get("fixed_ip")
        network = util.get_network(network_id, user_id, non_deleted=True)
        if network.public:
            if network.subnet4 is not None:
                if "fixed_ip" not in network_dict:
                    return create_public_ipv4_port(user_id, network)
                elif address is None:
                    # 'fixed_ip' was present but empty/null.
                    # BUGFIX: the message lacked the '%s' placeholder, so the
                    # '%' formatting raised TypeError instead of returning a
                    # BadRequest to the client.
                    msg = "Cannot connect to public network %s"
                    raise faults.BadRequest(msg % network.id)
                else:
                    return create_public_ipv4_port(user_id, network, address)
            else:
                raise faults.Forbidden("Cannot connect to IPv6 only public"
                                       " network '%s'" % network.id)
        else:
            return _create_port(user_id, network, address=address)
    else:
        raise faults.BadRequest("Network 'uuid' or 'port' attribute"
                                " is required.")
|
wsgicabal/wiseguy | refs/heads/master | wiseguy/components/notfound.py | 1 | """
main:
component: notfound
"""
from wiseguy import WSGIComponent
class NotFoundFactory(object):
    """WSGI application that answers every request with 404 Not Found."""

    def __init__(self, **kwargs):
        # Configuration is accepted for interface compatibility and kept
        # around, but not otherwise used.
        self.kwargs = kwargs

    def __call__(self, environ, start_response):
        from webob.exc import HTTPNotFound
        response = HTTPNotFound()
        return response(environ, start_response)
# Component descriptor consumed by wiseguy's configuration machinery:
# no config schema, instances are built by NotFoundFactory.
NotFoundComponent = WSGIComponent(
    schema = None,
    factory = NotFoundFactory,
)
|
synweap15/pyload | refs/heads/stable | module/plugins/hoster/NovafileCom.py | 15 | # -*- coding: utf-8 -*-
#
# Test links:
# http://novafile.com/vfun4z6o2cit
# http://novafile.com/s6zrr5wemuz4
from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
class NovafileCom(XFSHoster):
    __name__ = "NovafileCom"
    __type__ = "hoster"
    __version__ = "0.06"
    __status__ = "testing"

    # Matches download links such as http://novafile.com/vfun4z6o2cit
    __pattern__ = r'http://(?:www\.)?novafile\.com/\w{12}'

    __description__ = """Novafile.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
                   ("stickell", "l.stickell@yahoo.it")]

    # Regexes used by the XFSHoster base class to scrape the page:
    # error banner text, wait-countdown seconds, and the final download URL.
    ERROR_PATTERN = r'class="alert.+?alert-separate".*?>\s*(?:<p>)?(.*?)\s*</'
    WAIT_PATTERN = r'<p>Please wait <span id="count".*?>(\d+)</span> seconds</p>'
    LINK_PATTERN = r'<a href="(http://s\d+\.novafile\.com/.*?)" class="btn btn-green">Download File</a>'


# Module-level getInfo hook required by the pyLoad plugin loader.
getInfo = create_getInfo(NovafileCom)
|
mcr/ietfdb | refs/heads/master | ietf/cookies/__init__.py | 1 | # Copyright The IETF Trust 2010, All Rights Reserved
# coding: latin-1
from types import ModuleType
import urls, views
# These people will be sent a stack trace if there's an uncaught exception in
# code any of the modules imported above:
DEBUG_EMAILS = [
    ('Tero Kivinen', 'kivinen@iki.fi'),
]

# Merge DEBUG_EMAILS from every module imported into this namespace (urls,
# views), then push the combined list back into each module so they all
# share one recipient list.
# NOTE(review): iterating locals().keys() while the loop body introduces new
# names (k, m) is safe on Python 2 only because keys() returns a list there;
# under Python 3 this pattern would raise RuntimeError -- confirm the target
# interpreter before porting.
for k in locals().keys():
    m = locals()[k]
    if isinstance(m, ModuleType):
        if hasattr(m, "DEBUG_EMAILS"):
            DEBUG_EMAILS += list(getattr(m, "DEBUG_EMAILS"))
        setattr(m, "DEBUG_EMAILS", DEBUG_EMAILS)
|
wolf29/games | refs/heads/master | py-proj-building-blocks/python3-chardet-2.0.1/chardet/langthaimodel.py | 25 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = ( \
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Model record consumed by chardet's single-byte charset prober: the
# byte -> frequency-order map above, the packed sequence-likelihood table,
# and charset metadata.
TIS620ThaiModel = { \
    'charToOrderMap': TIS620CharToOrderMap,  # 256-entry byte -> order table
    'precedenceMatrix': ThaiLangModel,       # sequence-likelihood matrix
    'mTypicalPositiveRatio': 0.926386,       # coverage of the first 512 sequences (see header comment)
    'keepEnglishLetter': False,              # ASCII letters are not scored
    'charsetName': "TIS-620"
}
|
trwired/upath | refs/heads/master | test/test_upath.py | 1 | import os
from upath.upath import translate_path, split_path
def test_path_with_drive_letter():
    # Backslashes become forward slashes; the drive prefix is preserved.
    path_in = r'C:\Python27'
    path_out = 'C:/Python27'
    assert translate_path(path_in) == path_out

def test_path_with_trailing_backslash():
    # A trailing (escaped) backslash survives as a single trailing slash.
    path_in = r'C:\Python27\\'
    path_out = 'C:/Python27/'
    assert translate_path(path_in) == path_out

def test_path_with_spaces():
    # Paths containing spaces are wrapped in double quotes.
    path_in = r'C:\Program Files\Microsoft Visual Studio 10.0\Common7\IDE'
    path_out = r'"C:/Program Files/Microsoft Visual Studio 10.0/Common7/IDE"'
    assert translate_path(path_in) == path_out

def test_path_without_drive_letter():
    # Drive-less paths translate the same way (quoted here because of the
    # embedded spaces).
    path_in = r'\Program Files\Microsoft Visual Studio 10.0\Common7\IDE'
    path_out = r'"/Program Files/Microsoft Visual Studio 10.0/Common7/IDE"'
    assert translate_path(path_in) == path_out

def test_path_with_relative_dirs1():
    # '..' components are passed through verbatim, not resolved.
    path_in = r'C:\Program Files\..\..\Common7\IDE'
    path_out = r'"C:/Program Files/../../Common7/IDE"'
    assert translate_path(path_in) == path_out

def test_path_with_relative_dirs2():
    # A leading '..' (no drive) is also preserved verbatim.
    path_in = r'..\Program Files\..\..\Common7\IDE'
    path_out = r'"../Program Files/../../Common7/IDE"'
    assert translate_path(path_in) == path_out

def test_path_with_relative_dirs3():
    # '..' directly after the drive letter is preserved as well.
    path_in = r'C:\..\Program Files\..\..\Common7\IDE'
    path_out = r'"C:/../Program Files/../../Common7/IDE"'
    assert translate_path(path_in) == path_out

def test_path_split_1():
    # An absolute path (drive stripped) splits with a leading '' for the
    # root, then one entry per component.
    path = os.path.splitdrive(
        r'C:\Program Files\Microsoft Visual Studio 10.0\Common7\IDE')[1]
    result = [
        '', 'Program Files', 'Microsoft Visual Studio 10.0', 'Common7', 'IDE']
    assert split_path(path) == result

def test_path_split_2():
    # A relative path splits with no leading '' marker.
    path = os.path.splitdrive(
        r'Program Files\Microsoft Visual Studio 10.0\Common7\IDE')[1]
    result = [
        'Program Files', 'Microsoft Visual Studio 10.0', 'Common7', 'IDE']
    assert split_path(path) == result
|
the-blue-alliance/the-blue-alliance | refs/heads/master | helpers/user_bundle.py | 8 | from google.appengine.ext import ndb
from google.appengine.api import users
from models.account import Account
from models.mobile_client import MobileClient
class UserBundle(object):
    """
    UserBundle encapsulates a bunch of Google AppEngine user management stuff
    to make it easier for templates.
    """
    def __init__(self):
        # Lazily-created Account entity for the signed-in user; populated on
        # first access of the `account` property.
        self._account = None

    @property
    def account(self):
        """Account entity for the current user, created on first access.

        Returns None when no user is signed in.  get_or_insert keys on the
        Google user id, so the keyword values only apply when the entity is
        first created; later requests reuse the stored entity.
        """
        if self._account is None and self.user:
            self._account = Account.get_or_insert(
                self.user.user_id(),
                email = self.user.email(),
                nickname = self.user.nickname(),
                registered = False,
                display_name = self.user.nickname())
        return self._account

    @property
    def user(self):
        """The google.appengine users API user for this request (or None)."""
        return users.get_current_user()

    @property
    def mobile_clients(self):
        """All MobileClient entities under the current user's Account key.

        NOTE(review): dereferences self.user without a None check, so this
        raises AttributeError when nobody is signed in -- confirm callers
        only reach it behind a login requirement.
        """
        user_id = self.user.user_id()
        return MobileClient.query(ancestor=ndb.Key(Account, user_id)).fetch()

    @property
    def is_current_user_admin(self):
        """True when the signed-in user is an App Engine application admin."""
        return users.is_current_user_admin()

    @property
    def login_url(self):
        """Login URL that redirects back to the site root after sign-in."""
        return users.create_login_url("/")

    @property
    def logout_url(self):
        """Logout URL that redirects back to the site root after sign-out."""
        return users.create_logout_url("/")

    def create_login_url(self, target_url="/"):
        """Login URL that redirects to target_url after sign-in."""
        return users.create_login_url(target_url)

    def create_logout_url(self, target_url="/"):
        """Logout URL that redirects to target_url after sign-out."""
        return users.create_logout_url(target_url)
|
guettli/django | refs/heads/master | django/conf/locale/ja/formats.py | 619 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Django date-format syntax (see header comment): Y=4-digit year, n=month
# without leading zero, j=day without leading zero, G=24-hour hour.
DATE_FORMAT = 'Y年n月j日'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'Y年n月j日G:i'
YEAR_MONTH_FORMAT = 'Y年n月'
MONTH_DAY_FORMAT = 'n月j日'
SHORT_DATE_FORMAT = 'Y/m/d'
SHORT_DATETIME_FORMAT = 'Y/m/d G:i'
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
mhnatiuk/phd_sociology_of_religion | refs/heads/master | scrapper/build/scrapy/scrapy/xlib/pydispatch/dispatcher.py | 15 | """Multiple-producer-multiple-consumer signal-dispatching
dispatcher is the core of the PyDispatcher system,
providing the primary API and the core logic for the
system.
Module attributes of note:
Any -- Singleton used to signal either "Any Sender" or
"Any Signal". See documentation of the _Any class.
Anonymous -- Singleton used to signal "Anonymous Sender"
See documentation of the _Anonymous class.
Internal attributes:
WEAKREF_TYPES -- tuple of types/classes which represent
weak references to receivers, and thus must be de-
referenced on retrieval to retrieve the callable
object
connections -- { senderkey (id) : { signal : [receivers...]}}
senders -- { senderkey (id) : weakref(sender) }
used for cleaning up sender references on sender
deletion
sendersBack -- { receiverkey (id) : [senderkey (id)...] }
used for cleaning up receiver references on receiver
deletion, (considerably speeds up the cleanup process
vs. the original code.)
"""
from __future__ import generators
import types, weakref
from scrapy.xlib.pydispatch import saferef, robustapply, errors
__author__ = "Patrick K. O'Brien <pobrien@orbtech.com>"
__cvsid__ = "$Id: dispatcher.py,v 1.1.1.1 2006/07/07 15:59:38 mcfletch Exp $"
__version__ = "$Revision: 1.1.1.1 $"[11:-2]
class _Parameter:
"""Used to represent default parameter values."""
def __repr__(self):
return self.__class__.__name__
class _Any(_Parameter):
    """Singleton used to signal either "Any Sender" or "Any Signal"

    The Any object can be used with connect, disconnect,
    send, or sendExact to signal that the parameter given
    Any should react to all senders/signals, not just
    a particular sender/signal.
    """
# Module-level singleton; compared by identity throughout this module.
Any = _Any()
class _Anonymous(_Parameter):
    """Singleton used to signal "Anonymous Sender"

    The Anonymous object is used to signal that the sender
    of a message is not specified (as distinct from being
    "any sender").  Registering callbacks for Anonymous
    will only receive messages sent without senders.  Sending
    with anonymous will only send messages to those receivers
    registered for Any or Anonymous.

    Note:
        The default sender for connect is Any, while the
        default sender for send is Anonymous.  This has
        the effect that if you do not specify any senders
        in either function then all messages are routed
        as though there was a single sender (Anonymous)
        being used everywhere.
    """
# Module-level singleton; compared by identity throughout this module.
Anonymous = _Anonymous()

# Types that represent weak references to receivers; such entries must be
# dereferenced (called) on retrieval to obtain the underlying callable.
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)

# Routing table: { senderkey (id) : { signal : [receivers...] } }
connections = {}
# { senderkey (id) : weakref(sender) } -- used to clean up routing entries
# when a sender is garbage collected.
senders = {}
# Reverse index: { receiverkey (id) : [senderkey (id), ...] } -- speeds up
# cleanup when a receiver is deleted.
sendersBack = {}
def connect(receiver, signal=Any, sender=Any, weak=True):
    """Connect receiver to sender for signal

    receiver -- a callable Python object which is to receive
        messages/signals/events.  Receivers must be hashable
        objects.

        if weak is True, then receiver must be weak-referencable
        (more precisely saferef.safeRef() must be able to create
        a reference to the receiver).

        Receivers are fairly flexible in their specification,
        as the machinery in the robustApply module takes care
        of most of the details regarding figuring out appropriate
        subsets of the sent arguments to apply to a given
        receiver.

        Note:
            if receiver is itself a weak reference (a callable),
            it will be de-referenced by the system's machinery,
            so *generally* weak references are not suitable as
            receivers, though some use might be found for the
            facility whereby a higher-level library passes in
            pre-weakrefed receiver references.

    signal -- the signal to which the receiver should respond

        if Any, receiver will receive any signal from the
        indicated sender (which might also be Any, but is not
        necessarily Any).

        Otherwise must be a hashable Python object other than
        None (DispatcherError raised on None).

    sender -- the sender to which the receiver should respond

        if Any, receiver will receive the indicated signals
        from any sender.

        if Anonymous, receiver will only receive indicated
        signals from send/sendExact which do not specify a
        sender, or specify Anonymous explicitly as the sender.

        Otherwise can be any python object.

    weak -- whether to use weak references to the receiver

        By default, the module will attempt to use weak
        references to the receiver objects.  If this parameter
        is false, then strong references will be used.

    returns None, may raise DispatcherTypeError
    """
    if signal is None:
        raise errors.DispatcherTypeError(
            'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender)
        )
    if weak:
        receiver = saferef.safeRef(receiver, onDelete=_removeReceiver)
    senderkey = id(sender)
    # `dict.has_key` is Python 2 only; `in` works on both 2 and 3.
    if senderkey in connections:
        signals = connections[senderkey]
    else:
        connections[senderkey] = signals = {}
    # Keep track of senders for cleanup.
    # Is Anonymous something we want to clean up?
    if sender not in (None, Anonymous, Any):
        def remove(reference, senderkey=senderkey):
            # Weakref callback: sender was garbage collected.
            _removeSender(senderkey=senderkey)
        # Skip objects that can not be weakly referenced, which means
        # they won't be automatically cleaned up, but that's too bad.
        try:
            weakSender = weakref.ref(sender, remove)
            senders[senderkey] = weakSender
        except TypeError:
            # weakref.ref raises TypeError for objects that do not support
            # weak references (the bare `except:` this replaces could also
            # hide unrelated errors).
            pass
    receiverID = id(receiver)
    # get current set, remove any current references to
    # this receiver in the set, including back-references
    if signal in signals:
        receivers = signals[signal]
        _removeOldBackRefs(senderkey, signal, receiver, receivers)
    else:
        receivers = signals[signal] = []
    # Record the receiver -> senders back-reference used for fast cleanup
    # on receiver deletion.  id() keys are always hashable, so the original
    # broad try/except around this bookkeeping was unnecessary.
    current = sendersBack.setdefault(receiverID, [])
    if senderkey not in current:
        current.append(senderkey)
    receivers.append(receiver)
def disconnect(receiver, signal=Any, sender=Any, weak=True):
    """Disconnect receiver from sender for signal

    receiver -- the registered receiver to disconnect
    signal -- the registered signal to disconnect
    sender -- the registered sender to disconnect
    weak -- the weakref state to disconnect

    disconnect reverses the process of connect,
    the semantics for the individual elements are
    logically equivalent to a tuple of
    (receiver, signal, sender, weak) used as a key
    to be deleted from the internal routing tables.
    (The actual process is slightly more complex
    but the semantics are basically the same).

    Note:
        Using disconnect is not required to cleanup
        routing when an object is deleted, the framework
        will remove routes for deleted objects
        automatically.  It's only necessary to disconnect
        if you want to stop routing to a live object.

    returns None, may raise DispatcherTypeError or
        DispatcherKeyError
    """
    if signal is None:
        raise errors.DispatcherTypeError(
            'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender)
        )
    # Re-wrap with the same saferef used by connect so the lookups below
    # compare equal to the reference stored in the routing tables.
    if weak: receiver = saferef.safeRef(receiver)
    senderkey = id(sender)
    try:
        signals = connections[senderkey]
        receivers = signals[signal]
    except KeyError:
        # Nothing was ever registered for this (sender, signal) pair.
        raise errors.DispatcherKeyError(
            """No receivers found for signal %r from sender %r""" %(
                signal,
                sender
            )
        )
    try:
        # also removes from receivers
        _removeOldBackRefs(senderkey, signal, receiver, receivers)
    except ValueError:
        # NOTE(review): _removeOldBackRefs appears to trap its own
        # ValueError internally and return False, so this branch may be
        # unreachable -- confirm before relying on this DispatcherKeyError.
        raise errors.DispatcherKeyError(
            """No connection to receiver %s for signal %s from sender %s""" %(
                receiver,
                signal,
                sender
            )
        )
    # Drop now-empty signal/sender entries from the routing tables.
    _cleanupConnections(senderkey, signal)
def getReceivers(sender=Any, signal=Any):
    """Get list of receivers from global tables

    Look up the raw receiver list recorded in the connections table for
    the given (sender, signal) pair; an empty list is returned when no
    such entry exists.

    Note:
        the returned object may be the live list stored in the table, so
        treat it as a simple iterable/truth value rather than a list you
        might append records to.  Normally you would use
        liveReceivers(getReceivers(...)) to resolve the entries into
        actual receiver objects.
    """
    per_sender = connections.get(id(sender))
    if per_sender is None:
        return []
    return per_sender.get(signal, [])
def liveReceivers(receivers):
    """Filter sequence of receivers to get resolved, live receivers

    Generator over the passed sequence: entries that are weak references
    are dereferenced, and only those still alive are produced; strong
    references are produced as-is.
    """
    for candidate in receivers:
        if not isinstance(candidate, WEAKREF_TYPES):
            yield candidate
            continue
        # Dereference the weak reference; skip it if the target is gone.
        target = candidate()
        if target is not None:
            yield target
def getAllReceivers(sender=Any, signal=Any):
    """Get list of all receivers from global tables

    This gets all receivers which should receive
    the given signal from sender, each receiver should
    be produced only once by the resulting generator
    """
    seen = {}
    # `group` was previously named `set`, shadowing the builtin.
    for group in (
        # Get receivers that receive *this* signal from *this* sender.
        getReceivers(sender, signal),
        # Add receivers that receive *any* signal from *this* sender.
        getReceivers(sender, Any),
        # Add receivers that receive *this* signal from *any* sender.
        getReceivers(Any, signal),
        # Add receivers that receive *any* signal from *any* sender.
        getReceivers(Any, Any),
    ):
        for receiver in group:
            if receiver:  # filter out dead instance-method weakrefs
                try:
                    # `in` replaces Python-2-only dict.has_key.
                    if receiver not in seen:
                        seen[receiver] = 1
                        yield receiver
                except TypeError:
                    # dead weakrefs raise TypeError on hash...
                    pass
def send(signal=Any, sender=Anonymous, *arguments, **named):
    """Send signal from sender to all connected receivers.

    signal -- (hashable) signal value, see connect for details
    sender -- the sender of the signal

        if Any, only receivers registered for Any will receive the message.

        if Anonymous, only receivers registered to receive messages from
        Anonymous or Any will receive the message.

        Otherwise can be any python object (normally one registered with
        a connect if you actually want something to occur).

    arguments -- positional arguments which will be passed to *all*
        receivers.  Note that this may raise TypeErrors if the receivers
        do not allow the particular arguments.  Note also that arguments
        are applied before named arguments, so they should be used with
        care.

    named -- named arguments which will be filtered according to the
        parameters of the receivers to only provide those acceptable to
        the receiver.

    Return a list of tuple pairs [(receiver, response), ... ]

    if any receiver raises an error, the error propagates back through
    send, terminating the dispatch loop, so it is quite possible to not
    have all receivers called if one raises an error.
    """
    # robustApply trims the argument set to what each receiver can accept.
    return [
        (listener,
         robustapply.robustApply(listener,
                                 signal=signal,
                                 sender=sender,
                                 *arguments,
                                 **named))
        for listener in liveReceivers(getAllReceivers(sender, signal))
    ]
def sendExact(signal=Any, sender=Anonymous, *arguments, **named):
    """Send signal only to those receivers registered for exact message

    Unlike send, this avoids the Any/Anonymous registered handlers,
    dispatching only to receivers explicitly registered for this
    particular signal on this particular sender.

    Return a list of tuple pairs [(receiver, response), ... ].
    """
    return [
        (listener,
         robustapply.robustApply(listener,
                                 signal=signal,
                                 sender=sender,
                                 *arguments,
                                 **named))
        for listener in liveReceivers(getReceivers(sender, signal))
    ]
def _removeReceiver(receiver):
    """Remove receiver from connections (weakref/saferef delete callback)."""
    if not sendersBack:
        # During module cleanup the mapping will be replaced with None
        return False
    backKey = id(receiver)
    try:
        backSet = sendersBack.pop(backKey)
    except KeyError as err:
        # No back-references recorded for this receiver; nothing to do.
        return False
    else:
        # Walk every sender this receiver was registered with and drop the
        # receiver from each of that sender's signal lists.
        for senderkey in backSet:
            try:
                signals = connections[senderkey].keys()
            except KeyError as err:
                pass
            else:
                for signal in signals:
                    try:
                        receivers = connections[senderkey][signal]
                    except KeyError:
                        pass
                    else:
                        try:
                            receivers.remove( receiver )
                        except Exception as err:
                            # Best-effort: the receiver may already have
                            # been removed by a concurrent disconnect.
                            pass
                        _cleanupConnections(senderkey, signal)
def _cleanupConnections(senderkey, signal):
    """Delete any empty signals for senderkey. Delete senderkey if empty.

    Called after receivers are removed so empty routing entries do not
    accumulate in the module-level connections table.
    """
    try:
        receivers = connections[senderkey][signal]
    except KeyError:
        # Entry already gone.  The dict lookups here can only raise
        # KeyError; the bare `except:` this replaces could mask anything.
        pass
    else:
        if not receivers:
            # No more connected receivers. Therefore, remove the signal.
            try:
                signals = connections[senderkey]
            except KeyError:
                pass
            else:
                del signals[signal]
                if not signals:
                    # No more signal connections. Therefore, remove the sender.
                    _removeSender(senderkey)
def _removeSender(senderkey):
    """Remove senderkey from connections and the senders weakref table."""
    _removeBackrefs(senderkey)
    try:
        del connections[senderkey]
    except KeyError:
        pass
    # Senderkey will only be in senders dictionary if sender
    # could be weakly referenced.
    try:
        del senders[senderkey]
    except KeyError:
        # `del` on a dict can only raise KeyError; the bare `except:` this
        # replaces could also swallow KeyboardInterrupt/SystemExit.
        pass
def _removeBackrefs( senderkey):
    """Remove all back-references to this senderkey"""
    try:
        signals = connections[senderkey]
    except KeyError:
        signals = None
    else:
        # Snapshot the (signal, receivers) pairs, then walk every receiver
        # registered under any signal of this sender and drop its
        # receiver -> sender back-reference.
        items = signals.items()
        def allReceivers( ):
            for signal,set in items:  # NOTE: `set` shadows the builtin here
                for item in set:
                    yield item
        for receiver in allReceivers():
            _killBackref( receiver, senderkey )
def _removeOldBackRefs(senderkey, signal, receiver, receivers):
    """Kill old sendersBack references from receiver

    This guards against multiple registration of the same
    receiver for a given signal and sender leaking memory
    as old back reference records build up.

    Also removes old receiver instance from receivers.

    Returns True when an old registration was found and removed
    (and its back-reference killed), False otherwise.
    """
    try:
        index = receivers.index(receiver)
        # need to scan back references here and remove senderkey
    except ValueError:
        # Receiver was not previously registered for this (sender, signal);
        # leave its back-references intact.
        return False
    else:
        oldReceiver = receivers[index]
        del receivers[index]
        found = 0
        # BUG FIX: connections is keyed by senderkey, not by signal.  The
        # original looked up connections.get(signal), which is (almost)
        # always None, so the back-reference was killed even when the old
        # receiver was still registered for other signals of this sender.
        signals = connections.get(senderkey)
        if signals is not None:
            # .items() instead of Python-2-only .iteritems().
            for sig, recs in signals.items():
                if sig != signal:
                    for rec in recs:
                        if rec is oldReceiver:
                            found = 1
                            break
        if not found:
            _killBackref(oldReceiver, senderkey)
            return True
        return False
def _killBackref(receiver, senderkey):
    """Do the actual removal of back reference from receiver to senderkey"""
    receiverkey = id(receiver)
    # Renamed from `set`: the stored value is a *list* of senderkeys, and
    # the old name shadowed the builtin.
    backrefs = sendersBack.get(receiverkey, ())
    while senderkey in backrefs:
        try:
            backrefs.remove(senderkey)
        except ValueError:
            # list.remove only raises ValueError; the bare `except:` this
            # replaces could swallow anything.
            break
    if not backrefs:
        try:
            del sendersBack[receiverkey]
        except KeyError:
            pass
    return True
|
vertigo235/Sick-Beard-XEM | refs/heads/master | lib/unidecode/x0bd.py | 253 | data = (
'bols', # 0x00
'bolt', # 0x01
'bolp', # 0x02
'bolh', # 0x03
'bom', # 0x04
'bob', # 0x05
'bobs', # 0x06
'bos', # 0x07
'boss', # 0x08
'bong', # 0x09
'boj', # 0x0a
'boc', # 0x0b
'bok', # 0x0c
'bot', # 0x0d
'bop', # 0x0e
'boh', # 0x0f
'bwa', # 0x10
'bwag', # 0x11
'bwagg', # 0x12
'bwags', # 0x13
'bwan', # 0x14
'bwanj', # 0x15
'bwanh', # 0x16
'bwad', # 0x17
'bwal', # 0x18
'bwalg', # 0x19
'bwalm', # 0x1a
'bwalb', # 0x1b
'bwals', # 0x1c
'bwalt', # 0x1d
'bwalp', # 0x1e
'bwalh', # 0x1f
'bwam', # 0x20
'bwab', # 0x21
'bwabs', # 0x22
'bwas', # 0x23
'bwass', # 0x24
'bwang', # 0x25
'bwaj', # 0x26
'bwac', # 0x27
'bwak', # 0x28
'bwat', # 0x29
'bwap', # 0x2a
'bwah', # 0x2b
'bwae', # 0x2c
'bwaeg', # 0x2d
'bwaegg', # 0x2e
'bwaegs', # 0x2f
'bwaen', # 0x30
'bwaenj', # 0x31
'bwaenh', # 0x32
'bwaed', # 0x33
'bwael', # 0x34
'bwaelg', # 0x35
'bwaelm', # 0x36
'bwaelb', # 0x37
'bwaels', # 0x38
'bwaelt', # 0x39
'bwaelp', # 0x3a
'bwaelh', # 0x3b
'bwaem', # 0x3c
'bwaeb', # 0x3d
'bwaebs', # 0x3e
'bwaes', # 0x3f
'bwaess', # 0x40
'bwaeng', # 0x41
'bwaej', # 0x42
'bwaec', # 0x43
'bwaek', # 0x44
'bwaet', # 0x45
'bwaep', # 0x46
'bwaeh', # 0x47
'boe', # 0x48
'boeg', # 0x49
'boegg', # 0x4a
'boegs', # 0x4b
'boen', # 0x4c
'boenj', # 0x4d
'boenh', # 0x4e
'boed', # 0x4f
'boel', # 0x50
'boelg', # 0x51
'boelm', # 0x52
'boelb', # 0x53
'boels', # 0x54
'boelt', # 0x55
'boelp', # 0x56
'boelh', # 0x57
'boem', # 0x58
'boeb', # 0x59
'boebs', # 0x5a
'boes', # 0x5b
'boess', # 0x5c
'boeng', # 0x5d
'boej', # 0x5e
'boec', # 0x5f
'boek', # 0x60
'boet', # 0x61
'boep', # 0x62
'boeh', # 0x63
'byo', # 0x64
'byog', # 0x65
'byogg', # 0x66
'byogs', # 0x67
'byon', # 0x68
'byonj', # 0x69
'byonh', # 0x6a
'byod', # 0x6b
'byol', # 0x6c
'byolg', # 0x6d
'byolm', # 0x6e
'byolb', # 0x6f
'byols', # 0x70
'byolt', # 0x71
'byolp', # 0x72
'byolh', # 0x73
'byom', # 0x74
'byob', # 0x75
'byobs', # 0x76
'byos', # 0x77
'byoss', # 0x78
'byong', # 0x79
'byoj', # 0x7a
'byoc', # 0x7b
'byok', # 0x7c
'byot', # 0x7d
'byop', # 0x7e
'byoh', # 0x7f
'bu', # 0x80
'bug', # 0x81
'bugg', # 0x82
'bugs', # 0x83
'bun', # 0x84
'bunj', # 0x85
'bunh', # 0x86
'bud', # 0x87
'bul', # 0x88
'bulg', # 0x89
'bulm', # 0x8a
'bulb', # 0x8b
'buls', # 0x8c
'bult', # 0x8d
'bulp', # 0x8e
'bulh', # 0x8f
'bum', # 0x90
'bub', # 0x91
'bubs', # 0x92
'bus', # 0x93
'buss', # 0x94
'bung', # 0x95
'buj', # 0x96
'buc', # 0x97
'buk', # 0x98
'but', # 0x99
'bup', # 0x9a
'buh', # 0x9b
'bweo', # 0x9c
'bweog', # 0x9d
'bweogg', # 0x9e
'bweogs', # 0x9f
'bweon', # 0xa0
'bweonj', # 0xa1
'bweonh', # 0xa2
'bweod', # 0xa3
'bweol', # 0xa4
'bweolg', # 0xa5
'bweolm', # 0xa6
'bweolb', # 0xa7
'bweols', # 0xa8
'bweolt', # 0xa9
'bweolp', # 0xaa
'bweolh', # 0xab
'bweom', # 0xac
'bweob', # 0xad
'bweobs', # 0xae
'bweos', # 0xaf
'bweoss', # 0xb0
'bweong', # 0xb1
'bweoj', # 0xb2
'bweoc', # 0xb3
'bweok', # 0xb4
'bweot', # 0xb5
'bweop', # 0xb6
'bweoh', # 0xb7
'bwe', # 0xb8
'bweg', # 0xb9
'bwegg', # 0xba
'bwegs', # 0xbb
'bwen', # 0xbc
'bwenj', # 0xbd
'bwenh', # 0xbe
'bwed', # 0xbf
'bwel', # 0xc0
'bwelg', # 0xc1
'bwelm', # 0xc2
'bwelb', # 0xc3
'bwels', # 0xc4
'bwelt', # 0xc5
'bwelp', # 0xc6
'bwelh', # 0xc7
'bwem', # 0xc8
'bweb', # 0xc9
'bwebs', # 0xca
'bwes', # 0xcb
'bwess', # 0xcc
'bweng', # 0xcd
'bwej', # 0xce
'bwec', # 0xcf
'bwek', # 0xd0
'bwet', # 0xd1
'bwep', # 0xd2
'bweh', # 0xd3
'bwi', # 0xd4
'bwig', # 0xd5
'bwigg', # 0xd6
'bwigs', # 0xd7
'bwin', # 0xd8
'bwinj', # 0xd9
'bwinh', # 0xda
'bwid', # 0xdb
'bwil', # 0xdc
'bwilg', # 0xdd
'bwilm', # 0xde
'bwilb', # 0xdf
'bwils', # 0xe0
'bwilt', # 0xe1
'bwilp', # 0xe2
'bwilh', # 0xe3
'bwim', # 0xe4
'bwib', # 0xe5
'bwibs', # 0xe6
'bwis', # 0xe7
'bwiss', # 0xe8
'bwing', # 0xe9
'bwij', # 0xea
'bwic', # 0xeb
'bwik', # 0xec
'bwit', # 0xed
'bwip', # 0xee
'bwih', # 0xef
'byu', # 0xf0
'byug', # 0xf1
'byugg', # 0xf2
'byugs', # 0xf3
'byun', # 0xf4
'byunj', # 0xf5
'byunh', # 0xf6
'byud', # 0xf7
'byul', # 0xf8
'byulg', # 0xf9
'byulm', # 0xfa
'byulb', # 0xfb
'byuls', # 0xfc
'byult', # 0xfd
'byulp', # 0xfe
'byulh', # 0xff
)
|
kobejean/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/ops/bijectors/invert.py | 35 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Invert bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
# Public API of this module.
__all__ = [
    "Invert",
]
class Invert(bijector.Bijector):
  """Bijector which inverts another Bijector.

  Example Use: [ExpGammaDistribution (see Background & Context)](
  https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
  models `Y=log(X)` where `X ~ Gamma`.

  ```python
  exp_gamma_distribution = TransformedDistribution(
    distribution=Gamma(concentration=1., rate=2.),
    bijector=bijector.Invert(bijector.Exp()))
  ```
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self, bijector, validate_args=False, name=None):
    """Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.

    Note: An inverted bijector's `inverse_log_det_jacobian` is often more
    efficient if the base bijector implements `_forward_log_det_jacobian`. If
    `_forward_log_det_jacobian` is not implemented then the following code is
    used:

    ```python
    y = self.inverse(x, **kwargs)
    return -self.inverse_log_det_jacobian(y, **kwargs)
    ```

    Args:
      bijector: Bijector instance.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.
    """
    # Inverting a non-injective bijector is ill-defined: there is no single
    # forward image to map back from.
    if not bijector._is_injective:  # pylint: disable=protected-access
      raise NotImplementedError(
          "Invert is not implemented for non-injective bijectors.")
    self._bijector = bijector
    super(Invert, self).__init__(
        graph_parents=bijector.graph_parents,
        # Forward and inverse event ndims swap roles, mirroring the swapped
        # transform directions below.
        forward_min_event_ndims=bijector.inverse_min_event_ndims,
        inverse_min_event_ndims=bijector.forward_min_event_ndims,
        is_constant_jacobian=bijector.is_constant_jacobian,
        validate_args=validate_args,
        dtype=bijector.dtype,
        name=name or "_".join(["invert", bijector.name]))

  # Every hook below simply delegates to the wrapped bijector with the
  # forward/inverse direction flipped.
  def _forward_event_shape(self, input_shape):
    return self.bijector._inverse_event_shape(input_shape)  # pylint: disable=protected-access

  def _forward_event_shape_tensor(self, input_shape):
    return self.bijector._inverse_event_shape_tensor(input_shape)  # pylint: disable=protected-access

  def _inverse_event_shape(self, output_shape):
    return self.bijector._forward_event_shape(output_shape)  # pylint: disable=protected-access

  def _inverse_event_shape_tensor(self, output_shape):
    return self.bijector._forward_event_shape_tensor(output_shape)  # pylint: disable=protected-access

  @property
  def bijector(self):
    # The wrapped (un-inverted) bijector.
    return self._bijector

  def _forward(self, x, **kwargs):
    return self.bijector._inverse(x, **kwargs)  # pylint: disable=protected-access

  def _inverse(self, y, **kwargs):
    return self.bijector._forward(y, **kwargs)  # pylint: disable=protected-access

  def _inverse_log_det_jacobian(self, y, **kwargs):
    return self.bijector._forward_log_det_jacobian(y, **kwargs)  # pylint: disable=protected-access

  def _forward_log_det_jacobian(self, x, **kwargs):
    return self.bijector._inverse_log_det_jacobian(x, **kwargs)  # pylint: disable=protected-access
|
larsmans/scikit-learn | refs/heads/master | examples/model_selection/grid_search_text_feature_extraction.py | 253 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)

# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')


###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print("Loading 20 newsgroups dataset for categories:")
print(categories)

# NOTE: downloads (and then caches) the 20 newsgroups corpus on first use.
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()

###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])

# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
# Keys follow the '<step>__<param>' convention used by Pipeline/GridSearchCV.
parameters = {
    'vect__max_df': (0.5, 0.75, 1.0),
    #'vect__max_features': (None, 5000, 10000, 50000),
    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
    #'tfidf__use_idf': (True, False),
    #'tfidf__norm': ('l1', 'l2'),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
    #'clf__n_iter': (10, 50, 80),
}

if __name__ == "__main__":
    # multiprocessing requires the fork to happen in a __main__ protected
    # block

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    t0 = time()
    grid_search.fit(data.data, data.target)
    print("done in %0.3fs" % (time() - t0))
    print()

    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
devs4v/devs4v-information-retrieval15 | refs/heads/master | project/venv/lib/python2.7/site-packages/django/contrib/admin/validation.py | 82 | from django.contrib.admin.utils import NotRelationField, get_fields_from_path
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import models
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
"""
Does basic ModelAdmin option validation. Calls custom validation
classmethod in the end if it is provided in cls. The signature of the
custom validation classmethod should be: def validate(cls, model).
"""
__all__ = ['BaseValidator', 'InlineValidator']
class BaseValidator(object):
    """Validates the common ModelAdmin/InlineModelAdmin options for a model.

    ``validate`` reflects over the instance and runs every ``validate_*``
    method, so subclasses add checks simply by defining new methods with
    that name prefix. All failures raise ImproperlyConfigured with a
    message naming the offending admin class and option.
    """
    def validate(self, cls, model):
        # Dispatch by naming convention: every 'validate_*' method found on
        # the instance is a check and is called with (cls, model).
        for m in dir(self):
            if m.startswith('validate_'):
                getattr(self, m)(cls, model)

    def check_field_spec(self, cls, model, flds, label):
        """
        Validate the fields specification in `flds` from a ModelAdmin subclass
        `cls` for the `model` model. Use `label` for reporting problems to the user.
        The fields specification can be a ``fields`` option or a ``fields``
        sub-option from a ``fieldsets`` option component.
        """
        for fields in flds:
            # The entry in fields might be a tuple. If it is a standalone
            # field, make it into a tuple to make processing easier.
            if type(fields) != tuple:
                fields = (fields,)
            for field in fields:
                if field in cls.readonly_fields:
                    # Stuff can be put in fields that isn't actually a
                    # model field if it's in readonly_fields,
                    # readonly_fields will handle the validation of such
                    # things.
                    continue
                try:
                    f = model._meta.get_field(field)
                except FieldDoesNotExist:
                    # If we can't find a field on the model that matches, it could be an
                    # extra field on the form; nothing to check so move on to the next field.
                    continue
                if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created:
                    raise ImproperlyConfigured("'%s.%s' "
                        "can't include the ManyToManyField field '%s' because "
                        "'%s' manually specifies a 'through' model." % (
                            cls.__name__, label, field, field))

    def validate_raw_id_fields(self, cls, model):
        " Validate that raw_id_fields only contains field names that are listed on the model. "
        if hasattr(cls, 'raw_id_fields'):
            check_isseq(cls, 'raw_id_fields', cls.raw_id_fields)
            for idx, field in enumerate(cls.raw_id_fields):
                f = get_field(cls, model, 'raw_id_fields', field)
                if not isinstance(f, (models.ForeignKey, models.ManyToManyField)):
                    raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must "
                            "be either a ForeignKey or ManyToManyField."
                            % (cls.__name__, idx, field))

    def validate_fields(self, cls, model):
        " Validate that fields only refer to existing fields, doesn't contain duplicates. "
        # fields
        if cls.fields:  # default value is None
            check_isseq(cls, 'fields', cls.fields)
            self.check_field_spec(cls, model, cls.fields, 'fields')
            # 'fields' and 'fieldsets' are mutually exclusive options.
            if cls.fieldsets:
                raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__)
            if len(cls.fields) > len(set(cls.fields)):
                raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__)

    def validate_fieldsets(self, cls, model):
        " Validate that fieldsets is properly formatted and doesn't contain duplicates. "
        from django.contrib.admin.options import flatten_fieldsets
        if cls.fieldsets:  # default value is None
            check_isseq(cls, 'fieldsets', cls.fieldsets)
            # Each fieldset must be a (name, options-dict) pair with a
            # 'fields' key in the options.
            for idx, fieldset in enumerate(cls.fieldsets):
                check_isseq(cls, 'fieldsets[%d]' % idx, fieldset)
                if len(fieldset) != 2:
                    raise ImproperlyConfigured("'%s.fieldsets[%d]' does not "
                            "have exactly two elements." % (cls.__name__, idx))
                check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1])
                if 'fields' not in fieldset[1]:
                    raise ImproperlyConfigured("'fields' key is required in "
                            "%s.fieldsets[%d][1] field options dict."
                            % (cls.__name__, idx))
                self.check_field_spec(cls, model, fieldset[1]['fields'], "fieldsets[%d][1]['fields']" % idx)
            flattened_fieldsets = flatten_fieldsets(cls.fieldsets)
            if len(flattened_fieldsets) > len(set(flattened_fieldsets)):
                raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__)

    def validate_exclude(self, cls, model):
        " Validate that exclude is a sequence without duplicates. "
        if cls.exclude:  # default value is None
            check_isseq(cls, 'exclude', cls.exclude)
            if len(cls.exclude) > len(set(cls.exclude)):
                raise ImproperlyConfigured('There are duplicate field(s) in %s.exclude' % cls.__name__)

    def validate_form(self, cls, model):
        " Validate that form subclasses BaseModelForm. "
        if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm):
            raise ImproperlyConfigured("%s.form does not inherit from "
                    "BaseModelForm." % cls.__name__)

    def validate_filter_vertical(self, cls, model):
        " Validate that filter_vertical is a sequence of field names. "
        if hasattr(cls, 'filter_vertical'):
            check_isseq(cls, 'filter_vertical', cls.filter_vertical)
            for idx, field in enumerate(cls.filter_vertical):
                f = get_field(cls, model, 'filter_vertical', field)
                # The vertical filter widget only makes sense for M2M fields.
                if not isinstance(f, models.ManyToManyField):
                    raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be "
                        "a ManyToManyField." % (cls.__name__, idx))

    def validate_filter_horizontal(self, cls, model):
        " Validate that filter_horizontal is a sequence of field names. "
        if hasattr(cls, 'filter_horizontal'):
            check_isseq(cls, 'filter_horizontal', cls.filter_horizontal)
            for idx, field in enumerate(cls.filter_horizontal):
                f = get_field(cls, model, 'filter_horizontal', field)
                # The horizontal filter widget only makes sense for M2M fields.
                if not isinstance(f, models.ManyToManyField):
                    raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be "
                        "a ManyToManyField." % (cls.__name__, idx))

    def validate_radio_fields(self, cls, model):
        " Validate that radio_fields is a dictionary of choice or foreign key fields. "
        from django.contrib.admin.options import HORIZONTAL, VERTICAL
        if hasattr(cls, 'radio_fields'):
            check_isdict(cls, 'radio_fields', cls.radio_fields)
            for field, val in cls.radio_fields.items():
                f = get_field(cls, model, 'radio_fields', field)
                # Radio buttons need a closed set of options: either FK
                # choices or an explicit 'choices' list on the field.
                if not (isinstance(f, models.ForeignKey) or f.choices):
                    raise ImproperlyConfigured("'%s.radio_fields['%s']' "
                            "is neither an instance of ForeignKey nor does "
                            "have choices set." % (cls.__name__, field))
                if val not in (HORIZONTAL, VERTICAL):
                    raise ImproperlyConfigured("'%s.radio_fields['%s']' "
                            "is neither admin.HORIZONTAL nor admin.VERTICAL."
                            % (cls.__name__, field))

    def validate_prepopulated_fields(self, cls, model):
        " Validate that prepopulated_fields if a dictionary containing allowed field types. "
        # prepopulated_fields
        if hasattr(cls, 'prepopulated_fields'):
            check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields)
            for field, val in cls.prepopulated_fields.items():
                f = get_field(cls, model, 'prepopulated_fields', field)
                # Prepopulation is done client-side from text inputs, so
                # relational and datetime targets are rejected.
                if isinstance(f, (models.DateTimeField, models.ForeignKey,
                    models.ManyToManyField)):
                    raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' "
                            "is either a DateTimeField, ForeignKey or "
                            "ManyToManyField. This isn't allowed."
                            % (cls.__name__, field))
                check_isseq(cls, "prepopulated_fields['%s']" % field, val)
                for idx, f in enumerate(val):
                    get_field(cls, model, "prepopulated_fields['%s'][%d]" % (field, idx), f)

    def validate_view_on_site_url(self, cls, model):
        if hasattr(cls, 'view_on_site'):
            if not callable(cls.view_on_site) and not isinstance(cls.view_on_site, bool):
                raise ImproperlyConfigured("%s.view_on_site is not a callable or a boolean value." % cls.__name__)

    def validate_ordering(self, cls, model):
        " Validate that ordering refers to existing fields or is random. "
        # ordering = None
        if cls.ordering:
            check_isseq(cls, 'ordering', cls.ordering)
            for idx, field in enumerate(cls.ordering):
                # '?' means random ordering and must stand alone.
                if field == '?' and len(cls.ordering) != 1:
                    raise ImproperlyConfigured("'%s.ordering' has the random "
                            "ordering marker '?', but contains other fields as "
                            "well. Please either remove '?' or the other fields."
                            % cls.__name__)
                if field == '?':
                    continue
                if field.startswith('-'):
                    field = field[1:]
                # Skip ordering in the format field1__field2 (FIXME: checking
                # this format would be nice, but it's a little fiddly).
                if '__' in field:
                    continue
                get_field(cls, model, 'ordering[%d]' % idx, field)

    def validate_readonly_fields(self, cls, model):
        " Validate that readonly_fields refers to proper attribute or field. "
        if hasattr(cls, "readonly_fields"):
            check_isseq(cls, "readonly_fields", cls.readonly_fields)
            for idx, field in enumerate(cls.readonly_fields):
                # Accepted, in order: a callable, an attribute on the admin
                # class, an attribute on the model, or a real model field.
                if not callable(field):
                    if not hasattr(cls, field):
                        if not hasattr(model, field):
                            try:
                                model._meta.get_field(field)
                            except FieldDoesNotExist:
                                raise ImproperlyConfigured(
                                    "%s.readonly_fields[%d], %r is not a callable or "
                                    "an attribute of %r or found in the model %r."
                                    % (cls.__name__, idx, field, cls.__name__, model._meta.object_name)
                                )
class ModelAdminValidator(BaseValidator):
    """Adds the ModelAdmin-specific checks (changelist options, inlines)
    on top of the common option validation in BaseValidator.

    Each ``validate_*`` method is discovered and run by
    ``BaseValidator.validate``.
    """
    def validate_save_as(self, cls, model):
        " Validate save_as is a boolean. "
        check_type(cls, 'save_as', bool)

    def validate_save_on_top(self, cls, model):
        " Validate save_on_top is a boolean. "
        check_type(cls, 'save_on_top', bool)

    def validate_inlines(self, cls, model):
        " Validate inline model admin classes. "
        # Imported here (not module level) to avoid a circular import with
        # django.contrib.admin.options.
        from django.contrib.admin.options import BaseModelAdmin
        if hasattr(cls, 'inlines'):
            check_isseq(cls, 'inlines', cls.inlines)
            for idx, inline in enumerate(cls.inlines):
                if not issubclass(inline, BaseModelAdmin):
                    raise ImproperlyConfigured("'%s.inlines[%d]' does not inherit "
                            "from BaseModelAdmin." % (cls.__name__, idx))
                if not inline.model:
                    raise ImproperlyConfigured("'model' is a required attribute "
                            "of '%s.inlines[%d]'." % (cls.__name__, idx))
                if not issubclass(inline.model, models.Model):
                    raise ImproperlyConfigured("'%s.inlines[%d].model' does not "
                            "inherit from models.Model." % (cls.__name__, idx))
                # Recursively validate the inline itself, then its relation
                # to this parent model.
                inline.validate(inline.model)
                self.check_inline(inline, model)

    def check_inline(self, cls, parent_model):
        " Validate inline class's fk field is not excluded. "
        fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name, can_fail=True)
        if hasattr(cls, 'exclude') and cls.exclude:
            # Excluding the FK back to the parent would break the formset.
            if fk and fk.name in cls.exclude:
                raise ImproperlyConfigured("%s cannot exclude the field "
                        "'%s' - this is the foreign key to the parent model "
                        "%s.%s." % (cls.__name__, fk.name, parent_model._meta.app_label, parent_model.__name__))

    def validate_list_display(self, cls, model):
        " Validate that list_display only contains fields or usable attributes. "
        if hasattr(cls, 'list_display'):
            check_isseq(cls, 'list_display', cls.list_display)
            for idx, field in enumerate(cls.list_display):
                # Accepted: a callable, an attribute of the admin class,
                # an attribute of the model, or a model field.
                if not callable(field):
                    if not hasattr(cls, field):
                        if not hasattr(model, field):
                            try:
                                model._meta.get_field(field)
                            except FieldDoesNotExist:
                                raise ImproperlyConfigured(
                                    "%s.list_display[%d], %r is not a callable or "
                                    "an attribute of %r or found in the model %r."
                                    % (cls.__name__, idx, field, cls.__name__, model._meta.object_name)
                                )
                        else:
                            # getattr(model, field) could be an X_RelatedObjectsDescriptor
                            f = fetch_attr(cls, model, "list_display[%d]" % idx, field)
                            if isinstance(f, models.ManyToManyField):
                                raise ImproperlyConfigured(
                                    "'%s.list_display[%d]', '%s' is a ManyToManyField "
                                    "which is not supported."
                                    % (cls.__name__, idx, field)
                                )

    def validate_list_display_links(self, cls, model):
        " Validate that list_display_links either is None or a unique subset of list_display."
        if hasattr(cls, 'list_display_links'):
            if cls.list_display_links is None:
                return
            check_isseq(cls, 'list_display_links', cls.list_display_links)
            for idx, field in enumerate(cls.list_display_links):
                if field not in cls.list_display:
                    raise ImproperlyConfigured("'%s.list_display_links[%d]' "
                            "refers to '%s' which is not defined in 'list_display'."
                            % (cls.__name__, idx, field))

    def validate_list_filter(self, cls, model):
        """
        Validate that list_filter is a sequence of one of three options:
            1: 'field' - a basic field filter, possibly w/ relationships (eg, 'field__rel')
            2: ('field', SomeFieldListFilter) - a field-based list filter class
            3: SomeListFilter - a non-field list filter class
        """
        from django.contrib.admin import ListFilter, FieldListFilter
        if hasattr(cls, 'list_filter'):
            check_isseq(cls, 'list_filter', cls.list_filter)
            for idx, item in enumerate(cls.list_filter):
                if callable(item) and not isinstance(item, models.Field):
                    # If item is option 3, it should be a ListFilter...
                    if not issubclass(item, ListFilter):
                        raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
                                " which is not a descendant of ListFilter."
                                % (cls.__name__, idx, item.__name__))
                    # ...  but not a FieldListFilter.
                    if issubclass(item, FieldListFilter):
                        raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
                                " which is of type FieldListFilter but is not"
                                " associated with a field name."
                                % (cls.__name__, idx, item.__name__))
                else:
                    if isinstance(item, (tuple, list)):
                        # item is option #2
                        field, list_filter_class = item
                        if not issubclass(list_filter_class, FieldListFilter):
                            raise ImproperlyConfigured("'%s.list_filter[%d][1]'"
                                " is '%s' which is not of type FieldListFilter."
                                % (cls.__name__, idx, list_filter_class.__name__))
                    else:
                        # item is option #1
                        field = item
                    # Validate the field string
                    try:
                        get_fields_from_path(model, field)
                    except (NotRelationField, FieldDoesNotExist):
                        raise ImproperlyConfigured("'%s.list_filter[%d]' refers to '%s'"
                                " which does not refer to a Field."
                                % (cls.__name__, idx, field))

    def validate_list_select_related(self, cls, model):
        " Validate that list_select_related is a boolean, a list or a tuple. "
        list_select_related = getattr(cls, 'list_select_related', None)
        if list_select_related:
            types = (bool, tuple, list)
            if not isinstance(list_select_related, types):
                raise ImproperlyConfigured("'%s.list_select_related' should be "
                                           "either a bool, a tuple or a list" %
                                           cls.__name__)

    def validate_list_per_page(self, cls, model):
        " Validate that list_per_page is an integer. "
        check_type(cls, 'list_per_page', int)

    def validate_list_max_show_all(self, cls, model):
        " Validate that list_max_show_all is an integer. "
        check_type(cls, 'list_max_show_all', int)

    def validate_list_editable(self, cls, model):
        """
        Validate that list_editable is a sequence of editable fields from
        list_display without first element.
        """
        if hasattr(cls, 'list_editable') and cls.list_editable:
            check_isseq(cls, 'list_editable', cls.list_editable)
            for idx, field_name in enumerate(cls.list_editable):
                try:
                    field = model._meta.get_field(field_name)
                except FieldDoesNotExist:
                    raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
                        "field, '%s', not defined on %s.%s."
                        % (cls.__name__, idx, field_name, model._meta.app_label, model.__name__))
                if field_name not in cls.list_display:
                    raise ImproperlyConfigured("'%s.list_editable[%d]' refers to "
                        "'%s' which is not defined in 'list_display'."
                        % (cls.__name__, idx, field_name))
                if cls.list_display_links is not None:
                    # A field can't be both an edit widget and a link, and
                    # the first list_display column must remain a link.
                    if field_name in cls.list_display_links:
                        raise ImproperlyConfigured("'%s' cannot be in both '%s.list_editable'"
                            " and '%s.list_display_links'"
                            % (field_name, cls.__name__, cls.__name__))
                    if not cls.list_display_links and cls.list_display[0] in cls.list_editable:
                        raise ImproperlyConfigured("'%s.list_editable[%d]' refers to"
                            " the first field in list_display, '%s', which can't be"
                            " used unless list_display_links is set."
                            % (cls.__name__, idx, cls.list_display[0]))
                if not field.editable:
                    raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
                        "field, '%s', which isn't editable through the admin."
                        % (cls.__name__, idx, field_name))

    def validate_search_fields(self, cls, model):
        " Validate search_fields is a sequence. "
        if hasattr(cls, 'search_fields'):
            check_isseq(cls, 'search_fields', cls.search_fields)

    def validate_date_hierarchy(self, cls, model):
        " Validate that date_hierarchy refers to DateField or DateTimeField. "
        if cls.date_hierarchy:
            f = get_field(cls, model, 'date_hierarchy', cls.date_hierarchy)
            if not isinstance(f, (models.DateField, models.DateTimeField)):
                raise ImproperlyConfigured("'%s.date_hierarchy is "
                        "neither an instance of DateField nor DateTimeField."
                        % cls.__name__)
class InlineValidator(BaseValidator):
    """Adds the InlineModelAdmin-specific checks on top of BaseValidator.

    Each ``validate_*`` method is discovered and run by
    ``BaseValidator.validate``.
    """

    def validate_fk_name(self, cls, model):
        """Check that ``fk_name``, when set, names a ForeignKey field."""
        if not cls.fk_name:  # default value is None
            return
        fk = get_field(cls, model, 'fk_name', cls.fk_name)
        if isinstance(fk, models.ForeignKey):
            return
        raise ImproperlyConfigured("'%s.fk_name is not an instance of "
                "models.ForeignKey." % cls.__name__)

    def validate_extra(self, cls, model):
        """Check that ``extra`` is an integer."""
        check_type(cls, 'extra', int)

    def validate_max_num(self, cls, model):
        """Check that ``max_num`` is an integer."""
        check_type(cls, 'max_num', int)

    def validate_formset(self, cls, model):
        """Check that ``formset``, when present, subclasses BaseModelFormSet."""
        if not hasattr(cls, 'formset'):
            return
        if not issubclass(cls.formset, BaseModelFormSet):
            raise ImproperlyConfigured("'%s.formset' does not inherit from "
                    "BaseModelFormSet." % cls.__name__)
def check_type(cls, attr, type_):
    """Ensure the admin option *attr* on *cls*, when set, is a *type_*.

    A missing or ``None`` value is accepted; anything else must be an
    instance of ``type_`` or ImproperlyConfigured is raised.
    """
    value = getattr(cls, attr, None)
    if value is None:
        # Unset/None options are always allowed.
        return
    if not isinstance(value, type_):
        raise ImproperlyConfigured("'%s.%s' should be a %s."
                % (cls.__name__, attr, type_.__name__))
def check_isseq(cls, label, obj):
    """Ensure the admin option *label* on *cls* is a list or tuple."""
    if isinstance(obj, (list, tuple)):
        return
    raise ImproperlyConfigured("'%s.%s' must be a list or tuple." % (cls.__name__, label))
def check_isdict(cls, label, obj):
    """Ensure the admin option *label* on *cls* is a dictionary."""
    if isinstance(obj, dict):
        return
    raise ImproperlyConfigured("'%s.%s' must be a dictionary." % (cls.__name__, label))
def get_field(cls, model, label, field):
    """Look up *field* on *model*, turning a missing field into a
    configuration error that names the offending admin option *label*.
    """
    opts = model._meta
    try:
        return opts.get_field(field)
    except FieldDoesNotExist:
        raise ImproperlyConfigured(
            "'%s.%s' refers to field '%s' that is missing from model '%s.%s'."
                % (cls.__name__, label, field, opts.app_label, model.__name__))
def fetch_attr(cls, model, label, field):
    """Resolve *field* as a model field first, then as a plain attribute.

    Raises ImproperlyConfigured (naming the admin option *label*) when the
    name is neither a field nor an attribute of *model*.
    """
    try:
        return model._meta.get_field(field)
    except FieldDoesNotExist:
        # Not a real model field; fall back to attribute lookup below.
        pass
    if hasattr(model, field):
        return getattr(model, field)
    raise ImproperlyConfigured(
        "'%s.%s' refers to '%s' that is neither a field, method or "
        "property of model '%s.%s'."
        % (cls.__name__, label, field, model._meta.app_label, model.__name__)
    )
|
neumerance/deploy | refs/heads/master | .venv/lib/python2.7/site-packages/cinderclient/tests/v1/test_quotas.py | 3 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.tests import utils
from cinderclient.tests.v1 import fakes
# Shared fake client; each test below asserts against the HTTP calls it records.
cs = fakes.FakeClient()
class QuotaSetsTest(utils.TestCase):
    """Exercises the v1 quota-set API bindings against the fake client,
    verifying which HTTP request each manager call issues.
    """
    def test_tenant_quotas_get(self):
        # Fetching a tenant's quotas issues a GET for that tenant id.
        tenant_id = 'test'
        cs.quotas.get(tenant_id)
        cs.assert_called('GET', '/os-quota-sets/%s' % tenant_id)

    def test_tenant_quotas_defaults(self):
        # The default quota set lives under a /defaults sub-resource.
        tenant_id = 'test'
        cs.quotas.defaults(tenant_id)
        cs.assert_called('GET', '/os-quota-sets/%s/defaults' % tenant_id)

    def test_update_quota(self):
        # Each update() on a quota-set resource PUTs back to the same URL.
        q = cs.quotas.get('test')
        q.update(volumes=2)
        q.update(snapshots=2)
        cs.assert_called('PUT', '/os-quota-sets/test')

    def test_refresh_quota(self):
        # get() on an existing resource re-fetches it from the server and
        # overwrites any locally modified attributes.
        q = cs.quotas.get('test')
        q2 = cs.quotas.get('test')
        self.assertEqual(q.volumes, q2.volumes)
        self.assertEqual(q.snapshots, q2.snapshots)
        q2.volumes = 0
        self.assertNotEqual(q.volumes, q2.volumes)
        q2.snapshots = 0
        self.assertNotEqual(q.snapshots, q2.snapshots)
        q2.get()
        self.assertEqual(q.volumes, q2.volumes)
        self.assertEqual(q.snapshots, q2.snapshots)
|
quasiben/doto | refs/heads/master | doto/commands/wait.py | 2 | '''
Status of a Digital Ocean event (optionally waiting for it to finish)
eg:
    doto event event_id [--wait]
'''
from __future__ import print_function
from doto import connect_d0
import sys
from doto.event import Event
def print_callback(wait_status, event):
    """Progress reporter for Event.wait: prints the target status, the
    event's current status, and its completion percentage."""
    percentage = event.data.get('percentage') or 0
    message = "Waiting for status to be %s (got %s) at %s%%" % (
        wait_status, event.status, percentage)
    print(message)
def main(args):
    """Entry point for the ``event`` sub-command: print the event and
    optionally block until it completes."""
    d0 = connect_d0()
    # Event wraps the raw connection; _conn is the underlying API handle.
    e = Event(d0._conn, args.event_id)
    print("Event %s" % e)
    # NOTE(review): args.wait must be registered by the argument parser;
    # verify the parser defines a --wait option, otherwise this raises
    # AttributeError.
    if args.wait:
        # Poll until the event finishes, reporting progress via the callback.
        e.wait(callback=print_callback)
        print("Finished")
def add_parser(subparsers):
    """Register the ``event`` sub-command on *subparsers*.

    Declares the positional ``event_id`` and the ``--wait`` flag that
    ``main`` reads. Previously ``--wait`` was never registered, so every
    ``doto event <id>`` invocation crashed with AttributeError when
    ``main`` accessed ``args.wait``.
    """
    parser = subparsers.add_parser('event',
                                   help="Get droplet status",
                                   description=__doc__)
    parser.add_argument("event_id", type=int)
    # main() unconditionally checks args.wait; register it here (default
    # False) so the attribute always exists on the parsed namespace.
    parser.add_argument("--wait", action="store_true", default=False,
                        help="Block until the event completes, printing progress")
    parser.set_defaults(main=main, sub_parser=parser)
|
altsen/diandiyun-platform | refs/heads/master | lms/djangoapps/course_wiki/course_nav.py | 6 | import re
from urlparse import urlparse
from django.http import Http404
from django.shortcuts import redirect
from django.conf import settings
from django.core.exceptions import PermissionDenied
from wiki.models import reverse as wiki_reverse
from courseware.access import has_access
from courseware.courses import get_course_with_access
from student.models import CourseEnrollment
# Matches wiki pages served underneath a course, capturing the course id
# (an org/course/run triple) and the trailing wiki path.
IN_COURSE_WIKI_REGEX = r'/courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/wiki/(?P<wiki_path>.*|)$'
IN_COURSE_WIKI_COMPILED_REGEX = re.compile(IN_COURSE_WIKI_REGEX)
# Matches top-level /wiki/... URLs, i.e. wiki access outside any course.
WIKI_ROOT_ACCESS_COMPILED_REGEX = re.compile(r'^/wiki/(?P<wiki_path>.*|)$')
class Middleware(object):
    """
    This middleware is to keep the course nav bar above the wiki while
    the student clicks around to other wiki pages.
    If it intercepts a request for /wiki/.. that has a referrer in the
    form /courses/course_id/... it will redirect the user to the page
    /courses/course_id/wiki/...
    It is also possible that someone followed a link leading to a course
    that they don't have access to. In this case, we redirect them to the
    same page on the regular wiki.
    If we return a redirect, this middleware makes sure that the redirect
    keeps the student in the course.
    Finally, if the student is in the course viewing a wiki, we change the
    reverse() function to resolve wiki urls as a course wiki url by setting
    the _transform_url attribute on wiki.models.reverse.
    Forgive me Father, for I have hacked.
    """
    def __init__(self):
        # NOTE(review): self.redirected is per-middleware-instance state that
        # carries information from process_request to process_response.
        # Presumably this relies on Django handling one request at a time per
        # middleware instance/thread -- confirm before reusing this pattern.
        self.redirected = False

    def process_request(self, request):
        self.redirected = False
        # Reset any URL transform left over from a previous request.
        wiki_reverse._transform_url = lambda url: url
        referer = request.META.get('HTTP_REFERER')
        destination = request.path
        # Check to see if we don't allow top-level access to the wiki via the /wiki/xxxx/yyy/zzz URLs
        # this will help prevent people from writing pell-mell to the Wiki in an unstructured way
        path_match = WIKI_ROOT_ACCESS_COMPILED_REGEX.match(destination)
        if path_match and not settings.FEATURES.get('ALLOW_WIKI_ROOT_ACCESS', False):
            raise PermissionDenied()
        if request.method == 'GET':
            new_destination = self.get_redirected_url(request.user, referer, destination)
            if new_destination != destination:
                # We mark that we generated this redirection, so we don't modify it again
                self.redirected = True
                return redirect(new_destination)
        course_match = IN_COURSE_WIKI_COMPILED_REGEX.match(destination)
        if course_match:
            course_id = course_match.group('course_id')
            # Authorization Check
            # Let's see if user is enrolled or the course allows for public access
            course = get_course_with_access(request.user, course_id, 'load')
            if not course.allow_public_wiki_access:
                is_enrolled = CourseEnrollment.is_enrolled(request.user, course.id)
                is_staff = has_access(request.user, course, 'staff')
                if not (is_enrolled or is_staff):
                    raise PermissionDenied()
            # Make wiki-internal reverse() calls produce in-course URLs so
            # links keep the student under /courses/<id>/wiki/.
            prepend_string = '/courses/' + course_id
            wiki_reverse._transform_url = lambda url: prepend_string + url
        return None

    def process_response(self, request, response):
        """
        If this is a redirect response going to /wiki/*, then we might need
        to change it to be a redirect going to /courses/*/wiki*.
        """
        if not self.redirected and response.status_code == 302:  # This is a redirect
            referer = request.META.get('HTTP_REFERER')
            destination_url = response['LOCATION']
            destination = urlparse(destination_url).path
            new_destination = self.get_redirected_url(request.user, referer, destination)
            if new_destination != destination:
                # Rewrite only the path portion, preserving scheme/host/query.
                new_url = destination_url.replace(destination, new_destination)
                response['LOCATION'] = new_url
        return response

    def get_redirected_url(self, user, referer, destination):
        """
        Returns None if the destination shouldn't be changed.
        """
        if not referer:
            return destination
        referer_path = urlparse(referer).path
        path_match = re.match(r'^/wiki/(?P<wiki_path>.*|)$', destination)
        if path_match:
            # We are going to the wiki. Check if we came from a course
            course_match = re.match(r'/courses/(?P<course_id>[^/]+/[^/]+/[^/]+)/.*', referer_path)
            if course_match:
                course_id = course_match.group('course_id')
                # See if we are able to view the course. If we are, redirect to it
                try:
                    course = get_course_with_access(user, course_id, 'load')
                    return "/courses/" + course.id + "/wiki/" + path_match.group('wiki_path')
                except Http404:
                    # Even though we came from the course, we can't see it. So don't worry about it.
                    pass
        else:
            # It is also possible we are going to a course wiki view, but we
            # don't have permission to see the course!
            course_match = re.match(IN_COURSE_WIKI_REGEX, destination)
            if course_match:
                course_id = course_match.group('course_id')
                # See if we are able to view the course. If we aren't, redirect to regular wiki
                try:
                    course = get_course_with_access(user, course_id, 'load')
                    # Good, we can see the course. Carry on
                    return destination
                except Http404:
                    # We can't see the course, so redirect to the regular wiki
                    return "/wiki/" + course_match.group('wiki_path')
        return destination
def context_processor(request):
    """
    Template context processor for the wiki.

    When the current URL has the form /courses/(course_id)/wiki/..., add
    'course' (and 'staff_access') to the context so the course nav bar can
    be shown; otherwise contribute nothing.
    """
    match = re.match(IN_COURSE_WIKI_REGEX, request.path)
    if not match:
        return {}
    try:
        course = get_course_with_access(request.user, match.group('course_id'), 'load')
    except Http404:
        # We couldn't access the course for whatever reason.  It is too late
        # to change the URL here, so we just leave out the course context.
        # The middleware shouldn't let this happen.
        return {}
    return {
        'course': course,
        'staff_access': has_access(request.user, course, 'staff'),
    }
|
hfp/tensorflow-xsmm | refs/heads/master | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 25 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
  # pylint: disable=undefined-variable
  """Tests for `DataFeeder`."""

  def setUp(self):
    self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir')
    file_io.create_dir(self._base_dir)

  def tearDown(self):
    file_io.delete_recursively(self._base_dir)

  def _wrap_dict(self, data, prepend=''):
    """Wrap `data` in a two-key dict so every test can also run in dict mode."""
    return {prepend + '1': data, prepend + '2': data}

  def _assert_raises(self, input_data):
    """Assert that `DataFeeder` rejects `input_data` with a conversion error."""
    with self.assertRaisesRegexp(TypeError, 'annot convert'):
      data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)

  def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
    """Check the feeder's numpy input dtype and the dtype of built tensors."""
    feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
    if isinstance(input_data, dict):
      for v in list(feeder.input_dtype.values()):
        self.assertEqual(expected_np_dtype, v)
    else:
      self.assertEqual(expected_np_dtype, feeder.input_dtype)
    with ops.Graph().as_default() as g, self.session(g):
      inp, _ = feeder.input_builder()
      if isinstance(inp, dict):
        for v in list(inp.values()):
          self.assertEqual(expected_tf_dtype, v.dtype)
      else:
        self.assertEqual(expected_tf_dtype, inp.dtype)

  def test_input_int8(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
    self._assert_dtype(np.int8, dtypes.int8, data)
    self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))

  def test_input_int16(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
    self._assert_dtype(np.int16, dtypes.int16, data)
    self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))

  def test_input_int32(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
    self._assert_dtype(np.int32, dtypes.int32, data)
    self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))

  def test_input_int64(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
    self._assert_dtype(np.int64, dtypes.int64, data)
    self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))

  def test_input_uint32(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
    self._assert_dtype(np.uint32, dtypes.uint32, data)
    self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data))

  def test_input_uint64(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
    self._assert_dtype(np.uint64, dtypes.uint64, data)
    self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data))

  def test_input_uint8(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
    self._assert_dtype(np.uint8, dtypes.uint8, data)
    self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))

  def test_input_uint16(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
    self._assert_dtype(np.uint16, dtypes.uint16, data)
    self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))

  def test_input_float16(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
    self._assert_dtype(np.float16, dtypes.float16, data)
    self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))

  def test_input_float32(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
    self._assert_dtype(np.float32, dtypes.float32, data)
    self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))

  def test_input_float64(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
    self._assert_dtype(np.float64, dtypes.float64, data)
    self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))

  def test_input_bool(self):
    data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
    self._assert_dtype(np.bool, dtypes.bool, data)
    self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))

  def test_input_string(self):
    input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
    self._assert_dtype(input_data.dtype, dtypes.string, input_data)
    self._assert_dtype(input_data.dtype, dtypes.string,
                       self._wrap_dict(input_data))

  def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
    """assertAllClose that understands dict inputs, feed dicts and attrs."""

    def func(x):
      val = getattr(x, src_prop) if src_prop else x
      return val if src_key_of is None else src_key_of[val]

    if isinstance(src, dict):
      for k in list(src.keys()):
        self.assertAllClose(func(src[k]), dest)
    else:
      self.assertAllClose(func(src), dest)

  def test_unsupervised(self):

    def func(feeder):
      with self.cached_session():
        inp, _ = feeder.input_builder()
        feed_dict_fn = feeder.get_feed_dict_fn()
        feed_dict = feed_dict_fn()
        self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')

    data = np.matrix([[1, 2], [2, 3], [3, 4]])
    func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(data), None, n_classes=0, batch_size=1))

  def test_data_feeder_regression(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')

    x = np.matrix([[1, 2], [3, 4]])
    y = np.array([1, 2])
    func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(x, 'in'),
            self._wrap_dict(y, 'out'),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=3))

  def test_epoch(self):

    def func(feeder):
      with self.cached_session():
        feeder.input_builder()
        epoch = feeder.make_epoch_variable()
        feed_dict_fn = feeder.get_feed_dict_fn()
        # First input
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [0])
        # Second input
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [0])
        # Third input
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [0])
        # Back to the first input again, so new epoch.
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [1])

    data = np.matrix([[1, 2], [2, 3], [3, 4]])
    labels = np.array([0, 0, 1])
    func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(data, 'in'),
            self._wrap_dict(labels, 'out'),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=1))

  def test_data_feeder_multioutput_regression(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')

    x = np.matrix([[1, 2], [3, 4]])
    y = np.array([[1, 2], [3, 4]])
    func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(x, 'in'),
            self._wrap_dict(y, 'out'),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=2))

  def test_data_feeder_multioutput_classification(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(
          out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
                [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
          'name')

    x = np.matrix([[1, 2], [3, 4]])
    y = np.array([[0, 1, 2], [2, 3, 4]])
    func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(x, 'in'),
            self._wrap_dict(y, 'out'),
            n_classes=self._wrap_dict(5, 'out'),
            batch_size=2))

  def test_streaming_data_feeder(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
      self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')

    def x_iter(wrap_dict=False):
      yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
          np.array([[1, 2]]), 'in')
      yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
          np.array([[3, 4]]), 'in')

    def y_iter(wrap_dict=False):
      yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
          np.array([[1], [2]]), 'out')
      yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
          np.array([[2], [2]]), 'out')

    func(
        data_feeder.StreamingDataFeeder(
            x_iter(), y_iter(), n_classes=0, batch_size=2))
    func(
        data_feeder.StreamingDataFeeder(
            x_iter(True),
            y_iter(True),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=2))
    # Test non-full batches.
    func(
        data_feeder.StreamingDataFeeder(
            x_iter(), y_iter(), n_classes=0, batch_size=10))
    func(
        data_feeder.StreamingDataFeeder(
            x_iter(True),
            y_iter(True),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=10))

  def test_dask_data_feeder(self):
    if HAS_PANDAS and HAS_DASK:
      x = pd.DataFrame(
          dict(
              a=np.array([.1, .3, .4, .6, .2, .1, .6]),
              b=np.array([.7, .8, .1, .2, .5, .3, .9])))
      x = dd.from_pandas(x, npartitions=2)
      y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
      y = dd.from_pandas(y, npartitions=2)
      # TODO(ipolosukhin): Remove or restore this.
      # x = extract_dask_data(x)
      # y = extract_dask_labels(y)
      df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
                                                [0.60000002, 0.2]])
      self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])

  # TODO(rohanj): Fix this test by fixing data_feeder. Currently, h5py doesn't
  # support permutation based indexing lookups (More documentation at
  # http://docs.h5py.org/en/latest/high/dataset.html#fancy-indexing)
  def DISABLED_test_hdf5_data_feeder(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      # Fixed: was `self.assertAllClose(out, [2, 1], feed_dict, 'name')`,
      # which passed `feed_dict` as the tolerance argument; use the
      # dict-aware helper like the sibling tests do.
      self._assertAllClose(out, [2, 1], feed_dict, 'name')

    try:
      import h5py  # pylint: disable=g-import-not-at-top
      x = np.matrix([[1, 2], [3, 4]])
      y = np.array([1, 2])
      file_path = os.path.join(self._base_dir, 'test_hdf5.h5')
      h5f = h5py.File(file_path, 'w')
      h5f.create_dataset('x', data=x)
      h5f.create_dataset('y', data=y)
      h5f.close()
      h5f = h5py.File(file_path, 'r')
      x = h5f['x']
      y = h5f['y']
      func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
      func(
          data_feeder.DataFeeder(
              self._wrap_dict(x, 'in'),
              self._wrap_dict(y, 'out'),
              n_classes=self._wrap_dict(0, 'out'),
              batch_size=3))
    except ImportError:
      print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
  """Tests for `DataFeeder.setup_predict_data_feeder`."""

  def test_iterable_data(self):
    # pylint: disable=undefined-variable
    def check(feeder):
      # batch_size=2 over three rows: one full batch, then the leftover row.
      self._assertAllClose(six.next(feeder), [[1, 2], [3, 4]])
      self._assertAllClose(six.next(feeder), [[5, 6]])

    rows = [[1, 2], [3, 4], [5, 6]]
    check(data_feeder.setup_predict_data_feeder(iter(rows), batch_size=2))
    dict_rows = iter([self._wrap_dict(v) for v in iter(rows)])
    check(data_feeder.setup_predict_data_feeder(dict_rows, batch_size=2))
if __name__ == '__main__':
  # Allow running this test module directly.
  test.main()
|
d3sp0s/theharvester | refs/heads/master | lib/markup.py | 43 | # This code is in the public domain, it comes
# with absolutely no warranty and you can do
# absolutely whatever you want with it.
__date__ = '17 May 2007'
__version__ = '1.7'
__doc__= """
This is markup.py - a Python module that attempts to
make it easier to generate HTML/XML from a Python program
in an intuitive, lightweight, customizable and pythonic way.
The code is in the public domain.
Version: %s as of %s.
Documentation and further info is at http://markup.sourceforge.net/
Please send bug reports, feature requests, enhancement
ideas or questions to nogradi at gmail dot com.
Installation: drop markup.py somewhere into your Python path.
""" % ( __version__, __date__ )
import string
class element:
    """This class handles the addition of a new element.

    Instances are created on the fly by ``page.__getattr__`` (or the
    oneliner factories).  Calling one renders the tag into the parent's
    content, or returns the rendered string when there is no parent.
    """

    def __init__( self, tag, case='lower', parent=None ):
        self.parent = parent

        if case == 'lower':
            self.tag = tag.lower( )
        else:
            self.tag = tag.upper( )

    def __call__( self, *args, **kwargs ):
        if len( args ) > 1:
            raise ArgumentError( self.tag )

        # if class_ was defined in parent it should be added to every element
        if self.parent is not None and self.parent.class_ is not None:
            if 'class_' not in kwargs:
                kwargs['class_'] = self.parent.class_

        # Parentless (oneliner) elements return the rendered string directly.
        if self.parent is None and len( args ) == 1:
            x = [ self.render( self.tag, False, myarg, mydict ) for myarg, mydict in _argsdicts( args, kwargs ) ]
            return '\n'.join( x )
        elif self.parent is None and len( args ) == 0:
            x = [ self.render( self.tag, True, myarg, mydict ) for myarg, mydict in _argsdicts( args, kwargs ) ]
            return '\n'.join( x )

        if self.tag in self.parent.twotags:
            for myarg, mydict in _argsdicts( args, kwargs ):
                self.render( self.tag, False, myarg, mydict )
        elif self.tag in self.parent.onetags:
            if len( args ) == 0:
                for myarg, mydict in _argsdicts( args, kwargs ):
                    self.render( self.tag, True, myarg, mydict ) # here myarg is always None, because len( args ) = 0
            else:
                raise ClosingError( self.tag )
        elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
            raise DeprecationError( self.tag )
        else:
            raise InvalidElementError( self.tag, self.parent.mode )

    def render( self, tag, single, between, kwargs ):
        """Append the actual tags to content (or return the string when parentless)."""
        out = "<%s" % tag
        for key, value in kwargs.iteritems( ):
            if value is not None: # when value is None that means stuff like <... checked>
                key = key.strip('_') # strip this so class_ will mean class, etc.
                if key == 'http_equiv': # special cases, maybe change _ to - overall?
                    key = 'http-equiv'
                elif key == 'accept_charset':
                    key = 'accept-charset'
                out = "%s %s=\"%s\"" % ( out, key, escape( value ) )
            else:
                out = "%s %s" % ( out, key )
        if between is not None:
            out = "%s>%s</%s>" % ( out, between, tag )
        else:
            if single:
                out = "%s />" % out
            else:
                out = "%s>" % out
        if self.parent is not None:
            self.parent.content.append( out )
        else:
            return out

    def close( self ):
        """Append a closing tag unless element has only opening tag."""
        if self.tag in self.parent.twotags:
            self.parent.content.append( "</%s>" % self.tag )
        elif self.tag in self.parent.onetags:
            raise ClosingError( self.tag )
        elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
            raise DeprecationError( self.tag )

    def open( self, **kwargs ):
        """Append an opening tag."""
        if self.tag in self.parent.twotags or self.tag in self.parent.onetags:
            self.render( self.tag, False, None, kwargs )
        # BUG FIX: this used to read ``self.mode``, but element instances never
        # define a ``mode`` attribute -- the mode lives on the parent page
        # (``close`` above already does it correctly).
        elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
            raise DeprecationError( self.tag )
class page:
    """This is our main class representing a document. Elements are added
    as attributes of an instance of this class."""

    def __init__( self, mode='strict_html', case='lower', onetags=None, twotags=None, separator='\n', class_=None ):
        """Stuff that effects the whole document.

        mode -- 'strict_html' for HTML 4.01 (default)
                'html' alias for 'strict_html'
                'loose_html' to allow some deprecated elements
                'xml' to allow arbitrary elements

        case -- 'lower' element names will be printed in lower case (default)
                'upper' they will be printed in upper case

        onetags -- list or tuple of valid elements with opening tags only
        twotags -- list or tuple of valid elements with both opening and closing tags
                   these two keyword arguments may be used to select
                   the set of valid elements in 'xml' mode
                   invalid elements will raise appropriate exceptions

        separator -- string to place between added elements, defaults to newline

        class_ -- a class that will be added to every element if defined"""

        valid_onetags = [ "AREA", "BASE", "BR", "COL", "FRAME", "HR", "IMG", "INPUT", "LINK", "META", "PARAM" ]
        valid_twotags = [ "A", "ABBR", "ACRONYM", "ADDRESS", "B", "BDO", "BIG", "BLOCKQUOTE", "BODY", "BUTTON",
                "CAPTION", "CITE", "CODE", "COLGROUP", "DD", "DEL", "DFN", "DIV", "DL", "DT", "EM", "FIELDSET",
                "FORM", "FRAMESET", "H1", "H2", "H3", "H4", "H5", "H6", "HEAD", "HTML", "I", "IFRAME", "INS",
                "KBD", "LABEL", "LEGEND", "LI", "MAP", "NOFRAMES", "NOSCRIPT", "OBJECT", "OL", "OPTGROUP",
                "OPTION", "P", "PRE", "Q", "SAMP", "SCRIPT", "SELECT", "SMALL", "SPAN", "STRONG", "STYLE",
                "SUB", "SUP", "TABLE", "TBODY", "TD", "TEXTAREA", "TFOOT", "TH", "THEAD", "TITLE", "TR",
                "TT", "UL", "VAR" ]
        deprecated_onetags = [ "BASEFONT", "ISINDEX" ]
        deprecated_twotags = [ "APPLET", "CENTER", "DIR", "FONT", "MENU", "S", "STRIKE", "U" ]

        # The rendered document is the concatenation of these three lists,
        # joined with `separator` (see __str__).
        self.header = [ ]
        self.content = [ ]
        self.footer = [ ]
        self.case = case
        self.separator = separator

        # init( ) sets it to True so we know that </body></html> has to be printed at the end
        self._full = False
        self.class_= class_

        if mode == 'strict_html' or mode == 'html':
            # Both upper- and lower-case spellings are accepted as valid tags.
            self.onetags = valid_onetags
            self.onetags += map( string.lower, self.onetags )
            self.twotags = valid_twotags
            self.twotags += map( string.lower, self.twotags )
            self.deptags = deprecated_onetags + deprecated_twotags
            self.deptags += map( string.lower, self.deptags )
            self.mode = 'strict_html'
        elif mode == 'loose_html':
            # Deprecated tags are folded into the valid sets here, so no
            # DeprecationError can be raised in this mode.
            self.onetags = valid_onetags + deprecated_onetags
            self.onetags += map( string.lower, self.onetags )
            self.twotags = valid_twotags + deprecated_twotags
            self.twotags += map( string.lower, self.twotags )
            self.mode = mode
        elif mode == 'xml':
            if onetags and twotags:
                self.onetags = onetags
                self.twotags = twotags
            elif ( onetags and not twotags ) or ( twotags and not onetags ):
                raise CustomizationError( )
            else:
                # russell( ) "contains" every tag, i.e. anything is allowed.
                self.onetags = russell( )
                self.twotags = russell( )
            self.mode = mode
        else:
            raise ModeError( mode )

    def __getattr__( self, attr ):
        """Unknown attributes become elements whose parent is this page."""

        # Protect built-in dunder lookups (pickling, copying, ...) from being
        # turned into elements.
        if attr.startswith("__") and attr.endswith("__"):
            raise AttributeError, attr
        return element( attr, case=self.case, parent=self )

    def __str__( self ):
        """Render the whole document as a string."""

        if self._full and ( self.mode == 'strict_html' or self.mode == 'loose_html' ):
            end = [ '</body>', '</html>' ]
        else:
            end = [ ]

        return self.separator.join( self.header + self.content + self.footer + end )

    def __call__( self, escape=False ):
        """Return the document as a string.

        escape -- False print normally
                  True replace < and > by &lt; and &gt;
                  the default escape sequences in most browsers"""

        if escape:
            return _escape( self.__str__( ) )
        else:
            return self.__str__( )

    def add( self, text ):
        """This is an alias to addcontent."""
        self.addcontent( text )

    def addfooter( self, text ):
        """Add some text to the bottom of the document"""
        self.footer.append( text )

    def addheader( self, text ):
        """Add some text to the top of the document"""
        self.header.append( text )

    def addcontent( self, text ):
        """Add some text to the main part of the document"""
        self.content.append( text )

    def init( self, lang='en', css=None, metainfo=None, title=None, header=None,
              footer=None, charset=None, encoding=None, doctype=None, bodyattrs=None, script=None ):
        """This method is used for complete documents with appropriate
        doctype, encoding, title, etc information. For an HTML/XML snippet
        omit this method.

        lang -- language, usually a two character string, will appear
                as <html lang='en'> in html mode (ignored in xml mode)

        css -- Cascading Style Sheet filename as a string or a list of
               strings for multiple css files (ignored in xml mode)

        metainfo -- a dictionary in the form { 'name':'content' } to be inserted
                    into meta element(s) as <meta name='name' content='content'>
                    (ignored in xml mode)

        bodyattrs --a dictionary in the form { 'key':'value', ... } which will be added
                    as attributes of the <body> element as <body key='value' ... >
                    (ignored in xml mode)

        script -- dictionary containing src:type pairs, <script type='text/type' src=src></script>

        title -- the title of the document as a string to be inserted into
                 a title element as <title>my title</title> (ignored in xml mode)

        header -- some text to be inserted right after the <body> element
                  (ignored in xml mode)

        footer -- some text to be inserted right before the </body> element
                  (ignored in xml mode)

        charset -- a string defining the character set, will be inserted into a
                   <meta http-equiv='Content-Type' content='text/html; charset=myset'>
                   element (ignored in xml mode)

        encoding -- a string defining the encoding, will be put into to first line of
                    the document as <?xml version='1.0' encoding='myencoding' ?> in
                    xml mode (ignored in html mode)

        doctype -- the document type string, defaults to
                   <!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN'>
                   in html mode (ignored in xml mode)"""

        self._full = True

        if self.mode == 'strict_html' or self.mode == 'loose_html':
            if doctype is None:
                doctype = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN'>"
            self.header.append( doctype )
            self.html( lang=lang )
            self.head( )
            if charset is not None:
                self.meta( http_equiv='Content-Type', content="text/html; charset=%s" % charset )
            if metainfo is not None:
                self.metainfo( metainfo )
            if css is not None:
                self.css( css )
            if title is not None:
                self.title( title )
            if script is not None:
                self.scripts( script )
            self.head.close()
            if bodyattrs is not None:
                self.body( **bodyattrs )
            else:
                self.body( )
            if header is not None:
                self.content.append( header )
            if footer is not None:
                self.footer.append( footer )

        elif self.mode == 'xml':
            if doctype is None:
                if encoding is not None:
                    doctype = "<?xml version='1.0' encoding='%s' ?>" % encoding
                else:
                    doctype = "<?xml version='1.0' ?>"
            self.header.append( doctype )

    def css( self, filelist ):
        """This convenience function is only useful for html.
        It adds css stylesheet(s) to the document via the <link> element."""

        if isinstance( filelist, basestring ):
            self.link( href=filelist, rel='stylesheet', type='text/css', media='all' )
        else:
            for file in filelist:
                self.link( href=file, rel='stylesheet', type='text/css', media='all' )

    def metainfo( self, mydict ):
        """This convenience function is only useful for html.
        It adds meta information via the <meta> element, the argument is
        a dictionary of the form { 'name':'content' }."""

        if isinstance( mydict, dict ):
            for name, content in mydict.iteritems( ):
                self.meta( name=name, content=content )
        else:
            raise TypeError, "Metainfo should be called with a dictionary argument of name:content pairs."

    def scripts( self, mydict ):
        """Only useful in html, mydict is dictionary of src:type pairs will
        be rendered as <script type='text/type' src=src></script>"""

        if isinstance( mydict, dict ):
            for src, type in mydict.iteritems( ):
                self.script( '', src=src, type='text/%s' % type )
        else:
            raise TypeError, "Script should be given a dictionary of src:type pairs."
class _oneliner:
    """Factory whose attribute access yields parentless elements, so a
    single expression like ``oneliner.h1('hi')`` returns a string and no
    page instance is needed."""

    def __init__( self, case='lower' ):
        self.case = case

    def __getattr__( self, attr ):
        # Keep dunder lookups (pickle/copy protocol probes, ...) honest.
        is_dunder = attr.startswith("__") and attr.endswith("__")
        if is_dunder:
            raise AttributeError( attr )
        return element( attr, case=self.case, parent=None )
# Ready-made parentless element factories, e.g. ``oneliner.br( )`` -> '<br />'.
oneliner = _oneliner( case='lower' )
upper_oneliner = _oneliner( case='upper' )
def _argsdicts( args, mydict ):
    """Pad the positional argument against the keyword values.

    ``args`` has length 0 or 1; its single item and every keyword value are
    normalised to tuples, then (arg, kwargs) pairs are yielded up to the
    longest tuple, repeating the last item of any shorter one.
    """
    if len( args ) == 0:
        args = ( None, )
    elif len( args ) == 1:
        args = _totuple( args[0] )
    else:
        raise Exception( "We should have never gotten here." )

    mykeys = list( mydict.keys( ) )
    myvalues = [ _totuple( v ) for v in mydict.values( ) ]

    maxlength = max( map( len, [ args ] + myvalues ) )

    for i in range( maxlength ):
        thisdict = { }
        for key, value in zip( mykeys, myvalues ):
            try:
                thisdict[ key ] = value[i]
            except IndexError:
                thisdict[ key ] = value[-1]
        try:
            thisarg = args[i]
        except IndexError:
            thisarg = args[-1]

        yield thisarg, thisdict
def _totuple( x ):
    """Coerce a string, number, None or any iterable into a usable tuple."""
    if isinstance( x, basestring ):
        return ( x, )
    if isinstance( x, ( int, float ) ):
        return ( str( x ), )
    if x is None:
        return ( None, )
    return tuple( x )
def escape( text, newline=False ):
    """Escape special html characters.

    '&' is handled first so already-escaped text is not double-mangled.
    Single quotes map to the numeric entity &#39; because HTML 4 does not
    define &apos;.
    """
    if isinstance( text, basestring ):
        if '&' in text:
            text = text.replace( '&', '&amp;' )
        if '>' in text:
            text = text.replace( '>', '&gt;' )
        if '<' in text:
            text = text.replace( '<', '&lt;' )
        if '\"' in text:
            text = text.replace( '\"', '&quot;' )
        if '\'' in text:
            # BUG FIX: this used to emit &quot;, silently turning single
            # quotes into double quotes after a render/unescape round-trip.
            text = text.replace( '\'', '&#39;' )
        if newline:
            if '\n' in text:
                text = text.replace( '\n', '<br>' )

    return text

_escape = escape
def unescape( text ):
    """Inverse of escape.

    BUG FIX: '&amp;' must be decoded LAST (it was first).  Decoding it
    first corrupted double-escaped input: '&amp;lt;' became '<' instead
    of '&lt;'.  Also decodes '&#39;' to match what escape( ) now emits.
    """
    if isinstance( text, basestring ):
        if '&gt;' in text:
            text = text.replace( '&gt;', '>' )
        if '&lt;' in text:
            text = text.replace( '&lt;', '<' )
        if '&quot;' in text:
            text = text.replace( '&quot;', '\"' )
        if '&#39;' in text:
            text = text.replace( '&#39;', '\'' )
        if '&amp;' in text:
            text = text.replace( '&amp;', '&' )

    return text
class dummy:
    """A dummy class for attaching attributes."""
    pass

# Common HTML 4.01 doctype strings, for use as page.init( doctype=... ).
doctype = dummy( )
doctype.frameset = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Frameset//EN' 'http://www.w3.org/TR/html4/frameset.dtd'>"
doctype.strict = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01//EN' 'http://www.w3.org/TR/html4/strict.dtd'>"
doctype.loose = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'>"
class russell:
    """A set-like object that claims to contain everything; used as the
    element whitelist in 'xml' mode so any tag is accepted."""

    def __contains__( self, item ):
        return True
class MarkupError( Exception ):
    """Root of this module's exception hierarchy; subclasses store their
    text in ``self.message``."""

    def __str__( self ):
        return self.message
class ClosingError( MarkupError ):
    """Raised when body content is given to an element that has no closing tag."""
    def __init__( self, tag ):
        self.message = "The element '%s' does not accept non-keyword arguments (has no closing tag)." % tag
class OpeningError( MarkupError ):
    """Raised for elements that cannot be opened."""
    def __init__( self, tag ):
        self.message = "The element '%s' can not be opened." % tag
class ArgumentError( MarkupError ):
    """Raised when an element is called with more than one positional argument."""
    def __init__( self, tag ):
        self.message = "The element '%s' was called with more than one non-keyword argument." % tag
class InvalidElementError( MarkupError ):
    """Raised when a tag is not in the allowed set for the current mode."""
    def __init__( self, tag, mode ):
        self.message = "The element '%s' is not valid for your mode '%s'." % ( tag, mode )
class DeprecationError( MarkupError ):
    """Raised in strict_html mode for tags deprecated in HTML 4.01."""
    def __init__( self, tag ):
        self.message = "The element '%s' is deprecated, instantiate markup.page with mode='loose_html' to allow it." % tag
class ModeError( MarkupError ):
    """Raised when page( ) is constructed with an unknown mode string."""
    def __init__( self, mode ):
        self.message = "Mode '%s' is invalid, possible values: strict_html, loose_html, xml." % mode
class CustomizationError( MarkupError ):
    """Raised in xml mode when only one of onetags/twotags is customized."""
    def __init__( self ):
        self.message = "If you customize the allowed elements, you must define both types 'onetags' and 'twotags'."
if __name__ == '__main__':
    # Running the module directly just prints the usage notes (Python 2 print).
    print __doc__
|
alexandrucoman/vbox-nova-driver | refs/heads/master | nova/virt/vmwareapi/__init__.py | 97 | # Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`vmwareapi` -- Nova support for VMware vCenter through VMware API.
"""
# NOTE(sdague) for nicer compute_driver specification
from nova.virt.vmwareapi import driver

# Re-export so the option ``compute_driver=vmwareapi.VMwareVCDriver`` works.
VMwareVCDriver = driver.VMwareVCDriver
|
matburt/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/vca.py | 21 | # (c) 2016, Charles Paul <cpaul@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for VCA (VMware vCloud Air) modules.

    The DOCUMENTATION string below is YAML consumed by Ansible's doc
    tooling; it is data, not a docstring, and must stay valid YAML.
    """

    # Parameters for VCA modules
    DOCUMENTATION = """
options:
    username:
        description:
            - The vca username or email address, if not set the environment variable VCA_USER is checked for the username.
        required: false
        default: None
        aliases: ['user']
    password:
        description:
            - The vca password, if not set the environment variable VCA_PASS is checked for the password
        required: false
        default: None
        aliases: ['pass', 'pwd']
    org:
        description:
            - The org to login to for creating vapp, mostly set when the service_type is vdc.
        required: false
        default: None
    instance_id:
        description:
            - The instance id in a vchs environment to be used for creating the vapp
        required: false
        default: None
    host:
        description:
            - The authentication host to be used when service type  is vcd.
        required: false
        default: None
    api_version:
        description:
            - The api version to be used with the vca
        required: false
        default: "5.7"
    service_type:
        description:
            - The type of service we are authenticating against
        required: false
        default: vca
        choices: [ "vca", "vchs", "vcd" ]
    state:
        description:
            - if the object should be added or removed
        required: false
        default: present
        choices: [ "present", "absent" ]
    verify_certs:
        description:
            - If the certificates of the authentication is to be verified
        required: false
        default: True
    vdc_name:
        description:
            - The name of the vdc where the gateway is located.
        required: false
        default: None
    gateway_name:
        description:
            - The name of the gateway of the vdc where the rule should be added
        required: false
        default: gateway
"""
|
jjmiranda/edx-platform | refs/heads/master | lms/djangoapps/student_account/test/test_views.py | 1 | # -*- coding: utf-8 -*-
""" Tests for student account views. """
import logging
import re
from unittest import skipUnless
from urllib import urlencode
import mock
import ddt
from django.conf import settings
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.http import HttpRequest
from edx_rest_api_client import exceptions
from nose.plugins.attrib import attr
from testfixtures import LogCapture
from commerce.models import CommerceConfiguration
from commerce.tests import TEST_API_URL, TEST_API_SIGNING_KEY, factories
from commerce.tests.mocks import mock_get_orders
from course_modes.models import CourseMode
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student_account.views import account_settings_context, get_user_orders
from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme_context
LOGGER_NAME = 'audit'
@ddt.ddt
class StudentAccountUpdateTest(CacheIsolationTestCase, UrlResetMixin):
    """ Tests for the student account views that update the user's account information. """

    USERNAME = u"heisenberg"
    ALTERNATE_USERNAME = u"walt"
    # Non-ASCII passwords deliberately exercise unicode handling in the reset flow.
    OLD_PASSWORD = u"ḅḷüëṡḳÿ"
    NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
    OLD_EMAIL = u"walter@graymattertech.com"
    NEW_EMAIL = u"walt@savewalterwhite.com"

    # Number of consecutive bad requests used to trip the reset rate limiter.
    INVALID_ATTEMPTS = 100

    INVALID_EMAILS = [
        None,
        u"",
        u"a",
        "no_domain",
        "no+domain",
        "@",
        "@domain.com",
        "test@no_extension",

        # Long email -- subtract the length of the @domain
        # except for one character (so we exceed the max length limit)
        u"{user}@example.com".format(
            user=(u'e' * (EMAIL_MAX_LENGTH - 11))
        )
    ]

    INVALID_KEY = u"123abc"

    URLCONF_MODULES = ['student_accounts.urls']

    ENABLED_CACHES = ['default']

    def setUp(self):
        """Create and activate a test account, then log it in."""
        super(StudentAccountUpdateTest, self).setUp()

        # Create/activate a new account
        activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
        activate_account(activation_key)

        # Login
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertTrue(result)

    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    def test_password_change(self):
        """Full happy path: request reset, follow emailed link, set password, re-login."""
        # Request a password change while logged in, simulating
        # use of the password reset link from the account page
        response = self._change_password()
        self.assertEqual(response.status_code, 200)

        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)

        # Retrieve the activation link from the email body
        email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)
        activation_link = result.group('url')

        # Visit the activation link
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)

        # Submit a new password and follow the redirect to the success page
        response = self.client.post(
            activation_link,
            # These keys are from the form on the current password reset confirmation page.
            {'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Your password has been reset.")

        # Log the user out to clear session data
        self.client.logout()

        # Verify that the new password can be used to log in
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)

        # Try reusing the activation link to change the password again
        response = self.client.post(
            activation_link,
            {'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "This password reset link is invalid. It may have been used already.")

        self.client.logout()

        # Verify that the old password cannot be used to log in
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertFalse(result)

        # Verify that the new password continues to be valid
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)

    @ddt.data(True, False)
    def test_password_change_logged_out(self, send_email):
        """A logged-out reset succeeds with an email in the POST; 400 without one."""
        # Log the user out
        self.client.logout()

        # Request a password change while logged out, simulating
        # use of the password reset link from the login page
        if send_email:
            response = self._change_password(email=self.OLD_EMAIL)
            self.assertEqual(response.status_code, 200)
        else:
            # Don't send an email in the POST data, simulating
            # its (potentially accidental) omission in the POST
            # data sent from the login page
            response = self._change_password()
            self.assertEqual(response.status_code, 400)

    def test_password_change_inactive_user(self):
        """A reset for a not-yet-activated account still sends the email."""
        # Log out the user created during test setup
        self.client.logout()

        # Create a second user, but do not activate it
        create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)

        # Send the view the email address tied to the inactive user
        response = self._change_password(email=self.NEW_EMAIL)

        # Expect that the activation email is still sent,
        # since the user may have lost the original activation email.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)

    def test_password_change_no_user(self):
        """An unknown email still returns 200 (no user enumeration) but is logged."""
        # Log out the user created during test setup
        self.client.logout()

        with LogCapture(LOGGER_NAME, level=logging.INFO) as logger:
            # Send the view an email address not tied to any user
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, 200)

        logger.check((LOGGER_NAME, 'INFO', 'Invalid password reset attempt'))

    def test_password_change_rate_limited(self):
        """Repeated bad reset requests eventually get a 403 from the rate limiter."""
        # Log out the user created during test setup, to prevent the view from
        # selecting the logged-in user's email address over the email provided
        # in the POST data
        self.client.logout()

        # Make many consecutive bad requests in an attempt to trigger the rate limiter
        for __ in xrange(self.INVALID_ATTEMPTS):
            self._change_password(email=self.NEW_EMAIL)

        response = self._change_password(email=self.NEW_EMAIL)
        self.assertEqual(response.status_code, 403)

    @ddt.data(
        ('post', 'password_change_request', []),
    )
    @ddt.unpack
    def test_require_http_method(self, correct_method, url_name, args):
        """Every HTTP method other than the expected one must return 405."""
        wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
        url = reverse(url_name, args=args)

        for method in wrong_methods:
            response = getattr(self.client, method)(url)
            self.assertEqual(response.status_code, 405)

    def _change_password(self, email=None):
        """Request to change the user's password. """
        data = {}

        if email:
            data['email'] = email

        return self.client.post(path=reverse('password_change_request'), data=data)
@attr(shard=3)
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
    """ Tests for the student account views that update the user's account information. """

    USERNAME = "bob"
    EMAIL = "bob@example.com"
    PASSWORD = "password"

    URLCONF_MODULES = ['embargo']

    @mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
    def setUp(self):
        """Enable the three third-party auth providers the assertions below rely on."""
        super(StudentAccountLoginAndRegistrationTest, self).setUp()

        # For these tests, three third party auth providers are enabled by default:
        self.configure_google_provider(enabled=True, visible=True)
        self.configure_facebook_provider(enabled=True, visible=True)
        self.configure_dummy_provider(
            visible=True,
            enabled=True,
            icon_class='',
            icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
        )

    @ddt.data(
        ("signin_user", "login"),
        ("register_user", "register"),
    )
    @ddt.unpack
    def test_login_and_registration_form(self, url_name, initial_mode):
        """Each view renders the combined form in the matching initial mode."""
        response = self.client.get(reverse(url_name))
        expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
        self.assertContains(response, expected_data)

    @ddt.data("signin_user", "register_user")
    def test_login_and_registration_form_already_authenticated(self, url_name):
        """Authenticated users are redirected from login/register to the dashboard."""
        # Create/activate a new account and log in
        activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        activate_account(activation_key)
        result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.assertTrue(result)

        # Verify that we're redirected to the dashboard
        response = self.client.get(reverse(url_name))
        self.assertRedirects(response, reverse("dashboard"))

    @ddt.data(
        (None, "signin_user"),
        (None, "register_user"),
        ("edx.org", "signin_user"),
        ("edx.org", "register_user"),
    )
    @ddt.unpack
    def test_login_and_registration_form_signin_preserves_params(self, theme, url_name):
        """Querystring params survive into the rendered sign-in URL, themed or not."""
        params = [
            ('course_id', 'edX/DemoX/Demo_Course'),
            ('enrollment_action', 'enroll'),
        ]

        # The response should have a "Sign In" button with the URL
        # that preserves the querystring params
        with with_comprehensive_theme_context(theme):
            response = self.client.get(reverse(url_name), params)

        expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
        self.assertContains(response, expected_url)

        # Add additional parameters:
        params = [
            ('course_id', 'edX/DemoX/Demo_Course'),
            ('enrollment_action', 'enroll'),
            ('course_mode', CourseMode.DEFAULT_MODE_SLUG),
            ('email_opt_in', 'true'),
            ('next', '/custom/final/destination')
        ]

        # Verify that this parameter is also preserved
        with with_comprehensive_theme_context(theme):
            response = self.client.get(reverse(url_name), params)

        expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
        self.assertContains(response, expected_url)

    @mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
    @ddt.data("signin_user", "register_user")
    def test_third_party_auth_disabled(self, url_name):
        """With the feature flag off, no providers are advertised to the client."""
        response = self.client.get(reverse(url_name))
        self._assert_third_party_auth_data(response, None, None, [])

    @ddt.data(
        ("signin_user", None, None),
        ("register_user", None, None),
        ("signin_user", "google-oauth2", "Google"),
        ("register_user", "google-oauth2", "Google"),
        ("signin_user", "facebook", "Facebook"),
        ("register_user", "facebook", "Facebook"),
        ("signin_user", "dummy", "Dummy"),
        ("register_user", "dummy", "Dummy"),
    )
    @ddt.unpack
    def test_third_party_auth(self, url_name, current_backend, current_provider):
        """Provider data is rendered correctly with and without a running auth pipeline."""
        params = [
            ('course_id', 'course-v1:Org+Course+Run'),
            ('enrollment_action', 'enroll'),
            ('course_mode', CourseMode.DEFAULT_MODE_SLUG),
            ('email_opt_in', 'true'),
            ('next', '/custom/final/destination'),
        ]

        # Simulate a running pipeline
        if current_backend is not None:
            pipeline_target = "student_account.views.third_party_auth.pipeline"
            with simulate_running_pipeline(pipeline_target, current_backend):
                response = self.client.get(reverse(url_name), params)

        # Do NOT simulate a running pipeline
        else:
            response = self.client.get(reverse(url_name), params)

        # This relies on the THIRD_PARTY_AUTH configuration in the test settings
        expected_providers = [
            {
                "id": "oa2-dummy",
                "name": "Dummy",
                "iconClass": None,
                "iconImage": settings.MEDIA_URL + "icon.svg",
                "loginUrl": self._third_party_login_url("dummy", "login", params),
                "registerUrl": self._third_party_login_url("dummy", "register", params)
            },
            {
                "id": "oa2-facebook",
                "name": "Facebook",
                "iconClass": "fa-facebook",
                "iconImage": None,
                "loginUrl": self._third_party_login_url("facebook", "login", params),
                "registerUrl": self._third_party_login_url("facebook", "register", params)
            },
            {
                "id": "oa2-google-oauth2",
                "name": "Google",
                "iconClass": "fa-google-plus",
                "iconImage": None,
                "loginUrl": self._third_party_login_url("google-oauth2", "login", params),
                "registerUrl": self._third_party_login_url("google-oauth2", "register", params)
            },
        ]
        self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers)

    def test_hinted_login(self):
        """A tpa_hint in the next URL is passed through to the client as a hint."""
        params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
        response = self.client.get(reverse('signin_user'), params)
        self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')

    @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
    def test_microsite_uses_old_login_page(self):
        # Retrieve the login page from a microsite domain
        # and verify that we're served the old page.
        resp = self.client.get(
            reverse("signin_user"),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertContains(resp, "Log into your Test Site Account")
        self.assertContains(resp, "login-form")

    def test_microsite_uses_old_register_page(self):
        # Retrieve the register page from a microsite domain
        # and verify that we're served the old page.
        resp = self.client.get(
            reverse("register_user"),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertContains(resp, "Register for Test Site")
        self.assertContains(resp, "register-form")

    def test_login_registration_xframe_protected(self):
        """Framing is denied by default and allowed for configured LTI consumers."""
        resp = self.client.get(
            reverse("register_user"),
            {},
            HTTP_REFERER="http://localhost/iframe"
        )
        self.assertEqual(resp['X-Frame-Options'], 'DENY')

        self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)

        resp = self.client.get(
            reverse("register_user"),
            HTTP_REFERER="http://localhost/iframe"
        )
        self.assertEqual(resp['X-Frame-Options'], 'ALLOW')

    def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers):
        """Verify that third party auth info is rendered correctly in a DOM data attribute. """
        finish_auth_url = None
        if current_backend:
            finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"

        auth_info = {
            "currentProvider": current_provider,
            "providers": providers,
            "secondaryProviders": [],
            "finishAuthUrl": finish_auth_url,
            "errorMessage": None,
        }
        auth_info = dump_js_escaped_json(auth_info)

        expected_data = '"third_party_auth": {auth_info}'.format(
            auth_info=auth_info
        )
        self.assertContains(response, expected_data)

    def _third_party_login_url(self, backend_name, auth_entry, login_params):
        """Construct the login URL to start third party authentication. """
        return u"{url}?auth_entry={auth_entry}&{param_str}".format(
            url=reverse("social:begin", kwargs={"backend": backend_name}),
            auth_entry=auth_entry,
            param_str=self._finish_auth_url_param(login_params),
        )

    def _finish_auth_url_param(self, params):
        """
        Make the next=... URL parameter that indicates where the user should go next.

        >>> _finish_auth_url_param([('next', '/dashboard')])
        '/account/finish_auth?next=%2Fdashboard'
        """
        return urlencode({
            'next': '/account/finish_auth?{}'.format(urlencode(params))
        })
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase, ProgramsApiConfigMixin):
    """ Tests for the account settings view. """

    USERNAME = 'student'
    PASSWORD = 'password'
    # Fields that must always be exposed on the account settings page.
    FIELDS = [
        'country',
        'gender',
        'language',
        'level_of_education',
        'password',
        'year_of_birth',
        'preferred_language',
        'time_zone',
    ]

    @mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
    def setUp(self):
        """Create a logged-in user, enable commerce config, and seed a duplicate-provider message."""
        super(AccountSettingsViewTest, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        CommerceConfiguration.objects.create(cache_ttl=10, enabled=True)
        self.client.login(username=self.USERNAME, password=self.PASSWORD)

        self.request = HttpRequest()
        self.request.user = self.user

        # For these tests, two third party auth providers are enabled by default:
        self.configure_google_provider(enabled=True, visible=True)
        self.configure_facebook_provider(enabled=True, visible=True)

        # Python-social saves auth failure notifications in Django messages.
        # See pipeline.get_duplicate_provider() for details.
        self.request.COOKIES = {}
        MessageMiddleware().process_request(self.request)
        messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')

    def test_context(self):
        """The template context exposes API URLs, editable fields, and provider info."""
        context = account_settings_context(self.request)

        user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
        self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)

        user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
        self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)

        for attribute in self.FIELDS:
            self.assertIn(attribute, context['fields'])

        self.assertEqual(
            context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
        )
        self.assertEqual(
            context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
        )

        self.assertEqual(context['duplicate_provider'], 'facebook')
        self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
        self.assertEqual(context['auth']['providers'][1]['name'], 'Google')

    def test_view(self):
        """
        Test that all fields are visible
        """
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)

        for attribute in self.FIELDS:
            self.assertIn(attribute, response.content)

    def test_header_with_programs_listing_enabled(self):
        """
        Verify that tabs header will be shown while program listing is enabled.
        """
        self.create_programs_config(program_listing_enabled=True)
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)

        self.assertContains(response, '<li class="tab-nav-item">')

    def test_header_with_programs_listing_disabled(self):
        """
        Verify that nav header will be shown while program listing is disabled.
        """
        self.create_programs_config(program_listing_enabled=False)
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)

        self.assertContains(response, '<li class="item nav-global-01">')

    def test_commerce_order_detail(self):
        """A completed order is summarized with number, price, title, date and receipt URL."""
        with mock_get_orders():
            order_detail = get_user_orders(self.user)

        user_order = mock_get_orders.default_response['results'][0]
        expected = [
            {
                'number': user_order['number'],
                'price': user_order['total_excl_tax'],
                'title': user_order['lines'][0]['title'],
                'order_date': 'Jan 01, 2016',
                'receipt_url': '/commerce/checkout/receipt/?orderNum=' + user_order['number']
            }
        ]
        self.assertEqual(order_detail, expected)

    def test_commerce_order_detail_exception(self):
        """An ecommerce API error yields an empty order history rather than a crash."""
        with mock_get_orders(exception=exceptions.HttpNotFoundError):
            order_detail = get_user_orders(self.user)

        self.assertEqual(order_detail, [])

    def test_incomplete_order_detail(self):
        """Orders that are not complete are excluded from the history."""
        response = {
            'results': [
                factories.OrderFactory(
                    status='Incomplete',
                    lines=[
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory()])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)

        self.assertEqual(order_detail, [])

    def test_honor_course_order_detail(self):
        """Orders for honor-mode certificates are excluded from the history."""
        response = {
            'results': [
                factories.OrderFactory(
                    lines=[
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                                name='certificate_type',
                                value='honor'
                            )])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)

        self.assertEqual(order_detail, [])

    def test_order_history_with_no_product(self):
        """Order lines without a product are skipped; valid lines still appear."""
        response = {
            'results': [
                factories.OrderFactory(
                    lines=[
                        factories.OrderLineFactory(
                            product=None
                        ),
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                                name='certificate_type',
                                value='verified'
                            )])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)

        self.assertEqual(len(order_detail), 1)
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
    """
    Test to validate that microsites can display the logistration page
    """

    # Marker element rendered only by the combined login/registration page.
    LOGISTRATION_MARKER = '<div id="login-and-registration-container"'

    def _get_page(self, url_name, host):
        """Fetch the named view as if it were requested from ``host``."""
        return self.client.get(reverse(url_name), HTTP_HOST=host)

    def test_login_page(self):
        """
        Make sure that we get the expected logistration page on our specialized
        microsite
        """
        resp = self._get_page('signin_user', settings.MICROSITE_LOGISTRATION_HOSTNAME)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.LOGISTRATION_MARKER, resp.content)

    def test_registration_page(self):
        """
        Make sure that we get the expected logistration page on our specialized
        microsite
        """
        resp = self._get_page('register_user', settings.MICROSITE_LOGISTRATION_HOSTNAME)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.LOGISTRATION_MARKER, resp.content)

    @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
    def test_no_override(self):
        """
        Make sure we get the old style login/registration if we don't override
        """
        for url_name in ('signin_user', 'register_user'):
            resp = self._get_page(url_name, settings.MICROSITE_TEST_HOSTNAME)
            self.assertEqual(resp.status_code, 200)
            self.assertNotIn(self.LOGISTRATION_MARKER, resp.content)
|
Abi1ity/uniclust2.0 | refs/heads/master | flask/lib/python2.7/site-packages/django/middleware/common.py | 52 | import hashlib
import logging
import re
import warnings
from django.conf import settings
from django.core.mail import mail_managers
from django.core import urlresolvers
from django import http
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils import six
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing
          "www."s.

            - If APPEND_SLASH is set and the initial URL doesn't end with a
              slash, and it is not found in urlpatterns, a new URL is formed by
              appending a slash at the end. If this new URL is found in
              urlpatterns, then an HTTP-redirect is returned to this new URL;
              otherwise the initial URL is processed as usual.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated from
          the entire page content and Not Modified responses will be returned
          appropriately.
    """

    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """

        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    logger.warning('Forbidden (User agent): %s', request.path,
                        extra={
                            'status_code': 403,
                            'request': request
                        }
                    )
                    return http.HttpResponseForbidden('<h1>Forbidden</h1>')

        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        host = request.get_host()
        old_url = [host, request.path]
        new_url = old_url[:]

        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]

        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            urlconf = getattr(request, 'urlconf', None)
            if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
                    urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
                new_url[1] = new_url[1] + '/'
                # Redirecting a POST would silently drop the body, so fail
                # loudly in DEBUG rather than lose data.
                if settings.DEBUG and request.method == 'POST':
                    raise RuntimeError((""
                    "You called this URL via POST, but the URL doesn't end "
                    "in a slash and you have APPEND_SLASH set. Django can't "
                    "redirect to the slash URL while maintaining POST data. "
                    "Change your form to point to %s%s (note the trailing "
                    "slash), or set APPEND_SLASH=False in your Django "
                    "settings.") % (new_url[0], new_url[1]))

        if new_url == old_url:
            # No redirects required.
            return
        if new_url[0]:
            newurl = "%s://%s%s" % (
                request.scheme,
                new_url[0], urlquote(new_url[1]))
        else:
            newurl = urlquote(new_url[1])
        if request.META.get('QUERY_STRING', ''):
            if six.PY3:
                newurl += '?' + request.META['QUERY_STRING']
            else:
                # `query_string` is a bytestring. Appending it to the unicode
                # string `newurl` will fail if it isn't ASCII-only. This isn't
                # allowed; only broken software generates such query strings.
                # Better drop the invalid query string than crash (#15152).
                try:
                    newurl += '?' + request.META['QUERY_STRING'].decode()
                except UnicodeDecodeError:
                    pass
        return http.HttpResponsePermanentRedirect(newurl)

    def process_response(self, request, response):
        """
        Calculate the ETag, if needed.
        """
        if settings.SEND_BROKEN_LINK_EMAILS:
            warnings.warn("SEND_BROKEN_LINK_EMAILS is deprecated. "
                          "Use BrokenLinkEmailsMiddleware instead.",
                          RemovedInDjango18Warning, stacklevel=2)
            BrokenLinkEmailsMiddleware().process_response(request, response)

        if settings.USE_ETAGS:
            if response.has_header('ETag'):
                etag = response['ETag']
            elif response.streaming:
                # Streaming content cannot be hashed without consuming it.
                etag = None
            else:
                etag = '"%s"' % hashlib.md5(response.content).hexdigest()
            if etag is not None:
                if (200 <= response.status_code < 300
                        and request.META.get('HTTP_IF_NONE_MATCH') == etag):
                    # Client already has this version; keep cookies on the 304.
                    cookies = response.cookies
                    response = http.HttpResponseNotModified()
                    response.cookies = cookies
                else:
                    response['ETag'] = etag

        return response
class BrokenLinkEmailsMiddleware(object):
    """Email the site managers about 404 responses that look like broken links."""

    def process_response(self, request, response):
        """
        Send broken link emails for relevant 404 NOT FOUND responses.
        """
        if response.status_code != 404 or settings.DEBUG:
            return response

        domain = request.get_host()
        path = request.get_full_path()
        referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')

        if not self.is_ignorable_request(request, path, domain, referer):
            ua = request.META.get('HTTP_USER_AGENT', '<none>')
            ip = request.META.get('REMOTE_ADDR', '<none>')
            internal = 'INTERNAL ' if self.is_internal_request(domain, referer) else ''
            subject = "Broken %slink on %s" % (internal, domain)
            body = ("Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
                    "IP address: %s\n" % (referer, path, ua, ip))
            mail_managers(subject, body, fail_silently=True)

        return response

    def is_internal_request(self, domain, referer):
        """
        Returns True if the referring URL is the same domain as the current request.
        """
        # Different subdomains are treated as different domains.
        pattern = "^https?://%s/" % re.escape(domain)
        return re.match(pattern, referer) is not None

    def is_ignorable_request(self, request, uri, domain, referer):
        """
        Returns True if the given request *shouldn't* notify the site managers.
        """
        if not referer:
            return True
        # '?' in referer is identified as search engine source
        if '?' in referer and not self.is_internal_request(domain, referer):
            return True
        return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
|
gkotton/neutron | refs/heads/master | neutron/extensions/agent.py | 16 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron import manager
# Attribute Map
#
# Agents register themselves by reporting state, so no attribute allows POST;
# only 'admin_state_up' and 'description' may be updated via PUT.
RESOURCE_NAME = 'agent'
RESOURCE_ATTRIBUTE_MAP = {
    RESOURCE_NAME + 's': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'agent_type': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
        'binary': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'topic': {'allow_post': False, 'allow_put': False,
                  'is_visible': True},
        'host': {'allow_post': False, 'allow_put': False,
                 'is_visible': True},
        'admin_state_up': {'allow_post': False, 'allow_put': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'created_at': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
        'started_at': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
        'heartbeat_timestamp': {'allow_post': False, 'allow_put': False,
                                'is_visible': True},
        'alive': {'allow_post': False, 'allow_put': False,
                  'is_visible': True},
        'configurations': {'allow_post': False, 'allow_put': False,
                           'is_visible': True},
        'description': {'allow_post': False, 'allow_put': True,
                        'is_visible': True,
                        'validate': {'type:string': None}},
    },
}
class AgentNotFound(exceptions.NotFound):
    """Raised when no agent exists with the requested id."""

    message = _("Agent %(id)s could not be found")
class AgentNotFoundByTypeHost(exceptions.NotFound):
    """Raised when no agent matches the given (agent_type, host) pair."""

    message = _("Agent with agent_type=%(agent_type)s and host=%(host)s "
                "could not be found")
class MultipleAgentFoundByTypeHost(exceptions.Conflict):
    """Raised when more than one agent matches the given (agent_type, host) pair."""

    message = _("Multiple agents with agent_type=%(agent_type)s and "
                "host=%(host)s found")
class Agent(object):
    """Agent management extension."""

    @classmethod
    def get_name(cls):
        # Human-readable extension name.
        return "agent"

    @classmethod
    def get_alias(cls):
        # Alias used to reference the extension in API requests.
        return "agent"

    @classmethod
    def get_description(cls):
        return "The agent management extension."

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/agent/api/v2.0"

    @classmethod
    def get_updated(cls):
        # Timestamp of the last change to this extension's definition.
        return "2013-02-03T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        # Register singular forms so attribute validation can map plurals.
        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
        attr.PLURALS.update(dict(my_plurals))
        plugin = manager.NeutronManager.get_plugin()
        params = RESOURCE_ATTRIBUTE_MAP.get(RESOURCE_NAME + 's')
        controller = base.create_resource(RESOURCE_NAME + 's',
                                          RESOURCE_NAME,
                                          plugin, params
                                          )

        ex = extensions.ResourceExtension(RESOURCE_NAME + 's',
                                          controller)

        return [ex]

    def get_extended_resources(self, version):
        # The agent attributes only exist in the v2.0 API.
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
class AgentPluginBase(object):
    """REST API to operate the Agent.

    All methods must be called in an admin context.
    """

    def create_agent(self, context, agent):
        """Create agent.

        This operation is not allowed via the REST API.
        @raise exceptions.BadRequest:
        """
        raise exceptions.BadRequest()

    @abc.abstractmethod
    def delete_agent(self, context, id):
        """Delete agent.

        Agents register themselves on reporting state.
        But if an agent does not report its status
        for a long time (for example, it is dead forever),
        admin can remove it. Agents must be disabled before
        being removed.
        """
        pass

    @abc.abstractmethod
    def update_agent(self, context, agent):
        """Disable or enable the agent.

        Description can also be updated. Some agents cannot be disabled, such
        as plugins, services. An error code should be reported in this case.
        @raise exceptions.BadRequest:
        """
        pass

    @abc.abstractmethod
    def get_agents(self, context, filters=None, fields=None):
        """Return all agents, optionally filtered and trimmed to ``fields``."""
        pass

    @abc.abstractmethod
    def get_agent(self, context, id, fields=None):
        """Return a single agent by id, optionally trimmed to ``fields``."""
        pass
|
nuobit/odoo-addons | refs/heads/11.0 | connector_sage/models/payroll_sage_payslip_check/binding.py | 1 | # Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import models, fields
from odoo.addons.component.core import Component
from odoo.addons.queue_job.job import job
class PayslipCheck(models.Model):
    # Extend the base payslip-check model with its Sage connector bindings.
    _inherit = 'payroll.sage.payslip.check'

    # One binding record per Sage backend this check is synchronized with.
    sage_bind_ids = fields.One2many(
        comodel_name='sage.payroll.sage.payslip.check',
        inverse_name='odoo_id',
        string='Sage Bindings',
    )
class PayslipCheckBinding(models.Model):
    # Binding model linking an Odoo payslip check to its Sage counterpart.
    _name = 'sage.payroll.sage.payslip.check'
    _inherit = 'sage.binding'
    _inherits = {'payroll.sage.payslip.check': 'odoo_id'}

    odoo_id = fields.Many2one(comodel_name='payroll.sage.payslip.check',
                              string='Payslip check',
                              required=True,
                              ondelete='cascade')

    ## composed id
    # The fields below together form the composite identifier of the
    # corresponding record on the Sage side.
    sage_codigo_empresa = fields.Integer(string="CodigoEmpresa", required=True)
    sage_codigo_empleado = fields.Integer(string="CodigoEmpleado", required=True)
    sage_ano = fields.Integer(string="Año", required=True)
    sage_mesd = fields.Integer(string="MesD", required=True)
    sage_tipo_proceso = fields.Char(string="TipoProceso", required=True)
    sage_id_empleado = fields.Char(string="IdEmpleado", required=True)
    sage_orden_nom = fields.Integer(string="OrdenNom", required=True)

    # Enforce uniqueness of the composite Sage identifier.
    _sql_constraints = [
        ('uniq',
         'unique(sage_codigo_empresa, sage_codigo_empleado, sage_ano, '
         'sage_mesd, sage_tipo_proceso, sage_id_empleado, sage_orden_nom)',
         'Payroll structure with same ID on Sage already exists.'),
    ]

    @job(default_channel='root.sage')
    def import_payslip_checks(self, payslip_id, backend_record):
        """ Prepare the import of payslip from Sage """
        # Filter keys are the Sage-side column names queried by the importer.
        filters = {
            'CodigoEmpresa': backend_record.sage_company_id,
            'Año': payslip_id.year,
            'MesD': ('between', (payslip_id.month_from, payslip_id.month_to)),
            'TipoProceso': payslip_id.process,
        }
        self.env['sage.payroll.sage.payslip.check'].import_batch(
            backend=backend_record, filters=filters)
        return True
|
mccdaq/mcculw | refs/heads/master | examples/ui/ULDI03.py | 1 | """
File: ULDI03.py
Library Call Demonstrated: mcculw.ul.d_in_scan()
Purpose: Reads digital input port(s) at specified rate and
number of samples.
Demonstration: Configures the first one or two digital
scan ports for input (if programmable)
and reads the value on the port.
Other Library Calls: mcculw.ul.win_buf_alloc()
mcculw.ul.win_buf_free()
mcculw.ul.d_config_port()
mcculw.ul.stop_background()
Special Requirements: Device must support paced digital input
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from tkinter import messagebox
from ctypes import cast, POINTER, c_ushort
from mcculw import ul
from mcculw.enums import ScanOptions, Status, FunctionType, DigitalIODirection
from mcculw.ul import ULError
from mcculw.device_info import DaqDeviceInfo
try:
from ui_examples_util import UIExample, show_ul_error
except ImportError:
from .ui_examples_util import UIExample, show_ul_error
class ULDI03(UIExample):
    """UI example: run a paced background digital-input scan and
    periodically display scan status and the most recent values."""

    def __init__(self, master=None):
        super(ULDI03, self).__init__(master)
        # By default, the example detects all available devices and selects the
        # first device listed.
        # If use_device_detection is set to False, the board_num property needs
        # to match the desired board number configured with Instacal.
        use_device_detection = True
        self.board_num = 0

        try:
            if use_device_detection:
                self.configure_first_detected_device()

            self.device_info = DaqDeviceInfo(self.board_num)
            dio_info = self.device_info.get_dio_info()

            # Find the first port that supports input, defaulting to None
            # if one is not found.
            self.port = next((port for port in dio_info.port_info
                              if port.supports_input_scan), None)
            if self.port is not None:
                self.create_widgets()
            else:
                self.create_unsupported_widgets()
        except ULError:
            self.create_unsupported_widgets(True)

    def start_scan(self):
        """Allocate a sample buffer and start a background input scan."""
        rate = 100
        count = 1000

        # Allocate a buffer for the scan
        self.memhandle = ul.win_buf_alloc(count)
        # Check if the buffer was successfully allocated
        if not self.memhandle:
            messagebox.showerror("Error", "Failed to allocate memory")
            self.set_ui_idle_state()
            return

        try:
            # Configure the port (if necessary)
            if self.port.is_port_configurable:
                ul.d_config_port(self.board_num, self.port.type,
                                 DigitalIODirection.IN)

            # Run the scan
            ul.d_in_scan(self.board_num, self.port.type, count, rate,
                         self.memhandle, ScanOptions.BACKGROUND)
        except ULError as e:
            show_ul_error(e)
            self.set_ui_idle_state()
            return

        # Convert the memhandle to a ctypes array
        # Note: the ctypes array will no longer be valid after win_buf_free is
        # called. A copy of the buffer can be created using win_buf_to_array
        # before the memory is freed. The copy can be used at any time.
        self.ctypes_array = cast(self.memhandle, POINTER(c_ushort))

        # Start updating the displayed values
        self.update_displayed_values()

    def update_displayed_values(self):
        """Poll scan status every 100 ms and refresh the UI; free the
        buffer and stop the background operation once the scan ends."""
        # Get the status from the device
        status, curr_count, curr_index = ul.get_status(self.board_num,
                                                       FunctionType.DIFUNCTION)

        # Display the status info
        self.update_status_labels(status, curr_count, curr_index)

        # Display the values
        self.display_values(curr_index, curr_count)

        # Call this method again until the stop button is pressed (or an error
        # occurs)
        if status == Status.RUNNING:
            self.after(100, self.update_displayed_values)
        else:
            # Free the allocated memory
            ul.win_buf_free(self.memhandle)
            # Stop the background operation (this is required even if the
            # scan completes successfully)
            ul.stop_background(self.board_num, FunctionType.DIFUNCTION)
            self.set_ui_idle_state()

    def update_status_labels(self, status, curr_count, curr_index):
        """Show scan state, current buffer index, and sample count."""
        if status == Status.IDLE:
            self.status_label["text"] = "Idle"
        else:
            self.status_label["text"] = "Running"

        self.index_label["text"] = str(curr_index)
        self.count_label["text"] = str(curr_count)

    def display_values(self, curr_index, curr_count):
        """Render up to the latest 10 buffered samples as hex text."""
        display_count = 10
        array = self.ctypes_array
        data_text = ""

        # If no data has been gathered, don't add data to the labels
        if curr_count > 1:
            # curr_index points to the start of the last completed data point
            # that was transferred between the board and the data buffer. Based
            # on this, calculate the first index we want to display using
            # subtraction.
            first_index = max(curr_index - display_count - 1, 0)
            # Add (up to) the latest 10 values for each channel to the text
            for data_index in range(
                    first_index,
                    first_index + min(display_count, curr_count)):
                data_text += hex(array[data_index]) + "\n"

        self.data_label["text"] = data_text

    def stop(self):
        """Stop the running background scan."""
        ul.stop_background(self.board_num, FunctionType.DIFUNCTION)

    def set_ui_idle_state(self):
        """Return the start/stop button to its idle ('Start') state."""
        self.start_button["command"] = self.start
        self.start_button["text"] = "Start"

    def start(self):
        """Flip the button into 'Stop' mode and begin scanning."""
        self.start_button["command"] = self.stop
        self.start_button["text"] = "Stop"
        self.start_scan()

    def create_widgets(self):
        '''Create the tkinter UI'''
        self.device_label = tk.Label(self)
        self.device_label.pack(fill=tk.NONE, anchor=tk.NW)
        self.device_label["text"] = ('Board Number ' + str(self.board_num)
                                     + ": " + self.device_info.product_name
                                     + " (" + self.device_info.unique_id + ")")

        main_frame = tk.Frame(self)
        main_frame.pack(fill=tk.X, anchor=tk.NW)

        self.results_group = tk.LabelFrame(
            self, text="Results", padx=3, pady=3)
        self.results_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)
        self.results_group.grid_columnconfigure(1, weight=1)

        curr_row = 0
        status_left_label = tk.Label(self.results_group)
        status_left_label["text"] = "Status:"
        status_left_label.grid(row=curr_row, column=0, sticky=tk.W)

        self.status_label = tk.Label(self.results_group)
        self.status_label["text"] = "Idle"
        self.status_label.grid(row=curr_row, column=1, sticky=tk.W)

        curr_row += 1
        index_left_label = tk.Label(self.results_group)
        index_left_label["text"] = "Index:"
        index_left_label.grid(row=curr_row, column=0, sticky=tk.W)

        self.index_label = tk.Label(self.results_group)
        self.index_label["text"] = "-1"
        self.index_label.grid(row=curr_row, column=1, sticky=tk.W)

        curr_row += 1
        count_left_label = tk.Label(self.results_group)
        count_left_label["text"] = "Count:"
        count_left_label.grid(row=curr_row, column=0, sticky=tk.W)

        self.count_label = tk.Label(self.results_group)
        self.count_label["text"] = "0"
        self.count_label.grid(row=curr_row, column=1, sticky=tk.W)

        curr_row += 1
        self.inner_data_frame = tk.Frame(self.results_group)
        self.inner_data_frame.grid(
            row=curr_row, column=0, columnspan=2, sticky=tk.W)

        self.data_label = tk.Label(self.inner_data_frame)
        self.data_label.grid()

        button_frame = tk.Frame(self)
        button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)

        self.start_button = tk.Button(button_frame)
        self.start_button["text"] = "Start"
        self.start_button["command"] = self.start
        self.start_button.grid(row=0, column=0, padx=3, pady=3)

        self.quit_button = tk.Button(button_frame)
        self.quit_button["text"] = "Quit"
        self.quit_button["command"] = self.master.destroy
        self.quit_button.grid(row=0, column=1, padx=3, pady=3)
if __name__ == "__main__":
    # Start the example
    ULDI03(master=tk.Tk()).mainloop()
|
mehdi149/Learning-projects | refs/heads/master | rest-server/rest_server/rest_server/urls.py | 1 | """rest_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include , url
from django.contrib import admin
# Route table: delegate /learning/ URLs to the restServer app and expose
# the Django admin site under /admin/.
urlpatterns = [
    url(r'^learning/', include('restServer.urls')),
    url(r'^admin/', admin.site.urls),
]
|
SOKP/external_chromium_org | refs/heads/sokp-l5.1 | tools/perf/benchmarks/service_worker.py | 32 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import page_sets
from telemetry import benchmark
from telemetry.page import page_test
from telemetry.value import scalar
class _ServiceWorkerMeasurement(page_test.PageTest):
    """Collect the timing results a ServiceWorker test page publishes on
    ``window.results`` once it sets ``window.done``."""

    def CustomizeBrowserOptions(self, options):
        # NOTE(review): presumably ServiceWorker support was still behind
        # this flag at the time — confirm against the target Chrome build.
        options.AppendExtraBrowserArgs([
            '--enable-experimental-web-platform-features'
        ])

    def ValidateAndMeasurePage(self, _, tab, results):
        # Wait (up to 40 s) for the page to signal completion, then read
        # its results dict of {name: {'units': ..., 'value': ...}}.
        tab.WaitForJavaScriptExpression('window.done', 40)
        json = tab.EvaluateJavaScript('window.results')
        for key, value in json.iteritems():
            results.AddValue(scalar.ScalarValue(
                results.current_page, key, value['units'], value['value']))
@benchmark.Disabled
class ServiceWorkerPerfTest(benchmark.Benchmark):
    """Telemetry benchmark wiring the measurement to its page set."""
    test = _ServiceWorkerMeasurement
    page_set = page_sets.ServiceWorkerPageSet
|
amaozhao/basecms | refs/heads/master | unidecode/x06b.py | 252 | data = (
'Xiang ', # 0x00
'Nong ', # 0x01
'Bo ', # 0x02
'Chan ', # 0x03
'Lan ', # 0x04
'Ju ', # 0x05
'Shuang ', # 0x06
'She ', # 0x07
'Wei ', # 0x08
'Cong ', # 0x09
'Quan ', # 0x0a
'Qu ', # 0x0b
'Cang ', # 0x0c
'[?] ', # 0x0d
'Yu ', # 0x0e
'Luo ', # 0x0f
'Li ', # 0x10
'Zan ', # 0x11
'Luan ', # 0x12
'Dang ', # 0x13
'Jue ', # 0x14
'Em ', # 0x15
'Lan ', # 0x16
'Lan ', # 0x17
'Zhu ', # 0x18
'Lei ', # 0x19
'Li ', # 0x1a
'Ba ', # 0x1b
'Nang ', # 0x1c
'Yu ', # 0x1d
'Ling ', # 0x1e
'Tsuki ', # 0x1f
'Qian ', # 0x20
'Ci ', # 0x21
'Huan ', # 0x22
'Xin ', # 0x23
'Yu ', # 0x24
'Yu ', # 0x25
'Qian ', # 0x26
'Ou ', # 0x27
'Xu ', # 0x28
'Chao ', # 0x29
'Chu ', # 0x2a
'Chi ', # 0x2b
'Kai ', # 0x2c
'Yi ', # 0x2d
'Jue ', # 0x2e
'Xi ', # 0x2f
'Xu ', # 0x30
'Xia ', # 0x31
'Yu ', # 0x32
'Kuai ', # 0x33
'Lang ', # 0x34
'Kuan ', # 0x35
'Shuo ', # 0x36
'Xi ', # 0x37
'Ai ', # 0x38
'Yi ', # 0x39
'Qi ', # 0x3a
'Hu ', # 0x3b
'Chi ', # 0x3c
'Qin ', # 0x3d
'Kuan ', # 0x3e
'Kan ', # 0x3f
'Kuan ', # 0x40
'Kan ', # 0x41
'Chuan ', # 0x42
'Sha ', # 0x43
'Gua ', # 0x44
'Yin ', # 0x45
'Xin ', # 0x46
'Xie ', # 0x47
'Yu ', # 0x48
'Qian ', # 0x49
'Xiao ', # 0x4a
'Yi ', # 0x4b
'Ge ', # 0x4c
'Wu ', # 0x4d
'Tan ', # 0x4e
'Jin ', # 0x4f
'Ou ', # 0x50
'Hu ', # 0x51
'Ti ', # 0x52
'Huan ', # 0x53
'Xu ', # 0x54
'Pen ', # 0x55
'Xi ', # 0x56
'Xiao ', # 0x57
'Xu ', # 0x58
'Xi ', # 0x59
'Sen ', # 0x5a
'Lian ', # 0x5b
'Chu ', # 0x5c
'Yi ', # 0x5d
'Kan ', # 0x5e
'Yu ', # 0x5f
'Chuo ', # 0x60
'Huan ', # 0x61
'Zhi ', # 0x62
'Zheng ', # 0x63
'Ci ', # 0x64
'Bu ', # 0x65
'Wu ', # 0x66
'Qi ', # 0x67
'Bu ', # 0x68
'Bu ', # 0x69
'Wai ', # 0x6a
'Ju ', # 0x6b
'Qian ', # 0x6c
'Chi ', # 0x6d
'Se ', # 0x6e
'Chi ', # 0x6f
'Se ', # 0x70
'Zhong ', # 0x71
'Sui ', # 0x72
'Sui ', # 0x73
'Li ', # 0x74
'Cuo ', # 0x75
'Yu ', # 0x76
'Li ', # 0x77
'Gui ', # 0x78
'Dai ', # 0x79
'Dai ', # 0x7a
'Si ', # 0x7b
'Jian ', # 0x7c
'Zhe ', # 0x7d
'Mo ', # 0x7e
'Mo ', # 0x7f
'Yao ', # 0x80
'Mo ', # 0x81
'Cu ', # 0x82
'Yang ', # 0x83
'Tian ', # 0x84
'Sheng ', # 0x85
'Dai ', # 0x86
'Shang ', # 0x87
'Xu ', # 0x88
'Xun ', # 0x89
'Shu ', # 0x8a
'Can ', # 0x8b
'Jue ', # 0x8c
'Piao ', # 0x8d
'Qia ', # 0x8e
'Qiu ', # 0x8f
'Su ', # 0x90
'Qing ', # 0x91
'Yun ', # 0x92
'Lian ', # 0x93
'Yi ', # 0x94
'Fou ', # 0x95
'Zhi ', # 0x96
'Ye ', # 0x97
'Can ', # 0x98
'Hun ', # 0x99
'Dan ', # 0x9a
'Ji ', # 0x9b
'Ye ', # 0x9c
'Zhen ', # 0x9d
'Yun ', # 0x9e
'Wen ', # 0x9f
'Chou ', # 0xa0
'Bin ', # 0xa1
'Ti ', # 0xa2
'Jin ', # 0xa3
'Shang ', # 0xa4
'Yin ', # 0xa5
'Diao ', # 0xa6
'Cu ', # 0xa7
'Hui ', # 0xa8
'Cuan ', # 0xa9
'Yi ', # 0xaa
'Dan ', # 0xab
'Du ', # 0xac
'Jiang ', # 0xad
'Lian ', # 0xae
'Bin ', # 0xaf
'Du ', # 0xb0
'Tsukusu ', # 0xb1
'Jian ', # 0xb2
'Shu ', # 0xb3
'Ou ', # 0xb4
'Duan ', # 0xb5
'Zhu ', # 0xb6
'Yin ', # 0xb7
'Qing ', # 0xb8
'Yi ', # 0xb9
'Sha ', # 0xba
'Que ', # 0xbb
'Ke ', # 0xbc
'Yao ', # 0xbd
'Jun ', # 0xbe
'Dian ', # 0xbf
'Hui ', # 0xc0
'Hui ', # 0xc1
'Gu ', # 0xc2
'Que ', # 0xc3
'Ji ', # 0xc4
'Yi ', # 0xc5
'Ou ', # 0xc6
'Hui ', # 0xc7
'Duan ', # 0xc8
'Yi ', # 0xc9
'Xiao ', # 0xca
'Wu ', # 0xcb
'Guan ', # 0xcc
'Mu ', # 0xcd
'Mei ', # 0xce
'Mei ', # 0xcf
'Ai ', # 0xd0
'Zuo ', # 0xd1
'Du ', # 0xd2
'Yu ', # 0xd3
'Bi ', # 0xd4
'Bi ', # 0xd5
'Bi ', # 0xd6
'Pi ', # 0xd7
'Pi ', # 0xd8
'Bi ', # 0xd9
'Chan ', # 0xda
'Mao ', # 0xdb
'[?] ', # 0xdc
'[?] ', # 0xdd
'Pu ', # 0xde
'Mushiru ', # 0xdf
'Jia ', # 0xe0
'Zhan ', # 0xe1
'Sai ', # 0xe2
'Mu ', # 0xe3
'Tuo ', # 0xe4
'Xun ', # 0xe5
'Er ', # 0xe6
'Rong ', # 0xe7
'Xian ', # 0xe8
'Ju ', # 0xe9
'Mu ', # 0xea
'Hao ', # 0xeb
'Qiu ', # 0xec
'Dou ', # 0xed
'Mushiru ', # 0xee
'Tan ', # 0xef
'Pei ', # 0xf0
'Ju ', # 0xf1
'Duo ', # 0xf2
'Cui ', # 0xf3
'Bi ', # 0xf4
'San ', # 0xf5
'[?] ', # 0xf6
'Mao ', # 0xf7
'Sui ', # 0xf8
'Yu ', # 0xf9
'Yu ', # 0xfa
'Tuo ', # 0xfb
'He ', # 0xfc
'Jian ', # 0xfd
'Ta ', # 0xfe
'San ', # 0xff
)
|
zangree/ryu | refs/heads/master | ryu/controller/controller.py | 3 | # Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main component of OpenFlow controller.
- Handle connections from switches
- Generate and route events to appropriate entities like Ryu applications
"""
import contextlib
from ryu import cfg
import logging
from ryu.lib import hub
from ryu.lib.hub import StreamServer
import traceback
import random
import ssl
from socket import IPPROTO_TCP, TCP_NODELAY, SHUT_RDWR, timeout as SocketTimeout
import warnings
import ryu.base.app_manager
from ryu.ofproto import ofproto_common
from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import nx_match
from ryu.controller import ofp_event
from ryu.controller.handler import HANDSHAKE_DISPATCHER, DEAD_DISPATCHER
from ryu.lib.dpid import dpid_to_str
LOG = logging.getLogger('ryu.controller.controller')

CONF = cfg.CONF
# Options exposed on the command line.
CONF.register_cli_opts([
    cfg.StrOpt('ofp-listen-host', default='', help='openflow listen host'),
    cfg.IntOpt('ofp-tcp-listen-port', default=None,
               help='openflow tcp listen port '
               '(default: %d)' % ofproto_common.OFP_TCP_PORT),
    cfg.IntOpt('ofp-ssl-listen-port', default=None,
               help='openflow ssl listen port '
               '(default: %d)' % ofproto_common.OFP_SSL_PORT),
    cfg.StrOpt('ctl-privkey', default=None, help='controller private key'),
    cfg.StrOpt('ctl-cert', default=None, help='controller certificate'),
    cfg.StrOpt('ca-certs', default=None, help='CA certificates')
])
# Options settable via configuration files (no CLI flag generated).
CONF.register_opts([
    cfg.FloatOpt('socket-timeout',
                 default=5.0,
                 help='Time, in seconds, to await completion of socket operations.'),
    cfg.FloatOpt('echo-request-interval',
                 default=15.0,
                 help='Time, in seconds, between sending echo requests to a datapath.'),
    cfg.IntOpt('maximum-unreplied-echo-requests',
               default=0,
               min=0,
               help='Maximum number of unreplied echo requests before datapath is disconnected.')
])
class OpenFlowController(object):
    """Listens for OpenFlow switch connections over TCP and/or SSL and
    hands each accepted connection to datapath_connection_factory."""

    def __init__(self):
        super(OpenFlowController, self).__init__()
        if not CONF.ofp_tcp_listen_port and not CONF.ofp_ssl_listen_port:
            self.ofp_tcp_listen_port = ofproto_common.OFP_TCP_PORT
            self.ofp_ssl_listen_port = ofproto_common.OFP_SSL_PORT
            # For the backward compatibility, we spawn a server loop
            # listening on the old OpenFlow listen port 6633.
            hub.spawn(self.server_loop,
                      ofproto_common.OFP_TCP_PORT_OLD,
                      ofproto_common.OFP_SSL_PORT_OLD)
        else:
            self.ofp_tcp_listen_port = CONF.ofp_tcp_listen_port
            self.ofp_ssl_listen_port = CONF.ofp_ssl_listen_port

    # entry point
    def __call__(self):
        # LOG.debug('call')
        self.server_loop(self.ofp_tcp_listen_port,
                         self.ofp_ssl_listen_port)

    def server_loop(self, ofp_tcp_listen_port, ofp_ssl_listen_port):
        """Serve forever: SSL listener when key/cert are configured
        (optionally requiring client certs when ca-certs is set),
        otherwise a plain TCP listener."""
        if CONF.ctl_privkey is not None and CONF.ctl_cert is not None:
            if CONF.ca_certs is not None:
                server = StreamServer((CONF.ofp_listen_host,
                                       ofp_ssl_listen_port),
                                      datapath_connection_factory,
                                      keyfile=CONF.ctl_privkey,
                                      certfile=CONF.ctl_cert,
                                      cert_reqs=ssl.CERT_REQUIRED,
                                      ca_certs=CONF.ca_certs,
                                      ssl_version=ssl.PROTOCOL_TLSv1)
            else:
                server = StreamServer((CONF.ofp_listen_host,
                                       ofp_ssl_listen_port),
                                      datapath_connection_factory,
                                      keyfile=CONF.ctl_privkey,
                                      certfile=CONF.ctl_cert,
                                      ssl_version=ssl.PROTOCOL_TLSv1)
        else:
            server = StreamServer((CONF.ofp_listen_host,
                                   ofp_tcp_listen_port),
                                  datapath_connection_factory)

        # LOG.debug('loop')
        server.serve_forever()
def _deactivate(method):
def deactivate(self):
try:
method(self)
finally:
try:
self.socket.shutdown(SHUT_RDWR)
except (EOFError, IOError):
pass
if not self.is_active:
self.socket.close()
return deactivate
class Datapath(ofproto_protocol.ProtocolDesc):
    """
    A class to describe an OpenFlow switch connected to this controller.

    An instance has the following attributes.

    .. tabularcolumns:: |l|L|

    ==================================== ======================================
    Attribute                            Description
    ==================================== ======================================
    id                                   64-bit OpenFlow Datapath ID.
                                         Only available for
                                         ryu.controller.handler.MAIN_DISPATCHER
                                         phase.
    ofproto                              A module which exports OpenFlow
                                         definitions, mainly constants appeared
                                         in the specification, for the
                                         negotiated OpenFlow version.  For
                                         example, ryu.ofproto.ofproto_v1_0 for
                                         OpenFlow 1.0.
    ofproto_parser                       A module which exports OpenFlow wire
                                         message encoder and decoder for the
                                         negotiated OpenFlow version.
                                         For example,
                                         ryu.ofproto.ofproto_v1_0_parser
                                         for OpenFlow 1.0.
    ofproto_parser.OFPxxxx(datapath,...) A callable to prepare an OpenFlow
                                         message for the given switch.  It can
                                         be sent with Datapath.send_msg later.
                                         xxxx is a name of the message.  For
                                         example OFPFlowMod for flow-mod
                                         message.  Arguments depend on the
                                         message.
    set_xid(self, msg)                   Generate an OpenFlow XID and put it
                                         in msg.xid.
    send_msg(self, msg)                  Queue an OpenFlow message to send to
                                         the corresponding switch.  If msg.xid
                                         is None, set_xid is automatically
                                         called on the message before queueing.
    send_packet_out                      deprecated
    send_flow_mod                        deprecated
    send_flow_del                        deprecated
    send_delete_all_flows                deprecated
    send_barrier                         Queue an OpenFlow barrier message to
                                         send to the switch.
    send_nxt_set_flow_format             deprecated
    is_reserved_port                     deprecated
    ==================================== ======================================
    """
    def __init__(self, socket, address):
        """Wrap an accepted switch connection.

        socket: connected (possibly SSL-wrapped) stream socket.
        address: peer address of the switch as reported by accept().
        """
        super(Datapath, self).__init__()

        self.socket = socket
        # Disable Nagle: OpenFlow exchanges many small messages.
        self.socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
        self.socket.settimeout(CONF.socket_timeout)
        self.address = address
        self.is_active = True

        # The limit is arbitrary. We need to limit queue size to
        # prevent it from eating memory up.
        self.send_q = hub.Queue(16)
        self._send_q_sem = hub.BoundedSemaphore(self.send_q.maxsize)

        self.echo_request_interval = CONF.echo_request_interval
        self.max_unreplied_echo_requests = CONF.maximum_unreplied_echo_requests
        self.unreplied_echo_requests = []

        # Next transaction id; wraps at MAX_XID (see set_xid).
        self.xid = random.randint(0, self.ofproto.MAX_XID)
        self.id = None  # datapath_id is unknown yet
        self._ports = None
        self.flow_format = ofproto_v1_0.NXFF_OPENFLOW10
        self.ofp_brick = ryu.base.app_manager.lookup_service_brick('ofp_event')
        self.set_state(HANDSHAKE_DISPATCHER)
    @_deactivate
    def close(self):
        """Transition to DEAD_DISPATCHER (idempotent); the _deactivate
        wrapper then shuts the socket down."""
        if self.state != DEAD_DISPATCHER:
            self.set_state(DEAD_DISPATCHER)
    def set_state(self, state):
        """Record the dispatcher state and notify observers with an
        EventOFPStateChange event for that state."""
        self.state = state
        ev = ofp_event.EventOFPStateChange(self)
        ev.state = state
        self.ofp_brick.send_event_to_observers(ev, state)
    # Low level socket handling layer
    @_deactivate
    def _recv_loop(self):
        """Receive loop.

        Accumulates bytes from the socket, parses complete OpenFlow
        messages, and dispatches each as an event to the ofp_event brick
        and the handlers registered for the current dispatcher state.
        Runs until the datapath dies or the connection drops; the
        _deactivate wrapper then shuts the socket down.
        """
        buf = bytearray()
        required_len = ofproto_common.OFP_HEADER_SIZE

        count = 0
        while self.state != DEAD_DISPATCHER:
            ret = ""

            try:
                ret = self.socket.recv(required_len)
            except SocketTimeout:
                continue
            except ssl.SSLError:
                # eventlet throws SSLError (which is a subclass of IOError)
                # on SSL socket read timeout; re-try the loop in this case.
                continue
            except (EOFError, IOError):
                break

            if len(ret) == 0:
                # Peer closed the connection.
                break

            buf += ret
            while len(buf) >= required_len:
                # required_len is the header size until a header has been
                # parsed, then the full message length.
                (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
                required_len = msg_len
                if len(buf) < required_len:
                    break

                msg = ofproto_parser.msg(
                    self, version, msg_type, msg_len, xid, buf[:msg_len])
                # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
                if msg:
                    ev = ofp_event.ofp_msg_to_ev(msg)
                    self.ofp_brick.send_event_to_observers(ev, self.state)

                    dispatchers = lambda x: x.callers[ev.__class__].dispatchers
                    handlers = [handler for handler in
                                self.ofp_brick.get_handlers(ev) if
                                self.state in dispatchers(handler)]
                    for handler in handlers:
                        handler(ev)

                buf = buf[required_len:]
                required_len = ofproto_common.OFP_HEADER_SIZE

                # We need to schedule other greenlets. Otherwise, ryu
                # can't accept new switches or handle the existing
                # switches. The limit is arbitrary. We need the better
                # approach in the future.
                count += 1
                if count > 2048:
                    count = 0
                    hub.sleep(0)
    def _send_loop(self):
        """Send loop.

        Pops serialized buffers from send_q and writes them to the
        socket, releasing the queue semaphore for each one.  On exit
        (dead state, timeout, or socket error) it clears send_q, drains
        any queued entries to unblock senders, and closes the datapath.
        """
        try:
            while self.state != DEAD_DISPATCHER:
                buf = self.send_q.get()
                self._send_q_sem.release()
                self.socket.sendall(buf)
        except SocketTimeout:
            LOG.debug("Socket timed out while sending data to switch at address %s",
                      self.address)
        except IOError as ioe:
            # Convert ioe.errno to a string, just in case it was somehow set to None.
            errno = "%s" % ioe.errno
            LOG.debug("Socket error while sending data to switch at address %s: [%s] %s",
                      self.address, errno, ioe.strerror)
        finally:
            q = self.send_q
            # First, clear self.send_q to prevent new references.
            self.send_q = None
            # Now, drain the send_q, releasing the associated semaphore for each entry.
            # This should release all threads waiting to acquire the semaphore.
            try:
                while q.get(block=False):
                    self._send_q_sem.release()
            except hub.QueueEmpty:
                pass
            # Finally, ensure the _recv_loop terminates.
            self.close()
    def send(self, buf):
        """Queue raw bytes for _send_loop.

        The semaphore bounds outstanding buffers; if send_q has already
        been cleared (datapath terminating) the buffer is dropped with a
        debug log instead of being sent.
        """
        msg_enqueued = False
        self._send_q_sem.acquire()
        if self.send_q:
            self.send_q.put(buf)
            msg_enqueued = True
        else:
            self._send_q_sem.release()
        if not msg_enqueued:
            LOG.debug('Datapath in process of terminating; send() to %s discarded.',
                      self.address)
def set_xid(self, msg):
self.xid += 1
self.xid &= self.ofproto.MAX_XID
msg.set_xid(self.xid)
return self.xid
    def send_msg(self, msg):
        """Serialize *msg* (assigning an xid first when msg.xid is None)
        and queue the wire bytes for sending."""
        assert isinstance(msg, self.ofproto_parser.MsgBase)
        if msg.xid is None:
            self.set_xid(msg)
        msg.serialize()
        # LOG.debug('send_msg %s', msg)
        self.send(msg.buf)
    def _echo_request_loop(self):
        """Liveness loop: periodically send echo requests, tracking their
        xids, and close the connection once more than
        max_unreplied_echo_requests remain unanswered.  A maximum of 0
        disables the mechanism entirely.
        """
        if not self.max_unreplied_echo_requests:
            return
        while (self.send_q and
               (len(self.unreplied_echo_requests) <= self.max_unreplied_echo_requests)):
            echo_req = self.ofproto_parser.OFPEchoRequest(self)
            self.unreplied_echo_requests.append(self.set_xid(echo_req))
            self.send_msg(echo_req)
            hub.sleep(self.echo_request_interval)
        self.close()
def acknowledge_echo_reply(self, xid):
try:
self.unreplied_echo_requests.remove(xid)
except:
pass
    def serve(self):
        """Run the datapath: spawn the send and echo greenlets, send the
        initial OFPHello, then block in the receive loop.  On exit both
        helper greenlets are killed and joined and the datapath is marked
        inactive."""
        send_thr = hub.spawn(self._send_loop)

        # send hello message immediately
        hello = self.ofproto_parser.OFPHello(self)
        self.send_msg(hello)

        echo_thr = hub.spawn(self._echo_request_loop)

        try:
            self._recv_loop()
        finally:
            hub.kill(send_thr)
            hub.kill(echo_thr)
            hub.joinall([send_thr, echo_thr])
            self.is_active = False
    #
    # Utility methods for convenience
    #
    def send_packet_out(self, buffer_id=0xffffffff, in_port=None,
                        actions=None, data=None):
        """(deprecated) Send an OFPPacketOut; in_port defaults to
        OFPP_NONE when not given."""
        if in_port is None:
            in_port = self.ofproto.OFPP_NONE
        packet_out = self.ofproto_parser.OFPPacketOut(
            self, buffer_id, in_port, actions, data)
        self.send_msg(packet_out)
    def send_flow_mod(self, rule, cookie, command, idle_timeout, hard_timeout,
                      priority=None, buffer_id=0xffffffff,
                      out_port=None, flags=0, actions=None):
        """(deprecated) Send a flow-mod built from *rule*.

        Depending on rule.flow_format() this emits either a plain
        OpenFlow 1.0 OFPFlowMod or a Nicira-extension NXTFlowMod,
        upgrading the datapath's flow format first if needed.
        """
        if priority is None:
            priority = self.ofproto.OFP_DEFAULT_PRIORITY
        if out_port is None:
            out_port = self.ofproto.OFPP_NONE
        flow_format = rule.flow_format()
        assert (flow_format == ofproto_v1_0.NXFF_OPENFLOW10 or
                flow_format == ofproto_v1_0.NXFF_NXM)
        if self.flow_format < flow_format:
            self.send_nxt_set_flow_format(flow_format)
        if flow_format == ofproto_v1_0.NXFF_OPENFLOW10:
            match_tuple = rule.match_tuple()
            match = self.ofproto_parser.OFPMatch(*match_tuple)
            flow_mod = self.ofproto_parser.OFPFlowMod(
                self, match, cookie, command, idle_timeout, hard_timeout,
                priority, buffer_id, out_port, flags, actions)
        else:
            flow_mod = self.ofproto_parser.NXTFlowMod(
                self, cookie, command, idle_timeout, hard_timeout,
                priority, buffer_id, out_port, flags, rule, actions)
        self.send_msg(flow_mod)
    def send_flow_del(self, rule, cookie, out_port=None):
        """(deprecated) Delete flows matching *rule* via send_flow_mod
        with OFPFC_DELETE."""
        self.send_flow_mod(rule=rule, cookie=cookie,
                           command=self.ofproto.OFPFC_DELETE,
                           idle_timeout=0, hard_timeout=0, priority=0,
                           out_port=out_port)
    def send_delete_all_flows(self):
        """(deprecated) Delete every flow: OFPFC_DELETE with an empty
        (match-all) ClsRule."""
        rule = nx_match.ClsRule()
        self.send_flow_mod(
            rule=rule, cookie=0, command=self.ofproto.OFPFC_DELETE,
            idle_timeout=0, hard_timeout=0, priority=0, buffer_id=0,
            out_port=self.ofproto.OFPP_NONE, flags=0, actions=None)
    def send_barrier(self):
        """Queue an OpenFlow barrier request to the switch."""
        barrier_request = self.ofproto_parser.OFPBarrierRequest(self)
        self.send_msg(barrier_request)
    def send_nxt_set_flow_format(self, flow_format):
        """(deprecated) Switch the wire flow format (OpenFlow 1.0 or NXM)
        and send a barrier; no-op when already using *flow_format*."""
        assert (flow_format == ofproto_v1_0.NXFF_OPENFLOW10 or
                flow_format == ofproto_v1_0.NXFF_NXM)
        if self.flow_format == flow_format:
            # Nothing to do
            return
        self.flow_format = flow_format
        set_format = self.ofproto_parser.NXTSetFlowFormat(self, flow_format)
        # FIXME: If NXT_SET_FLOW_FORMAT or NXFF_NXM is not supported by
        # the switch then an error message will be received. It may be
        # handled by setting self.flow_format to
        # ofproto_v1_0.NXFF_OPENFLOW10 but currently isn't.
        self.send_msg(set_format)
        self.send_barrier()
    def is_reserved_port(self, port_no):
        """(deprecated) True when *port_no* is above OFPP_MAX, i.e. a
        reserved/virtual OpenFlow port number."""
        return port_no > self.ofproto.OFPP_MAX
def datapath_connection_factory(socket, address):
    """StreamServer callback: wrap the accepted *socket* in a Datapath
    and serve it until the connection terminates; the Datapath is always
    closed on the way out."""
    LOG.debug('connected socket:%s address:%s', socket, address)
    with contextlib.closing(Datapath(socket, address)) as datapath:
        try:
            datapath.serve()
        except:
            # Something went wrong.
            # Especially malicious switch can send malformed packet,
            # the parser raise exception.
            # Can we do anything more graceful?
            # NOTE(review): the bare except is tolerable here only
            # because it re-raises unconditionally after logging.
            if datapath.id is None:
                dpid_str = "%s" % datapath.id
            else:
                dpid_str = dpid_to_str(datapath.id)
            LOG.error("Error in the datapath %s from %s", dpid_str, address)
            raise
|
flh/odoo | refs/heads/master | addons/decimal_precision/tests/test_qweb_float.py | 103 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestFloatExport(common.TransactionCase):
    """Exercise the ir.qweb float-to-HTML converter against the
    decimal.precision test model."""

    def setUp(self):
        super(TestFloatExport, self).setUp()
        self.Model = self.registry('decimal.precision.test')

    def get_converter(self, name):
        """Return a callable rendering values of the test model's column
        *name* through ir.qweb.field.float's value_to_html."""
        converter = self.registry('ir.qweb.field.float')
        column = self.Model._all_columns[name].column

        return lambda value, options=None: converter.value_to_html(
            self.cr, self.uid, value, column, options=options, context=None)

    def test_basic_float(self):
        # Precision comes from the column definition: 'float' has no
        # fixed digits, 'float_2' and 'float_4' round/pad to 2 and 4
        # decimals respectively.
        converter = self.get_converter('float')

        self.assertEqual(
            converter(42.0),
            "42.0")
        self.assertEqual(
            converter(42.12345),
            "42.12345")

        converter = self.get_converter('float_2')

        self.assertEqual(
            converter(42.0),
            "42.00")
        self.assertEqual(
            converter(42.12345),
            "42.12")

        converter = self.get_converter('float_4')

        self.assertEqual(
            converter(42.0),
            '42.0000')
        self.assertEqual(
            converter(42.12345),
            '42.1234')

    def test_precision_domain(self):
        # An options {'decimal_precision': name} overrides the column's
        # own precision with the named decimal.precision record.
        DP = self.registry('decimal.precision')
        DP.create(self.cr, self.uid, {
            'name': 'A',
            'digits': 2,
        })
        DP.create(self.cr, self.uid, {
            'name': 'B',
            'digits': 6,
        })

        converter = self.get_converter('float')

        self.assertEqual(
            converter(42.0, {'decimal_precision': 'A'}),
            '42.00')
        self.assertEqual(
            converter(42.0, {'decimal_precision': 'B'}),
            '42.000000')

        converter = self.get_converter('float_4')

        self.assertEqual(
            converter(42.12345, {'decimal_precision': 'A'}),
            '42.12')
        self.assertEqual(
            converter(42.12345, {'decimal_precision': 'B'}),
            '42.123450')
|
Icenowy/MissionPlanner | refs/heads/master | Lib/encodings/cp866.py | 93 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp866 codec backed by this module's charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp866 encoder (charmap encoding needs no state)."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp866 decoder (charmap decoding needs no state)."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream interface; all behavior inherited from Codec/StreamWriter.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Stream interface; all behavior inherited from Codec/StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec as 'cp866'."""
    # One shared stateless Codec instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp866',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Legacy byte -> code point map kept for backward compatibility; the codec
# itself decodes via `decoding_table` below. Bytes 0x00-0x7f are ASCII
# (identity); only the high half is remapped.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x0410,  # CYRILLIC CAPITAL LETTER A
    0x0081: 0x0411,  # CYRILLIC CAPITAL LETTER BE
    0x0082: 0x0412,  # CYRILLIC CAPITAL LETTER VE
    0x0083: 0x0413,  # CYRILLIC CAPITAL LETTER GHE
    0x0084: 0x0414,  # CYRILLIC CAPITAL LETTER DE
    0x0085: 0x0415,  # CYRILLIC CAPITAL LETTER IE
    0x0086: 0x0416,  # CYRILLIC CAPITAL LETTER ZHE
    0x0087: 0x0417,  # CYRILLIC CAPITAL LETTER ZE
    0x0088: 0x0418,  # CYRILLIC CAPITAL LETTER I
    0x0089: 0x0419,  # CYRILLIC CAPITAL LETTER SHORT I
    0x008a: 0x041a,  # CYRILLIC CAPITAL LETTER KA
    0x008b: 0x041b,  # CYRILLIC CAPITAL LETTER EL
    0x008c: 0x041c,  # CYRILLIC CAPITAL LETTER EM
    0x008d: 0x041d,  # CYRILLIC CAPITAL LETTER EN
    0x008e: 0x041e,  # CYRILLIC CAPITAL LETTER O
    0x008f: 0x041f,  # CYRILLIC CAPITAL LETTER PE
    0x0090: 0x0420,  # CYRILLIC CAPITAL LETTER ER
    0x0091: 0x0421,  # CYRILLIC CAPITAL LETTER ES
    0x0092: 0x0422,  # CYRILLIC CAPITAL LETTER TE
    0x0093: 0x0423,  # CYRILLIC CAPITAL LETTER U
    0x0094: 0x0424,  # CYRILLIC CAPITAL LETTER EF
    0x0095: 0x0425,  # CYRILLIC CAPITAL LETTER HA
    0x0096: 0x0426,  # CYRILLIC CAPITAL LETTER TSE
    0x0097: 0x0427,  # CYRILLIC CAPITAL LETTER CHE
    0x0098: 0x0428,  # CYRILLIC CAPITAL LETTER SHA
    0x0099: 0x0429,  # CYRILLIC CAPITAL LETTER SHCHA
    0x009a: 0x042a,  # CYRILLIC CAPITAL LETTER HARD SIGN
    0x009b: 0x042b,  # CYRILLIC CAPITAL LETTER YERU
    0x009c: 0x042c,  # CYRILLIC CAPITAL LETTER SOFT SIGN
    0x009d: 0x042d,  # CYRILLIC CAPITAL LETTER E
    0x009e: 0x042e,  # CYRILLIC CAPITAL LETTER YU
    0x009f: 0x042f,  # CYRILLIC CAPITAL LETTER YA
    0x00a0: 0x0430,  # CYRILLIC SMALL LETTER A
    0x00a1: 0x0431,  # CYRILLIC SMALL LETTER BE
    0x00a2: 0x0432,  # CYRILLIC SMALL LETTER VE
    0x00a3: 0x0433,  # CYRILLIC SMALL LETTER GHE
    0x00a4: 0x0434,  # CYRILLIC SMALL LETTER DE
    0x00a5: 0x0435,  # CYRILLIC SMALL LETTER IE
    0x00a6: 0x0436,  # CYRILLIC SMALL LETTER ZHE
    0x00a7: 0x0437,  # CYRILLIC SMALL LETTER ZE
    0x00a8: 0x0438,  # CYRILLIC SMALL LETTER I
    0x00a9: 0x0439,  # CYRILLIC SMALL LETTER SHORT I
    0x00aa: 0x043a,  # CYRILLIC SMALL LETTER KA
    0x00ab: 0x043b,  # CYRILLIC SMALL LETTER EL
    0x00ac: 0x043c,  # CYRILLIC SMALL LETTER EM
    0x00ad: 0x043d,  # CYRILLIC SMALL LETTER EN
    0x00ae: 0x043e,  # CYRILLIC SMALL LETTER O
    0x00af: 0x043f,  # CYRILLIC SMALL LETTER PE
    0x00b0: 0x2591,  # LIGHT SHADE
    0x00b1: 0x2592,  # MEDIUM SHADE
    0x00b2: 0x2593,  # DARK SHADE
    0x00b3: 0x2502,  # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551,  # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588,  # FULL BLOCK
    0x00dc: 0x2584,  # LOWER HALF BLOCK
    0x00dd: 0x258c,  # LEFT HALF BLOCK
    0x00de: 0x2590,  # RIGHT HALF BLOCK
    0x00df: 0x2580,  # UPPER HALF BLOCK
    0x00e0: 0x0440,  # CYRILLIC SMALL LETTER ER
    0x00e1: 0x0441,  # CYRILLIC SMALL LETTER ES
    0x00e2: 0x0442,  # CYRILLIC SMALL LETTER TE
    0x00e3: 0x0443,  # CYRILLIC SMALL LETTER U
    0x00e4: 0x0444,  # CYRILLIC SMALL LETTER EF
    0x00e5: 0x0445,  # CYRILLIC SMALL LETTER HA
    0x00e6: 0x0446,  # CYRILLIC SMALL LETTER TSE
    0x00e7: 0x0447,  # CYRILLIC SMALL LETTER CHE
    0x00e8: 0x0448,  # CYRILLIC SMALL LETTER SHA
    0x00e9: 0x0449,  # CYRILLIC SMALL LETTER SHCHA
    0x00ea: 0x044a,  # CYRILLIC SMALL LETTER HARD SIGN
    0x00eb: 0x044b,  # CYRILLIC SMALL LETTER YERU
    0x00ec: 0x044c,  # CYRILLIC SMALL LETTER SOFT SIGN
    0x00ed: 0x044d,  # CYRILLIC SMALL LETTER E
    0x00ee: 0x044e,  # CYRILLIC SMALL LETTER YU
    0x00ef: 0x044f,  # CYRILLIC SMALL LETTER YA
    0x00f0: 0x0401,  # CYRILLIC CAPITAL LETTER IO
    0x00f1: 0x0451,  # CYRILLIC SMALL LETTER IO
    0x00f2: 0x0404,  # CYRILLIC CAPITAL LETTER UKRAINIAN IE
    0x00f3: 0x0454,  # CYRILLIC SMALL LETTER UKRAINIAN IE
    0x00f4: 0x0407,  # CYRILLIC CAPITAL LETTER YI
    0x00f5: 0x0457,  # CYRILLIC SMALL LETTER YI
    0x00f6: 0x040e,  # CYRILLIC CAPITAL LETTER SHORT U
    0x00f7: 0x045e,  # CYRILLIC SMALL LETTER SHORT U
    0x00f8: 0x00b0,  # DEGREE SIGN
    0x00f9: 0x2219,  # BULLET OPERATOR
    0x00fa: 0x00b7,  # MIDDLE DOT
    0x00fb: 0x221a,  # SQUARE ROOT
    0x00fc: 0x2116,  # NUMERO SIGN
    0x00fd: 0x00a4,  # CURRENCY SIGN
    0x00fe: 0x25a0,  # BLACK SQUARE
    0x00ff: 0x00a0,  # NO-BREAK SPACE
})
### Decoding Table
# 256-entry decoding table: decoding_table[byte] is the Unicode character
# for that CP866 byte. charmap_decode consumes this directly (Codec.decode).
decoding_table = (
    u'\x00'  # 0x0000 -> NULL
    u'\x01'  # 0x0001 -> START OF HEADING
    u'\x02'  # 0x0002 -> START OF TEXT
    u'\x03'  # 0x0003 -> END OF TEXT
    u'\x04'  # 0x0004 -> END OF TRANSMISSION
    u'\x05'  # 0x0005 -> ENQUIRY
    u'\x06'  # 0x0006 -> ACKNOWLEDGE
    u'\x07'  # 0x0007 -> BELL
    u'\x08'  # 0x0008 -> BACKSPACE
    u'\t'  # 0x0009 -> HORIZONTAL TABULATION
    u'\n'  # 0x000a -> LINE FEED
    u'\x0b'  # 0x000b -> VERTICAL TABULATION
    u'\x0c'  # 0x000c -> FORM FEED
    u'\r'  # 0x000d -> CARRIAGE RETURN
    u'\x0e'  # 0x000e -> SHIFT OUT
    u'\x0f'  # 0x000f -> SHIFT IN
    u'\x10'  # 0x0010 -> DATA LINK ESCAPE
    u'\x11'  # 0x0011 -> DEVICE CONTROL ONE
    u'\x12'  # 0x0012 -> DEVICE CONTROL TWO
    u'\x13'  # 0x0013 -> DEVICE CONTROL THREE
    u'\x14'  # 0x0014 -> DEVICE CONTROL FOUR
    u'\x15'  # 0x0015 -> NEGATIVE ACKNOWLEDGE
    u'\x16'  # 0x0016 -> SYNCHRONOUS IDLE
    u'\x17'  # 0x0017 -> END OF TRANSMISSION BLOCK
    u'\x18'  # 0x0018 -> CANCEL
    u'\x19'  # 0x0019 -> END OF MEDIUM
    u'\x1a'  # 0x001a -> SUBSTITUTE
    u'\x1b'  # 0x001b -> ESCAPE
    u'\x1c'  # 0x001c -> FILE SEPARATOR
    u'\x1d'  # 0x001d -> GROUP SEPARATOR
    u'\x1e'  # 0x001e -> RECORD SEPARATOR
    u'\x1f'  # 0x001f -> UNIT SEPARATOR
    u' '  # 0x0020 -> SPACE
    u'!'  # 0x0021 -> EXCLAMATION MARK
    u'"'  # 0x0022 -> QUOTATION MARK
    u'#'  # 0x0023 -> NUMBER SIGN
    u'$'  # 0x0024 -> DOLLAR SIGN
    u'%'  # 0x0025 -> PERCENT SIGN
    u'&'  # 0x0026 -> AMPERSAND
    u"'"  # 0x0027 -> APOSTROPHE
    u'('  # 0x0028 -> LEFT PARENTHESIS
    u')'  # 0x0029 -> RIGHT PARENTHESIS
    u'*'  # 0x002a -> ASTERISK
    u'+'  # 0x002b -> PLUS SIGN
    u','  # 0x002c -> COMMA
    u'-'  # 0x002d -> HYPHEN-MINUS
    u'.'  # 0x002e -> FULL STOP
    u'/'  # 0x002f -> SOLIDUS
    u'0'  # 0x0030 -> DIGIT ZERO
    u'1'  # 0x0031 -> DIGIT ONE
    u'2'  # 0x0032 -> DIGIT TWO
    u'3'  # 0x0033 -> DIGIT THREE
    u'4'  # 0x0034 -> DIGIT FOUR
    u'5'  # 0x0035 -> DIGIT FIVE
    u'6'  # 0x0036 -> DIGIT SIX
    u'7'  # 0x0037 -> DIGIT SEVEN
    u'8'  # 0x0038 -> DIGIT EIGHT
    u'9'  # 0x0039 -> DIGIT NINE
    u':'  # 0x003a -> COLON
    u';'  # 0x003b -> SEMICOLON
    u'<'  # 0x003c -> LESS-THAN SIGN
    u'='  # 0x003d -> EQUALS SIGN
    u'>'  # 0x003e -> GREATER-THAN SIGN
    u'?'  # 0x003f -> QUESTION MARK
    u'@'  # 0x0040 -> COMMERCIAL AT
    u'A'  # 0x0041 -> LATIN CAPITAL LETTER A
    u'B'  # 0x0042 -> LATIN CAPITAL LETTER B
    u'C'  # 0x0043 -> LATIN CAPITAL LETTER C
    u'D'  # 0x0044 -> LATIN CAPITAL LETTER D
    u'E'  # 0x0045 -> LATIN CAPITAL LETTER E
    u'F'  # 0x0046 -> LATIN CAPITAL LETTER F
    u'G'  # 0x0047 -> LATIN CAPITAL LETTER G
    u'H'  # 0x0048 -> LATIN CAPITAL LETTER H
    u'I'  # 0x0049 -> LATIN CAPITAL LETTER I
    u'J'  # 0x004a -> LATIN CAPITAL LETTER J
    u'K'  # 0x004b -> LATIN CAPITAL LETTER K
    u'L'  # 0x004c -> LATIN CAPITAL LETTER L
    u'M'  # 0x004d -> LATIN CAPITAL LETTER M
    u'N'  # 0x004e -> LATIN CAPITAL LETTER N
    u'O'  # 0x004f -> LATIN CAPITAL LETTER O
    u'P'  # 0x0050 -> LATIN CAPITAL LETTER P
    u'Q'  # 0x0051 -> LATIN CAPITAL LETTER Q
    u'R'  # 0x0052 -> LATIN CAPITAL LETTER R
    u'S'  # 0x0053 -> LATIN CAPITAL LETTER S
    u'T'  # 0x0054 -> LATIN CAPITAL LETTER T
    u'U'  # 0x0055 -> LATIN CAPITAL LETTER U
    u'V'  # 0x0056 -> LATIN CAPITAL LETTER V
    u'W'  # 0x0057 -> LATIN CAPITAL LETTER W
    u'X'  # 0x0058 -> LATIN CAPITAL LETTER X
    u'Y'  # 0x0059 -> LATIN CAPITAL LETTER Y
    u'Z'  # 0x005a -> LATIN CAPITAL LETTER Z
    u'['  # 0x005b -> LEFT SQUARE BRACKET
    u'\\'  # 0x005c -> REVERSE SOLIDUS
    u']'  # 0x005d -> RIGHT SQUARE BRACKET
    u'^'  # 0x005e -> CIRCUMFLEX ACCENT
    u'_'  # 0x005f -> LOW LINE
    u'`'  # 0x0060 -> GRAVE ACCENT
    u'a'  # 0x0061 -> LATIN SMALL LETTER A
    u'b'  # 0x0062 -> LATIN SMALL LETTER B
    u'c'  # 0x0063 -> LATIN SMALL LETTER C
    u'd'  # 0x0064 -> LATIN SMALL LETTER D
    u'e'  # 0x0065 -> LATIN SMALL LETTER E
    u'f'  # 0x0066 -> LATIN SMALL LETTER F
    u'g'  # 0x0067 -> LATIN SMALL LETTER G
    u'h'  # 0x0068 -> LATIN SMALL LETTER H
    u'i'  # 0x0069 -> LATIN SMALL LETTER I
    u'j'  # 0x006a -> LATIN SMALL LETTER J
    u'k'  # 0x006b -> LATIN SMALL LETTER K
    u'l'  # 0x006c -> LATIN SMALL LETTER L
    u'm'  # 0x006d -> LATIN SMALL LETTER M
    u'n'  # 0x006e -> LATIN SMALL LETTER N
    u'o'  # 0x006f -> LATIN SMALL LETTER O
    u'p'  # 0x0070 -> LATIN SMALL LETTER P
    u'q'  # 0x0071 -> LATIN SMALL LETTER Q
    u'r'  # 0x0072 -> LATIN SMALL LETTER R
    u's'  # 0x0073 -> LATIN SMALL LETTER S
    u't'  # 0x0074 -> LATIN SMALL LETTER T
    u'u'  # 0x0075 -> LATIN SMALL LETTER U
    u'v'  # 0x0076 -> LATIN SMALL LETTER V
    u'w'  # 0x0077 -> LATIN SMALL LETTER W
    u'x'  # 0x0078 -> LATIN SMALL LETTER X
    u'y'  # 0x0079 -> LATIN SMALL LETTER Y
    u'z'  # 0x007a -> LATIN SMALL LETTER Z
    u'{'  # 0x007b -> LEFT CURLY BRACKET
    u'|'  # 0x007c -> VERTICAL LINE
    u'}'  # 0x007d -> RIGHT CURLY BRACKET
    u'~'  # 0x007e -> TILDE
    u'\x7f'  # 0x007f -> DELETE
    u'\u0410'  # 0x0080 -> CYRILLIC CAPITAL LETTER A
    u'\u0411'  # 0x0081 -> CYRILLIC CAPITAL LETTER BE
    u'\u0412'  # 0x0082 -> CYRILLIC CAPITAL LETTER VE
    u'\u0413'  # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
    u'\u0414'  # 0x0084 -> CYRILLIC CAPITAL LETTER DE
    u'\u0415'  # 0x0085 -> CYRILLIC CAPITAL LETTER IE
    u'\u0416'  # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
    u'\u0417'  # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
    u'\u0418'  # 0x0088 -> CYRILLIC CAPITAL LETTER I
    u'\u0419'  # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
    u'\u041a'  # 0x008a -> CYRILLIC CAPITAL LETTER KA
    u'\u041b'  # 0x008b -> CYRILLIC CAPITAL LETTER EL
    u'\u041c'  # 0x008c -> CYRILLIC CAPITAL LETTER EM
    u'\u041d'  # 0x008d -> CYRILLIC CAPITAL LETTER EN
    u'\u041e'  # 0x008e -> CYRILLIC CAPITAL LETTER O
    u'\u041f'  # 0x008f -> CYRILLIC CAPITAL LETTER PE
    u'\u0420'  # 0x0090 -> CYRILLIC CAPITAL LETTER ER
    u'\u0421'  # 0x0091 -> CYRILLIC CAPITAL LETTER ES
    u'\u0422'  # 0x0092 -> CYRILLIC CAPITAL LETTER TE
    u'\u0423'  # 0x0093 -> CYRILLIC CAPITAL LETTER U
    u'\u0424'  # 0x0094 -> CYRILLIC CAPITAL LETTER EF
    u'\u0425'  # 0x0095 -> CYRILLIC CAPITAL LETTER HA
    u'\u0426'  # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
    u'\u0427'  # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
    u'\u0428'  # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
    u'\u0429'  # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
    u'\u042a'  # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
    u'\u042b'  # 0x009b -> CYRILLIC CAPITAL LETTER YERU
    u'\u042c'  # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
    u'\u042d'  # 0x009d -> CYRILLIC CAPITAL LETTER E
    u'\u042e'  # 0x009e -> CYRILLIC CAPITAL LETTER YU
    u'\u042f'  # 0x009f -> CYRILLIC CAPITAL LETTER YA
    u'\u0430'  # 0x00a0 -> CYRILLIC SMALL LETTER A
    u'\u0431'  # 0x00a1 -> CYRILLIC SMALL LETTER BE
    u'\u0432'  # 0x00a2 -> CYRILLIC SMALL LETTER VE
    u'\u0433'  # 0x00a3 -> CYRILLIC SMALL LETTER GHE
    u'\u0434'  # 0x00a4 -> CYRILLIC SMALL LETTER DE
    u'\u0435'  # 0x00a5 -> CYRILLIC SMALL LETTER IE
    u'\u0436'  # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
    u'\u0437'  # 0x00a7 -> CYRILLIC SMALL LETTER ZE
    u'\u0438'  # 0x00a8 -> CYRILLIC SMALL LETTER I
    u'\u0439'  # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
    u'\u043a'  # 0x00aa -> CYRILLIC SMALL LETTER KA
    u'\u043b'  # 0x00ab -> CYRILLIC SMALL LETTER EL
    u'\u043c'  # 0x00ac -> CYRILLIC SMALL LETTER EM
    u'\u043d'  # 0x00ad -> CYRILLIC SMALL LETTER EN
    u'\u043e'  # 0x00ae -> CYRILLIC SMALL LETTER O
    u'\u043f'  # 0x00af -> CYRILLIC SMALL LETTER PE
    u'\u2591'  # 0x00b0 -> LIGHT SHADE
    u'\u2592'  # 0x00b1 -> MEDIUM SHADE
    u'\u2593'  # 0x00b2 -> DARK SHADE
    u'\u2502'  # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    u'\u2524'  # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    u'\u2561'  # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    u'\u2562'  # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    u'\u2556'  # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    u'\u2555'  # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    u'\u2563'  # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    u'\u2551'  # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    u'\u2557'  # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    u'\u255d'  # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    u'\u255c'  # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    u'\u255b'  # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    u'\u2510'  # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    u'\u2514'  # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    u'\u2534'  # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    u'\u252c'  # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    u'\u251c'  # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    u'\u2500'  # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    u'\u253c'  # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    u'\u255e'  # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    u'\u255f'  # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    u'\u255a'  # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    u'\u2554'  # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    u'\u2569'  # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    u'\u2566'  # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    u'\u2560'  # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    u'\u2550'  # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    u'\u256c'  # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    u'\u2567'  # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    u'\u2568'  # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    u'\u2564'  # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    u'\u2565'  # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    u'\u2559'  # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    u'\u2558'  # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    u'\u2552'  # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    u'\u2553'  # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    u'\u256b'  # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    u'\u256a'  # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    u'\u2518'  # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    u'\u250c'  # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    u'\u2588'  # 0x00db -> FULL BLOCK
    u'\u2584'  # 0x00dc -> LOWER HALF BLOCK
    u'\u258c'  # 0x00dd -> LEFT HALF BLOCK
    u'\u2590'  # 0x00de -> RIGHT HALF BLOCK
    u'\u2580'  # 0x00df -> UPPER HALF BLOCK
    u'\u0440'  # 0x00e0 -> CYRILLIC SMALL LETTER ER
    u'\u0441'  # 0x00e1 -> CYRILLIC SMALL LETTER ES
    u'\u0442'  # 0x00e2 -> CYRILLIC SMALL LETTER TE
    u'\u0443'  # 0x00e3 -> CYRILLIC SMALL LETTER U
    u'\u0444'  # 0x00e4 -> CYRILLIC SMALL LETTER EF
    u'\u0445'  # 0x00e5 -> CYRILLIC SMALL LETTER HA
    u'\u0446'  # 0x00e6 -> CYRILLIC SMALL LETTER TSE
    u'\u0447'  # 0x00e7 -> CYRILLIC SMALL LETTER CHE
    u'\u0448'  # 0x00e8 -> CYRILLIC SMALL LETTER SHA
    u'\u0449'  # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
    u'\u044a'  # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
    u'\u044b'  # 0x00eb -> CYRILLIC SMALL LETTER YERU
    u'\u044c'  # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
    u'\u044d'  # 0x00ed -> CYRILLIC SMALL LETTER E
    u'\u044e'  # 0x00ee -> CYRILLIC SMALL LETTER YU
    u'\u044f'  # 0x00ef -> CYRILLIC SMALL LETTER YA
    u'\u0401'  # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
    u'\u0451'  # 0x00f1 -> CYRILLIC SMALL LETTER IO
    u'\u0404'  # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
    u'\u0454'  # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
    u'\u0407'  # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
    u'\u0457'  # 0x00f5 -> CYRILLIC SMALL LETTER YI
    u'\u040e'  # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
    u'\u045e'  # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
    u'\xb0'  # 0x00f8 -> DEGREE SIGN
    u'\u2219'  # 0x00f9 -> BULLET OPERATOR
    u'\xb7'  # 0x00fa -> MIDDLE DOT
    u'\u221a'  # 0x00fb -> SQUARE ROOT
    u'\u2116'  # 0x00fc -> NUMERO SIGN
    u'\xa4'  # 0x00fd -> CURRENCY SIGN
    u'\u25a0'  # 0x00fe -> BLACK SQUARE
    u'\xa0'  # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# The encoding map is the exact inverse of `decoding_table` above: CP866
# maps each of its 256 byte values to a distinct code point, so the inverse
# dict can be derived mechanically instead of maintaining a second,
# redundant 256-entry literal that could drift out of sync with the table.
# The result is a plain dict with exactly the same contents as the literal
# it replaces (code point -> byte value), so external consumers that
# subscript or iterate `encoding_map` are unaffected.
encoding_map = dict((ord(ch), byte) for byte, ch in enumerate(decoding_table))
|
Tiotao/morpherpy | refs/heads/master | env/Lib/site-packages/requests/api.py | 637 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) Float describing the timeout of the request.
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.

    Usage::

        >>> import requests
        >>> req = requests.request('GET', 'http://httpbin.org/get')
        <Response [200]>
    """
    # Use the throwaway session as a context manager so its connection pool
    # (and underlying sockets) is closed deterministically -- even when the
    # request raises -- instead of leaking until garbage collection.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
    """Send a GET request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: optional arguments forwarded to ``request``.
    """
    # GETs follow redirects unless the caller explicitly opted out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, **kwargs)
def options(url, **kwargs):
    """Send an OPTIONS request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: optional arguments forwarded to ``request``.
    """
    # OPTIONS follows redirects unless the caller explicitly opted out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    """Send a HEAD request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: optional arguments forwarded to ``request``.
    """
    # Unlike GET, HEAD does NOT follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
    """Send a POST request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: optional arguments forwarded to ``request``.
    """
    # `data` can never collide with kwargs: it is a named parameter.
    kwargs['data'] = data
    return request('post', url, **kwargs)
def put(url, data=None, **kwargs):
    """Send a PUT request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: optional arguments forwarded to ``request``.
    """
    # `data` can never collide with kwargs: it is a named parameter.
    kwargs['data'] = data
    return request('put', url, **kwargs)
def patch(url, data=None, **kwargs):
    """Send a PATCH request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: optional arguments forwarded to ``request``.
    """
    # `data` can never collide with kwargs: it is a named parameter.
    kwargs['data'] = data
    return request('patch', url, **kwargs)
def delete(url, **kwargs):
    """Send a DELETE request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: optional arguments forwarded to ``request``.
    """
    # DELETE takes no body parameter here; everything rides in kwargs.
    return request('delete', url, **kwargs)
|
Pablo126/SSBW | refs/heads/master | Tarea4/tarea4/lib/python3.5/site-packages/django/contrib/gis/gdal/libgdal.py | 64 | from __future__ import unicode_literals
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, c_char_p, c_int
from ctypes.util import find_library
from django.contrib.gis.gdal.error import GDALException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
    from django.conf import settings
    lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, EnvironmentError,
        ImportError, ImproperlyConfigured):
    # Any failure reading settings (unconfigured Django, missing setting,
    # broken settings module) just means we fall back to name probing below.
    lib_path = None
if lib_path:
    # An explicit path was configured: skip the name-based search entirely.
    lib_names = None
elif os.name == 'nt':
    # Windows NT shared libraries
    lib_names = ['gdal111', 'gdal110', 'gdal19', 'gdal18', 'gdal17']
elif os.name == 'posix':
    # *NIX library names.
    lib_names = ['gdal', 'GDAL', 'gdal1.11.0', 'gdal1.10.0', 'gdal1.9.0', 'gdal1.8.0', 'gdal1.7.0']
else:
    raise GDALException('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
    for lib_name in lib_names:
        lib_path = find_library(lib_name)
        if lib_path is not None:
            # First match wins; the candidate lists are ordered newest-first.
            break
if lib_path is None:
    raise GDALException(
        'Could not find the GDAL library (tried "%s"). Try setting '
        'GDAL_LIBRARY_PATH in your settings.' % '", "'.join(lib_names)
    )
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == 'nt':
    from ctypes import WinDLL
    lwingdal = WinDLL(lib_path)
def std_call(func):
    """Return the named routine from the appropriate GDAL binding.

    On Win32 some OSR routines are exported STDCALL, so the WinDLL-loaded
    handle is used there; everywhere else the plain CDLL handle is used.
    """
    library = lwingdal if os.name == 'nt' else lgdal
    return library[func]
# #### Version-information functions. ####
# Returns GDAL library version information with the given key.
_version_info = std_call('GDALVersionInfo')
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
    "Returns only the GDAL version number information."
    # The key must be bytes: the argtype declared above is c_char_p.
    return _version_info(b'RELEASE_NAME')
def gdal_full_version():
    """Return the full GDAL version information.

    The empty key is passed as ``bytes``: ``_version_info`` is declared with
    ``argtypes = [c_char_p]``, so a ``str`` argument raises
    ``ctypes.ArgumentError`` on Python 3 (compare ``gdal_version``, which
    already passes ``b'RELEASE_NAME'``).
    """
    return _version_info(b'')
version_regex = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<subminor>\d+))?')
def gdal_version_info():
    """Parse gdal_version() into a dict of major/minor/subminor strings."""
    ver = gdal_version().decode()
    m = version_regex.match(ver)
    if not m:
        raise GDALException('Could not parse GDAL version string "%s"' % ver)
    return {key: m.group(key) for key in ('major', 'minor', 'subminor')}
_verinfo = gdal_version_info()
GDAL_MAJOR_VERSION = int(_verinfo['major'])
GDAL_MINOR_VERSION = int(_verinfo['minor'])
# 'subminor' is optional in the regex; when absent this stays None (falsy).
GDAL_SUBMINOR_VERSION = _verinfo['subminor'] and int(_verinfo['subminor'])
GDAL_VERSION = (GDAL_MAJOR_VERSION, GDAL_MINOR_VERSION, GDAL_SUBMINOR_VERSION)
del _verinfo
# Set library error handling so as errors are logged
CPLErrorHandler = CFUNCTYPE(None, c_int, c_int, c_char_p)
def err_handler(error_class, error_number, message):
    """ctypes callback: route GDAL error messages into Django's logger."""
    logger.error('GDAL_ERROR %d: %s', error_number, message)
# Rebind the name to the ctypes wrapper; keeping a module-level reference
# prevents the callback object from being garbage-collected while registered.
err_handler = CPLErrorHandler(err_handler)
def function(name, args, restype):
    """Fetch *name* from the GDAL library and attach its ctypes signature."""
    prototype = std_call(name)
    prototype.argtypes = args
    prototype.restype = restype
    return prototype
# Register the logging error handler with GDAL. The module-level name also
# keeps the ctypes callback alive for the lifetime of the process.
set_error_handler = function('CPLSetErrorHandler', [CPLErrorHandler], CPLErrorHandler)
set_error_handler(err_handler)
|
gdi2290/django | refs/heads/master | django/contrib/auth/tests/test_tokens.py | 117 | from datetime import date, timedelta
import sys
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
@skipIfCustomUser
class TokenGeneratorTest(TestCase):
    """Tests for PasswordResetTokenGenerator's make_token/check_token pair."""
    def test_make_token(self):
        """
        Ensure that we can make a token and that it is valid
        """
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        self.assertTrue(p0.check_token(user, tk1))
    def test_10265(self):
        """
        Ensure that the token generated for a user created in the same request
        will work correctly.
        """
        # See ticket #10265
        user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # Re-fetching the user from the DB must yield an identical token.
        reload = User.objects.get(username='comebackkid')
        tk2 = p0.make_token(reload)
        self.assertEqual(tk1, tk2)
    def test_timeout(self):
        """
        Ensure we can use the token after n days, but no greater.
        """
        # Uses a mocked version of PasswordResetTokenGenerator so we can change
        # the value of 'today'
        class Mocked(PasswordResetTokenGenerator):
            def __init__(self, today):
                self._today_val = today
            def _today(self):
                return self._today_val
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # Still valid on the last allowed day ...
        p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
        self.assertTrue(p1.check_token(user, tk1))
        # ... but expired one day later.
        p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
        self.assertFalse(p2.check_token(user, tk1))
    @unittest.skipIf(sys.version_info[:2] >= (3, 0), "Unnecessary test with Python 3")
    def test_date_length(self):
        """
        Make sure we don't allow overly long dates, causing a potential DoS.
        """
        user = User.objects.create_user('ima1337h4x0r', 'test4@example.com', 'p4ssw0rd')
        p0 = PasswordResetTokenGenerator()
        # This will put a 14-digit base36 timestamp into the token, which is too large.
        self.assertRaises(ValueError,
                          p0._make_token_with_timestamp,
                          user, 175455491841851871349)
|
akionakamura/scikit-learn | refs/heads/master | examples/datasets/plot_random_dataset.py | 348 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
# Lay out a 3x2 grid of example datasets on a single figure.
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
          fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
# make_blobs: isotropic Gaussian clusters.
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
# make_gaussian_quantiles: a single Gaussian split into nested quantile classes.
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
|
miltonruelas/cursotecnico | refs/heads/7.0 | branch/stock_serial/__openerp__.py | 1 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Stock and Production Serial Number",
"version": "1.0",
"description": """
Adds unique serial numbers to the production lots and stock moves
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Warehouse",
"depends": ["stock",
"sale",
],
"data":[
"stock_view.xml",
"product_view.xml",
"stock_serial_view.xml",
"stock_serial.xml",
],
"demo_xml": [],
"update_xml": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sourlows/rating-cruncher | refs/heads/master | test/lib/pbr/packaging.py | 9 | # Copyright 2011 OpenStack LLC.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
from __future__ import unicode_literals
from distutils.command import install as du_install
from distutils import log
import email
import os
import re
import sys
import pkg_resources
from setuptools.command import develop
from setuptools.command import easy_install
from setuptools.command import egg_info
from setuptools.command import install
from setuptools.command import install_scripts
from setuptools.command import sdist
from pbr import extra_files
from pbr import git
from pbr import options
import pbr.pbr_json
from pbr import testr_command
from pbr import version
# Candidate requirements files, probed in order; the tools/ names are the
# legacy (pre-pbr) OpenStack locations.
REQUIREMENTS_FILES = ('requirements.txt', 'tools/pip-requires')
TEST_REQUIREMENTS_FILES = ('test-requirements.txt', 'tools/test-requires')
def get_requirements_files():
    """Return the sequence of requirements files to consult.

    The ``PBR_REQUIREMENTS_FILES`` environment variable wins when set and is
    read as a comma-separated list. Otherwise the defaults are returned with
    python-major-version-specific variants (e.g. ``requirements-py3.txt``)
    listed ahead of the generic names.
    """
    override = os.environ.get("PBR_REQUIREMENTS_FILES")
    if override:
        return tuple(name.strip() for name in override.split(','))
    suffix = '-py' + str(sys.version_info[0])
    versioned = [suffix.join(os.path.splitext(name))
                 for name in REQUIREMENTS_FILES]
    return versioned + list(REQUIREMENTS_FILES)
def append_text_list(config, key, text_list):
    """Append *text_list* entries to ``config[key]``, newline-separated.

    Any existing (non-empty) value for *key* is preserved as the first line.
    """
    parts = []
    existing = config.get(key, "")
    if existing:
        parts.append(existing)
    parts.extend(text_list)
    config[key] = '\n'.join(parts)
def _any_existing(file_list):
return [f for f in file_list if os.path.exists(f)]
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
    """Return the lines of the first existing requirements file, else []."""
    existing = _any_existing(requirements_files)
    if not existing:
        return []
    with open(existing[0], 'r') as handle:
        return handle.read().split('\n')
def parse_requirements(requirements_files=None, strip_markers=False):
    """Parse requirements files into a list of pip-style requirement strings.

    :param requirements_files: Iterable of file names to read; defaults to
        the result of get_requirements_files().
    :param strip_markers: When True, drop any environment marker (the part
        after ';') from each requirement line.
    """
    if requirements_files is None:
        requirements_files = get_requirements_files()
    def egg_fragment(match):
        # take a versioned egg fragment and return a
        # versioned package requirement e.g.
        # nova-1.2.3 becomes nova>=1.2.3
        return re.sub(r'([\w.]+)-([\w.-]+)',
                      r'\1>=\2',
                      match.group(1))
    requirements = []
    for line in get_reqs_from_files(requirements_files):
        # Ignore comments
        if (not line.strip()) or line.startswith('#'):
            continue
        # Handle nested requirements files such as:
        # -r other-requirements.txt
        if line.startswith('-r'):
            req_file = line.partition(' ')[2]
            requirements += parse_requirements(
                [req_file], strip_markers=strip_markers)
            continue
        try:
            project_name = pkg_resources.Requirement.parse(line).project_name
        except ValueError:
            project_name = None
        # For the requirements list, we need to inject only the portion
        # after egg= so that distutils knows the package it's looking for
        # such as:
        # -e git://github.com/openstack/nova/master#egg=nova
        # -e git://github.com/openstack/nova/master#egg=nova-1.2.3
        if re.match(r'\s*-e\s+', line):
            line = re.sub(r'\s*-e\s+.*#egg=(.*)$', egg_fragment, line)
        # such as:
        # http://github.com/openstack/nova/zipball/master#egg=nova
        # http://github.com/openstack/nova/zipball/master#egg=nova-1.2.3
        elif re.match(r'\s*https?:', line):
            line = re.sub(r'\s*https?:.*#egg=(.*)$', egg_fragment, line)
        # -f lines are for index locations, and don't get used here
        elif re.match(r'\s*-f\s+', line):
            line = None
            reason = 'Index Location'
        if line is not None:
            if strip_markers:
                # Truncate at the ';' marker separator, if any.
                semi_pos = line.find(';')
                if semi_pos < 0:
                    semi_pos = None
                line = line[:semi_pos]
            requirements.append(line)
        else:
            # NOTE: 'reason' is only assigned in the '-f' branch above, which
            # is also the only branch that sets 'line' to None, so this is safe.
            log.info(
                '[pbr] Excluding %s: %s' % (project_name, reason))
    return requirements
def parse_dependency_links(requirements_files=None):
    """Extract dependency-link URLs from the requirements files.

    '-e'/'-f' entries contribute their argument; bare http(s) URLs pass
    through unchanged; everything else is ignored.
    """
    if requirements_files is None:
        requirements_files = get_requirements_files()
    links = []
    for line in get_reqs_from_files(requirements_files):
        # Skip comments and blank lines.
        if re.match(r'(\s*#)|(\s*$)', line):
            continue
        if re.match(r'\s*-[ef]\s+', line):
            # '-e' / '-f' entries: keep everything after the flag.
            links.append(re.sub(r'\s*-[ef]\s+', '', line))
        elif re.match(r'\s*https?:', line):
            # Bare URLs go in unmolested.
            links.append(line)
    return links
class InstallWithGit(install.install):
    """Extracts ChangeLog and AUTHORS from git then installs.
    This is useful for e.g. readthedocs where the package is
    installed and then docs built.
    """
    command_name = 'install'
    def run(self):
        # Regenerate ChangeLog/AUTHORS from git metadata, then run the
        # standard setuptools install.
        _from_git(self.distribution)
        return install.install.run(self)
class LocalInstall(install.install):
    """Runs python setup.py install in a sensible manner.
    Force a non-egg installed in the manner of
    single-version-externally-managed, which allows us to install manpages
    and config files.
    """
    command_name = 'install'
    def run(self):
        _from_git(self.distribution)
        # Deliberately invoke the *distutils* install (not setuptools') to
        # get the non-egg layout described in the class docstring.
        return du_install.install.run(self)
class TestrTest(testr_command.Testr):
    """Make setup.py test do the right thing."""
    command_name = 'test'
    def run(self):
        # Can't use super - base class old-style class
        testr_command.Testr.run(self)
def have_testr():
    # True when the testr test runner support is available.
    return testr_command.have_testr
try:
    from nose import commands
    class NoseTest(commands.nosetests):
        """Fallback test runner if testr is a no-go."""
        command_name = 'test'
        def run(self):
            # Can't use super - base class old-style class
            commands.nosetests.run(self)
    _have_nose = True
except ImportError:
    # nose is optional; record its absence instead of failing at import.
    _have_nose = False
def have_nose():
    # True when nose was importable at module load time.
    return _have_nose
# Script body templates, filled via %-substitution in override_get_script_args.
# '%%' inside the templates escapes a literal '%' through that substitution.
_wsgi_text = """#PBR Generated from %(group)r
from %(module_name)s import %(import_target)s
if __name__ == "__main__":
    import argparse
    import socket
    import wsgiref.simple_server as wss
    my_ip = socket.gethostbyname(socket.gethostname())
    parser = argparse.ArgumentParser(
        description=%(import_target)s.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--port', '-p', type=int, default=8000,
                        help='TCP port to listen on')
    args = parser.parse_args()
    server = wss.make_server('', args.port, %(invoke_target)s())
    print("*" * 80)
    print("STARTING test server %(module_name)s.%(invoke_target)s")
    url = "http://%%s:%%d/" %% (my_ip, server.server_port)
    print("Available at %%s" %% url)
    print("DANGER! For testing only, do not use in production")
    print("*" * 80)
    server.serve_forever()
else:
    application = %(invoke_target)s()
"""
_script_text = """# PBR Generated from %(group)r
import sys
from %(module_name)s import %(import_target)s
if __name__ == "__main__":
    sys.exit(%(invoke_target)s())
"""
# the following allows us to specify different templates per entry
# point group when generating pbr scripts.
ENTRY_POINTS_MAP = {
    'console_scripts': _script_text,
    'gui_scripts': _script_text,
    'wsgi_scripts': _wsgi_text
}
def override_get_script_args(
        dist, executable=os.path.normpath(sys.executable), is_wininst=False):
    """Override entrypoints console_script.

    Yields (script_name, script_contents) pairs for every entry point in the
    groups listed in ENTRY_POINTS_MAP.
    NOTE(review): the 'executable' default is evaluated once at import time
    and so is fixed to the interpreter that imported this module.
    """
    header = easy_install.get_script_header("", executable, is_wininst)
    for group, template in ENTRY_POINTS_MAP.items():
        for name, ep in dist.get_entry_map(group).items():
            if not ep.attrs or len(ep.attrs) > 2:
                raise ValueError("Script targets must be of the form "
                                 "'func' or 'Class.class_method'.")
            script_text = template % dict(
                group=group,
                module_name=ep.module_name,
                import_target=ep.attrs[0],
                invoke_target='.'.join(ep.attrs),
            )
            yield (name, header + script_text)
class LocalDevelop(develop.develop):
    """develop command that writes pbr-templated wrapper scripts off Win32."""
    command_name = 'develop'
    def install_wrapper_scripts(self, dist):
        if sys.platform == 'win32':
            # On Windows defer entirely to the setuptools implementation.
            return develop.develop.install_wrapper_scripts(self, dist)
        if not self.exclude_scripts:
            for args in override_get_script_args(dist):
                self.write_script(*args)
class LocalInstallScripts(install_scripts.install_scripts):
    """Intercepts console scripts entry_points."""
    command_name = 'install_scripts'
    def run(self):
        import distutils.command.install_scripts
        self.run_command("egg_info")
        if self.distribution.scripts:
            # run first to set up self.outfiles
            distutils.command.install_scripts.install_scripts.run(self)
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return
        # Build a pkg_resources Distribution for the freshly written
        # egg-info so its entry points can be enumerated.
        ei_cmd = self.get_finalized_command("egg_info")
        dist = pkg_resources.Distribution(
            ei_cmd.egg_base,
            pkg_resources.PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        executable = getattr(
            bs_cmd, 'executable', easy_install.sys_executable)
        is_wininst = getattr(
            self.get_finalized_command("bdist_wininst"), '_is_running', False
        )
        # Use pbr's templated script writer except on Windows, where the
        # setuptools implementation (and a quoted executable) is required.
        if os.name != 'nt':
            get_script_args = override_get_script_args
        else:
            get_script_args = easy_install.get_script_args
            executable = '"%s"' % executable
        for args in get_script_args(dist, executable, is_wininst):
            self.write_script(*args)
class LocalManifestMaker(egg_info.manifest_maker):
    """Add any files that are in git and some standard sensible files."""
    def _add_pbr_defaults(self):
        # Files pbr always includes/excludes regardless of git state.
        for template_line in [
            'include AUTHORS',
            'include ChangeLog',
            'exclude .gitignore',
            'exclude .gitreview',
            'global-exclude *.pyc'
        ]:
            self.filelist.process_template_line(template_line)
    def add_defaults(self):
        """Extend the sdist defaults with git-tracked and pbr default files."""
        option_dict = self.distribution.get_option_dict('pbr')
        sdist.sdist.add_defaults(self)
        self.filelist.append(self.template)
        self.filelist.append(self.manifest)
        self.filelist.extend(extra_files.get_extra_files())
        should_skip = options.get_boolean_option(option_dict, 'skip_git_sdist',
                                                 'SKIP_GIT_SDIST')
        if not should_skip:
            rcfiles = git._find_git_files()
            if rcfiles:
                self.filelist.extend(rcfiles)
        elif os.path.exists(self.manifest):
            # Git explicitly skipped: fall back to any existing manifest.
            self.read_manifest()
        ei_cmd = self.get_finalized_command('egg_info')
        self._add_pbr_defaults()
        self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
class LocalEggInfo(egg_info.egg_info):
    """Override the egg_info command to regenerate SOURCES.txt sensibly."""

    command_name = 'egg_info'

    def find_sources(self):
        """Generate SOURCES.txt only if there isn't one already.

        If we are in an sdist command, then we always want to update
        SOURCES.txt. If we are not in an sdist command, then it doesn't
        matter one flip, and is actually destructive.
        However, if we're in a git context, it's always the right thing to do
        to recreate SOURCES.txt
        """
        manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
        if (not os.path.exists(manifest_filename) or
                os.path.exists('.git') or
                'sdist' in sys.argv):
            log.info("[pbr] Processing SOURCES.txt")
            mm = LocalManifestMaker(self.distribution)
            mm.manifest = manifest_filename
            mm.run()
            self.filelist = mm.filelist
        else:
            log.info("[pbr] Reusing existing SOURCES.txt")
            self.filelist = egg_info.FileList()
            # Use a context manager so the manifest file handle is closed
            # deterministically (the original leaked the open file object).
            with open(manifest_filename, 'r') as manifest_file:
                for entry in manifest_file.read().split('\n'):
                    self.filelist.append(entry)
def _from_git(distribution):
    """Generate ChangeLog and AUTHORS from git history, when available."""
    option_dict = distribution.get_option_dict('pbr')
    changelog = git._iter_log_oneline(option_dict=option_dict)
    if changelog:
        changelog = git._iter_changelog(changelog)
        git.write_git_changelog(option_dict=option_dict, changelog=changelog)
    git.generate_authors(option_dict=option_dict)
class LocalSDist(sdist.sdist):
    """Builds the ChangeLog and Authors files from VC first."""
    command_name = 'sdist'
    def run(self):
        _from_git(self.distribution)
        # sdist.sdist is an old style class, can't use super()
        sdist.sdist.run(self)
try:
    from pbr import builddoc
    _have_sphinx = True
    # Import the symbols from their new home so the package API stays
    # compatible.
    LocalBuildDoc = builddoc.LocalBuildDoc
    LocalBuildLatex = builddoc.LocalBuildLatex
except ImportError:
    # Sphinx support (via pbr.builddoc) is optional.
    _have_sphinx = False
    LocalBuildDoc = None
    LocalBuildLatex = None
def have_sphinx():
    # True when pbr.builddoc (and hence sphinx) imported successfully.
    return _have_sphinx
def _get_increment_kwargs(git_dir, tag):
    """Calculate the sort of semver increment needed from git history.
    Every commit from HEAD to tag is consider for Sem-Ver metadata lines.
    See the pbr docs for their syntax.
    :return: a dict of kwargs for passing into SemanticVersion.increment.
    """
    result = {}
    if tag:
        version_spec = tag + "..HEAD"
    else:
        # No tag yet: scan the whole history.
        version_spec = "HEAD"
    changelog = git._run_git_command(['log', version_spec], git_dir)
    header_len = len('    sem-ver:')
    commands = [line[header_len:].strip() for line in changelog.split('\n')
                if line.lower().startswith('    sem-ver:')]
    symbols = set()
    for command in commands:
        symbols.update([symbol.strip() for symbol in command.split(',')])
    def _handle_symbol(symbol, symbols, impact):
        # Record the impact and consume the symbol so leftovers can be logged.
        if symbol in symbols:
            result[impact] = True
            symbols.discard(symbol)
    _handle_symbol('bugfix', symbols, 'patch')
    _handle_symbol('feature', symbols, 'minor')
    _handle_symbol('deprecation', symbols, 'minor')
    _handle_symbol('api-break', symbols, 'major')
    for symbol in symbols:
        log.info('[pbr] Unknown Sem-Ver symbol %r' % symbol)
    # We don't want patch in the kwargs since it is not a keyword argument -
    # its the default minimum increment.
    result.pop('patch', None)
    return result
def _get_revno_and_last_tag(git_dir):
    """Return the commit data about the most recent tag.
    We use git-describe to find this out, but if there are no
    tags then we fall back to counting commits since the beginning
    of time.
    :return: ('' or release string of latest version tag, distance in commits)
    """
    changelog = git._iter_log_oneline(git_dir=git_dir)
    row_count = 0
    for row_count, (ignored, tag_set, ignored) in enumerate(changelog):
        version_tags = set()
        for tag in list(tag_set):
            try:
                version_tags.add(version.SemanticVersion.from_pip_string(tag))
            except Exception:
                # Tags that are not version numbers are simply skipped.
                pass
        if version_tags:
            return max(version_tags).release_string(), row_count
    return "", row_count
def _get_version_from_git_target(git_dir, target_version):
    """Calculate a version from a target version in git_dir.
    This is used for untagged versions only. A new version is calculated as
    necessary based on git metadata - distance to tags, current hash, contents
    of commit messages.
    :param git_dir: The git directory we're working from.
    :param target_version: If None, the last tagged version (or 0 if there are
        no tags yet) is incremented as needed to produce an appropriate target
        version following semver rules. Otherwise target_version is used as a
        constraint - if semver rules would result in a newer version then an
        exception is raised.
    :return: A semver version object.
    """
    tag, distance = _get_revno_and_last_tag(git_dir)
    last_semver = version.SemanticVersion.from_pip_string(tag or '0')
    if distance == 0:
        new_version = last_semver
    else:
        # Increment according to the Sem-Ver annotations found in history.
        new_version = last_semver.increment(
            **_get_increment_kwargs(git_dir, tag))
    if target_version is not None and new_version > target_version:
        raise ValueError(
            "git history requires a target version of %(new)s, but target "
            "version is %(target)s" %
            dict(new=new_version, target=target_version))
    if distance == 0:
        return last_semver
    new_dev = new_version.to_dev(distance)
    if target_version is not None:
        # Prefer the target's dev version when it is ahead of the computed one.
        target_dev = target_version.to_dev(distance)
        if target_dev > new_dev:
            return target_dev
    return new_dev
def _get_version_from_git(pre_version=None):
    """Calculate a version string from git.
    If the revision is tagged, return that. Otherwise calculate a semantic
    version description of the tree.
    The number of revisions since the last tag is included in the dev counter
    in the version for untagged versions.
    :param pre_version: If supplied use this as the target version rather than
        inferring one from the last tag + commit messages.
    """
    git_dir = git._run_git_functions()
    if git_dir:
        try:
            # An exact tag match means this commit IS a release.
            tagged = git._run_git_command(
                ['describe', '--exact-match'], git_dir,
                throw_on_error=True).replace('-', '.')
            target_version = version.SemanticVersion.from_pip_string(tagged)
        except Exception:
            if pre_version:
                # not released yet - use pre_version as the target
                target_version = version.SemanticVersion.from_pip_string(
                    pre_version)
            else:
                # not released yet - just calculate from git history
                target_version = None
        result = _get_version_from_git_target(git_dir, target_version)
        return result.release_string()
    # If we don't know the version, return an empty string so at least
    # the downstream users of the value always have the same type of
    # object to work with.
    try:
        return unicode()
    except NameError:
        # Python 3: 'unicode' does not exist, so return a plain str ''.
        return ''
def _get_version_from_pkg_metadata(package_name):
"""Get the version from package metadata if present.
This looks for PKG-INFO if present (for sdists), and if not looks
for METADATA (for wheels) and failing that will return None.
"""
pkg_metadata_filenames = ['PKG-INFO', 'METADATA']
pkg_metadata = {}
for filename in pkg_metadata_filenames:
try:
pkg_metadata_file = open(filename, 'r')
except (IOError, OSError):
continue
try:
pkg_metadata = email.message_from_file(pkg_metadata_file)
except email.MessageError:
continue
# Check to make sure we're in our own dir
if pkg_metadata.get('Name', None) != package_name:
return None
return pkg_metadata.get('Version', None)
def get_version(package_name, pre_version=None):
    """Get the version of the project. First, try getting it from PKG-INFO or
    METADATA, if it exists. If it does, that means we're in a distribution
    tarball or that install has happened. Otherwise, if there is no PKG-INFO
    or METADATA file, pull the version from git.
    We do not support setup.py version sanity in git archive tarballs, nor do
    we support packagers directly sucking our git repo into theirs. We expect
    that a source tarball be made from our git repo - or that if someone wants
    to make a source tarball from a fork of our repo with additional tags in it
    that they understand and desire the results of doing that.
    :param pre_version: The version field from setup.cfg - if set then this
        version will be the next release.
    """
    # NOTE: the local name 'version' shadows the imported pbr 'version' module
    # inside this function; the module is not used here, so it is harmless.
    version = os.environ.get(
        "PBR_VERSION",
        os.environ.get("OSLO_PACKAGE_VERSION", None))
    if version:
        return version
    version = _get_version_from_pkg_metadata(package_name)
    if version:
        return version
    version = _get_version_from_git(pre_version)
    # Handle http://bugs.python.org/issue11638
    # version will either be an empty unicode string or a valid
    # unicode version string, but either way it's unicode and needs to
    # be encoded.
    if sys.version_info[0] == 2:
        version = version.encode('utf-8')
    if version:
        return version
    raise Exception("Versioning for this project requires either an sdist"
                    " tarball, or access to an upstream git repository."
                    " Are you sure that git is installed?")
# This is added because pbr uses pbr to install itself. That means that
# any changes to the egg info writer entrypoints must be forward and
# backward compatible. This maintains the pbr.packaging.write_pbr_json
# path.
write_pbr_json = pbr.pbr_json.write_pbr_json
|
veger/ansible | refs/heads/devel | test/units/modules/network/f5/test_bigip_device_group_member.py | 21 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_group_member import Parameters
from library.modules.bigip_device_group_member import ModuleManager
from library.modules.bigip_device_group_member import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_group_member import Parameters
from ansible.modules.network.f5.bigip_device_group_member import ModuleManager
from ansible.modules.network.f5.bigip_device_group_member import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Read a fixture file, JSON-decode it when possible, and memoize it."""
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not valid JSON: hand back the raw text instead.
        pass
    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Checks that constructor args surface as Parameters attributes."""
    def test_module_parameters(self):
        params = Parameters(params=dict(name='bigip1', device_group='dg1'))
        assert params.name == 'bigip1'
        assert params.device_group == 'dg1'
class TestManager(unittest.TestCase):
    """Exercises ModuleManager.exec_module with mocked device calls."""
    def setUp(self):
        # Shared argument spec for building AnsibleModule instances.
        self.spec = ArgumentSpec()
    def test_create(self, *args):
        """A new member should be created when it does not already exist."""
        set_module_args(
            dict(
                name="bigip1",
                device_group="dg1",
                state="present",
                server='localhost',
                user='admin',
                password='password'
            )
        )
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)
        results = mm.exec_module()
        assert results['changed'] is True
|
bolkedebruin/airflow | refs/heads/master | airflow/migrations/versions/0a2a5b66e19d_add_task_reschedule_table.py | 6 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add task_reschedule table
Revision ID: 0a2a5b66e19d
Revises: 9635ae0956e7
Create Date: 2018-06-17 22:50:00.053620
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '0a2a5b66e19d'
down_revision = '9635ae0956e7'
branch_labels = None
depends_on = None
# Name of the table created by this migration and of its lookup index.
TABLE_NAME = 'task_reschedule'
INDEX_NAME = 'idx_' + TABLE_NAME + '_dag_task_date'
# For Microsoft SQL Server, TIMESTAMP is a row-id type,
# having nothing to do with date-time. DateTime() will
# be sufficient.
def mssql_timestamp():
    """Return a date-time column type suitable for MS SQL Server."""
    return sa.DateTime()
def mysql_timestamp():
    """Return a MySQL TIMESTAMP with microsecond precision (fsp=6)."""
    return mysql.TIMESTAMP(fsp=6)
def sa_timestamp():
    """Return the generic timezone-aware TIMESTAMP type (other dialects)."""
    return sa.TIMESTAMP(timezone=True)
def upgrade():
    """Create the ``task_reschedule`` table and its lookup index."""
    # See 0e2a74e0fc9f_add_time_zone_awareness
    conn = op.get_bind()
    # Pick the timestamp column type matching the active database dialect.
    if conn.dialect.name == 'mysql':
        timestamp = mysql_timestamp
    elif conn.dialect.name == 'mssql':
        timestamp = mssql_timestamp
    else:
        timestamp = sa_timestamp
    op.create_table(
        TABLE_NAME,
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('task_id', sa.String(length=250), nullable=False),
        sa.Column('dag_id', sa.String(length=250), nullable=False),
        # use explicit server_default=None otherwise mysql implies defaults for first timestamp column
        sa.Column('execution_date', timestamp(), nullable=False, server_default=None),
        sa.Column('try_number', sa.Integer(), nullable=False),
        sa.Column('start_date', timestamp(), nullable=False),
        sa.Column('end_date', timestamp(), nullable=False),
        sa.Column('duration', sa.Integer(), nullable=False),
        sa.Column('reschedule_date', timestamp(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        # Tie each reschedule row to its owning task instance.
        sa.ForeignKeyConstraint(
            ['task_id', 'dag_id', 'execution_date'],
            ['task_instance.task_id', 'task_instance.dag_id', 'task_instance.execution_date'],
            name='task_reschedule_dag_task_date_fkey')
    )
    op.create_index(
        INDEX_NAME,
        TABLE_NAME,
        ['dag_id', 'task_id', 'execution_date'],
        unique=False
    )
def downgrade():
    """Drop the ``task_reschedule`` index and table."""
    op.drop_index(INDEX_NAME, table_name=TABLE_NAME)
    op.drop_table(TABLE_NAME)
|
JarbasAI/JarbasAI | refs/heads/patch-15 | mycroft/util/format.py | 1 | # -*- coding: iso-8859-15 -*-
# Copyright 2017 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import math
from mycroft.util.parse import extractnumber, is_numeric, normalize
# Spoken English names for fraction denominators 2..20, keyed by
# denominator. Values are singular; nice_number_en appends an "s" when
# the numerator is greater than one.
# Fixed misspellings in the spoken output: 'forth' -> 'fourth',
# 'eigth' -> 'eighth', 'twelveth' -> 'twelfth', 'twentyith' -> 'twentieth'.
FRACTION_STRING_EN = {
    2: 'half',
    3: 'third',
    4: 'fourth',
    5: 'fifth',
    6: 'sixth',
    7: 'seventh',
    8: 'eighth',
    9: 'ninth',
    10: 'tenth',
    11: 'eleventh',
    12: 'twelfth',
    13: 'thirteenth',
    14: 'fourteenth',
    15: 'fifteenth',
    16: 'sixteenth',
    17: 'seventeenth',
    18: 'eighteenth',
    19: 'nineteenth',
    20: 'twentieth'
}
# Spoken Portuguese names for fraction denominators, keyed by
# denominator; used by nice_number_pt.
FRACTION_STRING_PT = {
    2: 'meio',
    3: u'terço',
    4: 'quarto',
    5: 'quinto',
    6: 'sexto',
    7: u'sétimo',
    8: 'oitavo',
    9: 'nono',
    10: u'décimo',
    11: 'onze avos',
    12: 'doze avos',
    13: 'treze avos',
    14: 'catorze avos',
    15: 'quinze avos',
    16: 'dezasseis avos',
    17: 'dezassete avos',
    18: 'dezoito avos',
    19: 'dezanove avos',
    20: u'vigésimo',
    30: u'trigésimo',
    100: u'centésimo',
    1000: u'milésimo'
}
def nice_number(number, lang="en-us", speech=True, denominators=None):
    """Format a float as a human-friendly mixed fraction.

    4.5 becomes "4 and a half" for speech and "4 1/2" for text.

    Args:
        number (str): the float to format
        lang (str): the code for the language text is in
        speech (bool): to return speech representation or text representation
        denominators (iter of ints): denominators to use, default [1 .. 20]

    Returns:
        (str): The formatted string.
    """
    mixed = convert_number(number, denominators)
    if not mixed:
        # No close enough fraction found; fall back to a rounded decimal.
        return str(round(number, 3))
    if not speech:
        whole, num, den = mixed
        if num == 0:
            return str(whole)
        return '{} {}/{}'.format(whole, num, den)
    lang_lower = str(lang).lower()
    if lang_lower.startswith("en"):
        return nice_number_en(mixed)
    if lang_lower.startswith("pt"):
        return nice_number_pt(mixed)
    # TODO: Normalization for other languages
    return str(number)
def nice_number_en(result):
    """English rendering of a (whole, numerator, denominator) tuple."""
    whole, num, den = result
    if num == 0:
        return str(whole)
    fraction = FRACTION_STRING_EN[den]
    # Pluralize the fraction word when there is more than one part.
    if num > 1:
        fraction += 's'
    if whole == 0:
        if num == 1:
            return 'a {}'.format(fraction)
        return '{} {}'.format(num, fraction)
    if num == 1:
        return '{} and a {}'.format(whole, fraction)
    return '{} and {} {}'.format(whole, num, fraction)
def nice_number_pt(result):
    """ Portuguese conversion for nice_number """
    whole, num, den = result
    if num == 0:
        return str(whole)
    # denominator word
    den_str = FRACTION_STRING_PT[den]
    # pure fractions (no whole part)
    if whole == 0:
        if num == 1:
            # e.g. "um decimo" (one tenth)
            return_string = 'um {}'.format(den_str)
        else:
            # e.g. "tres meio" (three halves)
            return_string = '{} {}'.format(num, den_str)
    # whole part with numerator of one
    elif num == 1:
        # e.g. "trinta e um" style joining with "e" (and)
        return_string = '{} e {}'.format(whole, den_str)
    # whole part with a multi-part fraction
    else:
        # e.g. "vinte e 3 decimo" (twenty and 3 tenths)
        return_string = '{} e {} {}'.format(whole, num, den_str)
    # plural
    if num > 1:
        return_string += 's'
    return return_string
def convert_number(number, denominators):
    """Split a float into a mixed fraction (whole, numerator, denominator).

    Tries each candidate denominator (default 1..20) and accepts the first
    one whose numerator lands within 0.01 of an integer. Returns None when
    no candidate is close enough.
    """
    whole = int(number)
    if whole == number:
        return whole, 0, 1
    fractional = abs(number - whole)
    candidates = denominators if denominators else range(1, 21)
    for den in candidates:
        num = abs(fractional) * den
        if abs(num - round(num)) < 0.01:
            # Close enough to an exact fraction -- accept this denominator.
            return whole, int(round(num)), den
    return None
# Tokens the expression parser treats as mathematical operators; also used
# to tell operator tokens apart from numbers and variable names.
math_operations = ["+", "-", "/", "*", "!", "^", "**", "exp", "log", "sqrt",
                   "(",
                   ")", "sqr", "qb", "pow", "=", "is"]
class ElementalOperation():
    """A single binary (or unary) math operation on two operands.

    Operands may be numbers, known constants ("pi"), or variable names;
    when an operand is not numeric the operation is returned symbolically
    as an "x op y" string instead of being evaluated.
    """

    def __init__(self, x=0, y=0, op="+"):
        # Named constants resolvable in place of operands.
        self.constants = {"pi": math.pi}
        # User-defined variables (name -> string value, or "unknown").
        self.variables = {}
        self.num_1 = x
        self.num_2 = y
        self.op = str(op)
        # When True, numeric results come out as words ("four_and_a_half").
        self.nice = False
    def set_nice(self):
        """Speak numeric results (spaces replaced by underscores)."""
        self.nice = True
    def define_var(self, var, value="unknown"):
        """Bind variable name ``var`` to a (stringified) value."""
        self.variables[var] = str(value)
    def _operate(self, operation, x=0, y=1):
        """Apply ``operation`` after resolving constants and variables.

        Returns a number when both operands are numeric, otherwise the
        symbolic string "x operation y".
        """
        # TODO equations
        if operation == "is":
            # Variable assignment: record it and echo "x = y".
            self.define_var(str(x), str(y))
            return str(x) + " = " + str(y)
        if str(x) in self.constants:
            x = self.constants[str(x)]
        if str(y) in self.constants:
            y = self.constants[str(y)]
        if str(x) in self.variables:
            x = self.variables[str(x)]
        if str(y) in self.variables:
            y = self.variables[str(y)]
        # Any non-numeric operand -> return a symbolic expression string.
        if not is_numeric(x) and is_numeric(y):
            return str(x) + " " + operation + " " + str(float(y))
        if is_numeric(x) and not is_numeric(y):
            return str(float(x)) + " " + operation + " " + str(y)
        if not is_numeric(x) and not is_numeric(y):
            return str(x) + " " + operation + " " + str(y)
        x = float(x)
        y = float(y)
        result = ""
        if operation == "+":
            result = x + y
        elif operation == "/":
            # NOTE(review): y == 0 raises ZeroDivisionError here.
            result = x / y
        elif operation == "-":
            result = x - y
        elif operation == "*":
            result = x * y
        elif operation == "%":
            result = x % y
        elif operation == "sqrt":
            result = math.sqrt(x)
        elif operation == "!":
            result = math.factorial(x)
        elif operation == "log":
            # Logarithm of x with base y.
            result = math.log(x, y)
        elif operation == "exp":
            result = math.exp(x)
        elif operation in ["^", "**", "pow"]:
            result = math.pow(x, y)
        elif operation == "sqr":
            result = math.pow(x, 2)
        elif operation == "qb":
            result = math.pow(x, 3)
        # TODO priority
        elif operation == "(":
            pass
        elif operation == ")":
            pass
        if self.nice:
            # NOTE(review): for "(" / ")" result is still "" and
            # float("") would raise ValueError here.
            result = nice_number(float(result)).replace(" ", "_")
        return result
    def get_expression(self):
        """Return the stored operation as a plain "x op y" string."""
        return str(self.num_1) + " " + self.op + " " + str(self.num_2)
    def operate(self):
        """Resolve the stored operands and apply the stored operator."""
        x = self.num_1
        y = self.num_2
        if x in self.constants:
            x = self.constants[x]
        if y in self.constants:
            y = self.constants[y]
        # Substitute defined variables, but keep unknowns symbolic.
        if x in self.variables:
            x = self.variables[x] if self.variables[x] != "unknown" else x
        if y in self.variables:
            y = self.variables[y] if self.variables[y] != "unknown" else y
        return self._operate(self.op, x, y)
    def set(self, x, y, op):
        """Replace the stored operands and operator."""
        self.num_1 = x
        self.num_2 = y
        self.op = str(op)
class StringOperation():
    """Parses and iteratively simplifies a natural-language math string.

    The input is normalized, parsed into [x, operator, y] triples by
    extract_expression, and reduced pass-by-pass (unary ops first, then
    * / %, then + -) via ElementalOperation. Non-numeric operands are
    carried along symbolically.
    """

    def __init__(self, input_str, variables=None, nice=False, lang="en-us"):
        self.lang = lang
        self.nice = nice
        if variables is None:
            variables = {}
        self.variables = variables
        self.raw_str = input_str
        self.string = normalize(input_str, self.lang)
        if is_numeric(self.string):
            # A bare number needs no solving; wrap it as "0.0 + n".
            self.input_operations = [["0.0", "+", self.string]]
            self.chain = [self.string]
            self.result = self.chain[0]
        else:
            self.input_operations, self.leftover = extract_expression(
                self.string, self.lang)
            self.chain, self.result = self._chained_operation(
                self.input_operations)
        self._get_result()
    def _get_vars_data(self, op):
        """Split both operands of a triple into numeric prefix and variable.

        Returns (num1, num2, var1, var2); a missing numeric part defaults
        to "1" (so "x" is treated as "1x").
        """
        num1 = ""
        num2 = ""
        var1 = op[0]
        var2 = op[2]
        for i in range(0, len(op[0])):
            char = op[0][i]
            if is_numeric(char) or char == ".":
                num1 += char
                var1 = op[0][i + 1:]
        for i in range(0, len(op[2])):
            char = op[2][i]
            if is_numeric(char) or char == ".":
                num2 += char
                var2 = op[2][i + 1:]
        if not num1:
            num1 = "1"
        if not num2:
            num2 = "1"
        return num1, num2, var1, var2
    def _org_var(self, operations):
        """Rewrite triples containing variables and solve parentheses.

        NOTE(review): currently unused -- the call site in
        _chained_operation is commented out.
        """
        passes = [
            # ["=", "is"],
            # ["!", "exp", "log", "^", "sqrt", "**", "sqr", "qb", "pow"],
            ["*", "/", "%"],
            ["+", "-"]
        ]
        # handle vars
        inserts = {}
        for idx, op in enumerate(operations):
            if not op:
                continue
            num1, num2, var1, var2 = self._get_vars_data(op)
            # print num1, num2, var1, var2
            if not is_numeric(op[0]) and op[0] != "prev":
                operations[idx][0] = op[0] = "prev"
                operation = [num1, "*", var1]
                inserts[idx] = operation
            elif not is_numeric(op[2]) and op[2] != "next":
                operations[idx][2] = op[2] = "next"
                operation = [num2, "*", var2]
                inserts[idx + 1] = operation
        # TODO find ( and )
        new = operations
        for insert in inserts:
            new.insert(insert, inserts[insert])
        # solve vars
        OP = ElementalOperation("0.0", "0.0", "+")
        for current_pass in passes:
            for idx, op in enumerate(operations):
                if not op:
                    continue
                # find info
                op[0] = str(op[0])
                op[2] = str(op[2])
                num1, num2, var1, var2 = self._get_vars_data(op)
                prev_op = operations[idx - 1] if idx - 1 >= 0 else ""
                next_op = operations[idx + 1] if idx + 1 < len(
                    operations) else ""
                # chain with previous
                if prev_op:
                    prev_op[0] = str(prev_op[0])
                    prev_op[2] = str(prev_op[2])
                    prevnum1, prevnum2, prevvar1, vprevar2 = self._get_vars_data(
                        prev_op)
                if not is_numeric(op[0]) and op[0] != "prev":
                    if prev_op[1] in current_pass:
                        if prev_op[2] != "next" and prevvar2 == var1:
                            # Same variable on both sides: fold the numeric
                            # coefficients together.
                            OP.set(prevnum2, num1, prev_op[1])
                            if self.nice:
                                OP.set_nice()
                            OP.variables = self.variables
                            result = str(OP.operate())
                            operations[idx - 1][2] = result + var1
                            operations[idx][0] = "prev"
                    if op[1] not in current_pass:
                        continue
                    continue
                if op[1] == "(":
                    exp = []
                    end = -1
                    for i, n_op in enumerate(operations):
                        if not n_op:
                            continue
                        if n_op[1] == ")":
                            end = i
                            break
                    if end == -1:
                        # Unbalanced "(": drop it.
                        operations[idx] = ""
                        continue
                    # get expression between ( )
                    # NOTE(review): list indexing with a tuple raises
                    # TypeError; this was likely meant to be
                    # operations[idx:end]. Unreachable while _org_var is
                    # unused.
                    exp = operations[idx, end]
                    # solve
                    res = self._chained_operation(exp)[1]
                    # extract solved ops
                    exp, leftover = extract_expression(res)
                    # replace in chain
                    operations = operations[:idx] + exp + operations[end:]
        # count vars
        return operations
    def _chained_operation(self, operations):
        """Reduce the list of triples in priority order.

        Returns (chain, result_string) where chain is the final triple
        list and result_string is their concatenated textual form.
        """
        # this operation object will contain all var definitions and be
        # re-set and re used internally
        OP = ElementalOperation()
        if self.nice:
            OP.set_nice()
        OP.variables = self.variables
        # prioritize operations by this order
        passes = [
            # ["=", "is"],
            ["!", "exp", "log", "^", "sqrt", "**", "sqr", "qb", "pow"],
            ["*", "/", "%"],
            ["+", "-"]
        ]
        # organize vars and handle parenthesis
        # operations = self._org_var(operations)
        for current_pass in passes:
            for idx, op in enumerate(operations):
                if not op or op[1] not in current_pass:
                    continue
                if is_numeric(op[0]):
                    op[0] = float(op[0])
                if is_numeric(op[2]):
                    op[2] = float(op[2])
                op[0] = str(op[0])
                op[2] = str(op[2])
                # find nums and vars
                prev_op = operations[idx - 1] if idx - 1 >= 0 else ""
                next_op = operations[idx + 1] if idx + 1 < len(
                    operations) else ""
                num1 = ""
                num2 = ""
                var1 = op[0]
                var2 = op[2]
                for i in range(0, len(op[0])):
                    char = op[0][i]
                    if is_numeric(char) or char == ".":
                        num1 += char
                        var1 = op[0][i + 1:]
                for i in range(0, len(op[2])):
                    char = op[2][i]
                    if is_numeric(char) or char == ".":
                        num2 += char
                        var2 = op[2][i + 1:]
                # check for numbers
                if op[0] in OP.constants:
                    op[0] = OP.constants[op[0]]
                if op[0] in OP.variables:
                    op[0] = OP.variables[op[0]]
                if op[2] in OP.constants:
                    op[2] = OP.constants[op[2]]
                if op[2] in OP.variables:
                    op[2] = OP.variables[op[2]]
                # Unary operator with a numeric operand: evaluate now.
                if is_numeric(op[0]) and op[1] in ["!", "exp", "sqrt", "^",
                                                   "**", "qb", "sqr", "pow",
                                                   "log"]:
                    OP.set(op[0], op[0], op[1])
                    result = OP.operate()
                    operations[idx] = ["0.0", "+", result]
                    continue
                # chain operation with previous member
                if op[0] == "prev":
                    if prev_op == ["0.0", "+", "0.0"]:
                        operations[idx][0] = "0.0"
                        operations[idx - 1] = ""
                    elif prev_op:
                        if is_numeric(prev_op[2]):
                            OP.set(prev_op[2], op[2], op[1])
                            operations[idx - 1][2] = "next"
                            result = OP.operate()
                            operations[idx] = ["0.0", "+", result]
                        elif prev_op[2] == "next":
                            operations[idx] = [prev_op[0], "+", op[2]]
                            operations[idx - 1] = ""
                # all numbers, solve operation
                if is_numeric(op[0]) and is_numeric(op[2]):
                    OP.set(op[0], op[2], op[1])
                    result = OP.operate()
                    operations[idx] = ["0.0", "+", result]
                    continue
                if is_numeric(op[0]) and not is_numeric(op[2]):
                    pass
                if not is_numeric(op[0]) and is_numeric(op[2]):
                    pass
                if not is_numeric(op[0]) and not is_numeric(op[2]):
                    # same expression both sides
                    if op[0] == op[2]:
                        num = num1
                        # Apply the algebraic identities x-x=0, x/x=1,
                        # x*x=x^2, x+x=2x.
                        if op[1] == "-":
                            operations[idx] = ["0.0", "+", "0.0"]
                            continue
                        if op[1] == "/":
                            operations[idx] = ["0.0", "+", "1.0"]
                            continue
                        if op[1] == "*":
                            operations[idx] = [op[0], "sqr", op[0]]
                            continue
                        if op[1] == "+":
                            if not num:
                                operations[idx] = ["0.0", "+", "2.0" + op[0]]
                                continue
                            op[0] = op[0].replace(str(num), "")
                            num = str(2 * float(num))
                            operations[idx] = ["0.0", "+", num + op[0]]
                            continue
                        # TODO other ops ^ exp log sqr qb sqrt
                    # dif expression both sides
                    else:
                        if var1 == var2:
                            # Same variable, different coefficients: fold
                            # the coefficients numerically.
                            var = var1
                            OP.set(num1, num2, op[1])
                            result = str(OP.operate())
                            operations[idx] = ["0.0", "+", result + var]
        self.variables = OP.variables
        # clean empty elements
        result = ""
        chain = []
        for op in operations:
            chain.append(op)
            for element in op:
                if element and element != "prev" and element != "next":
                    # nice numbers in result
                    if not is_numeric(
                            element) and element not in math_operations:
                        if self.nice and element:
                            element = nice_var(element)
                    elif is_numeric(element) and self.nice:
                        element = nice_number(float(element)).replace(" ", "_")
                    result += str(element)
        return chain, result
    def _get_result(self, res=None):
        """Normalize the textual result and store it in self.result.

        Strips placeholder tokens, collapses sign sequences, re-spaces
        operators, and crops leading "0 +" terms.
        """
        # clean
        if res is not None:
            res = res.replace(" ", "")
        else:
            res = self.result.replace(" ", "")
        if self.nice:
            words = res.split(" ")
            for idx, word in enumerate(words):
                if is_numeric(word):
                    words[idx] = nice_number(float(word))
            res = " ".join(words).replace(" ", "")
        while res and res[0] == "+":
            res = res[1:]
        res = res.replace("next", "")
        res = res.replace("prev", "")
        # Collapse doubled/conflicting sign sequences.
        res = res.replace("+-", "-")
        res = res.replace("-+", "-")
        res = res.replace("++", "+")
        res = res.replace("--", "-")
        res = res.replace("+*", "*")
        res = res.replace("*+", "*")
        res = res.replace("-*", "*-")
        res = res.replace("/1", "")
        res = res.replace("/1.0", "")
        res = res.replace("sqr", " squared")
        res = res.replace("qb", " cubed")
        res = " " + res
        for op in math_operations:
            res = res.replace(op, " " + op + " ")
        # crop start zeros
        while " 0 + " in res or " 0.0 + " in res:
            res = res.replace(" 0 + ", " ")
            res = res.replace(" 0.0 + ", " ")
        res = res.replace(" 0 - ", " -")
        res = res.replace(" 0.0 - ", " -")
        self.result = res
        return res
    def solve(self, debug=False):
        """Iteratively re-parse and reduce until the result is stable.

        Re-feeds the current result into a fresh StringOperation (up to
        ``depth`` rounds) so chained reductions converge.
        """
        if debug:
            print "normalized string:", self.string
            print "raw string:", self.raw_str
        lang = self.lang
        OP = StringOperation(self.raw_str, lang=lang, nice=self.nice)
        res = OP.result
        variables = OP.variables
        if debug:
            print "elementar operations:", OP.input_operations
            print "result:", res
            print "chain", OP.chain
        i = 0
        depth = 10
        prev_res = ""
        while not res == prev_res and i < depth:
            prev_res = res
            OP = StringOperation(res, variables=variables, lang=lang,
                                 nice=self.nice)
            res = OP.result
            variables = OP.variables
            if debug:
                print"elementar operations:", OP.input_operations
                print"result:", res
                print "chain", OP.chain
            i += 1
        if debug:
            print "vars:", OP.variables
            print "\n"
        # update result
        self._get_result(res)
        return self.result.replace("_", " ")
def nice_var(var_string):
    """Speak the numeric coefficient of a variable term, e.g. "2x" -> "two_x".

    If the string contains an operator character, it is split on spaces
    and each non-operator word is converted recursively.
    """
    num = ""
    var = ""
    for i in range(0, len(var_string)):
        char = var_string[i]
        if is_numeric(char) or char == ".":
            num += char
            var = var_string[i + 1:]
        elif char in math_operations:
            # Operator found: treat the string as a whole expression.
            words = var_string.split(" ")
            for idx, word in enumerate(words):
                if word not in math_operations:
                    words[idx] = nice_var(word)
            return " ".join(words)
    if num:
        num = nice_number(float(num)).replace(" ", "_")
        var_string = num + var
    return var_string
def solve_expression(string, nice=True, lang="en-us", debug=False):
    """Solve a natural-language math expression and return the result text.

    Args:
        string (str): expression, e.g. "2 plus 2"
        nice (bool): speak numbers as words instead of digits
        lang (str): BCP-47 code of the language the text is in
        debug (bool): print intermediate solver state

    Returns:
        (str): the simplified expression/result
    """
    operation = StringOperation(string, lang=lang, nice=nice)
    res = operation.solve(debug=debug)
    # Collapse double spaces left over from token joining.
    res = res.replace("  ", " ")
    # startswith() guards against an empty result; the original indexed
    # res[0] and raised IndexError on "".
    if res.startswith(" "):
        res = res[1:]
    return res
def extract_expression(string, lang="en-us"):
    """Extract elementary operations from a natural-language expression.

    Args:
        string (str): expression text to parse
        lang (str): BCP-47 code of the language the text is in

    Returns:
        (list, str): list of [x, operator, y] triples and the leftover
            text that could not be parsed.
    """
    lang_lower = str(lang).lower()
    if lang_lower.startswith("en"):
        return extract_expression_en(string)
    # TODO: parsers for other languages. Fall back to the English parser
    # instead of returning None, which made callers crash when unpacking
    # the expected (operations, leftover) tuple.
    return extract_expression_en(string)
def extract_expression_en(string):
    """Parse an English math sentence into elementary operations.

    Args:
        string (str): normalized English text, e.g. "2 plus 2 times 4"

    Returns:
        (list, str): list of [x, operator, y] triples (operands may be
            numbers, variable names, or the placeholders "prev"/"next"
            that chain into neighbouring triples) and the leftover text
            that could not be parsed ("___" marks consumed spans).
    """
    # Map operator token -> natural-language trigger words.
    expressions = {"+": ["add", "adding", "plus", "added"],
                   "-": ["subtract", "subtracting", "minus", "negative",
                         "subtracted"],
                   "/": ["divide", "dividing", "divided"],
                   "*": ["multiply", "multiplying", "times", "multiplied"],
                   "%": ["modulus"],
                   "!": ["factorial"],
                   # "is": ["set"], # TODO find better keyword for x = value
                   # "=": ["equals"],
                   # Fixed: a missing comma after "pow" made Python
                   # concatenate it with "elevated" into the single,
                   # unmatchable keyword "powelevated".
                   "^": ["**", "^", "pow", "elevated", "power", "powered",
                         "raised"],
                   "sqr": ["squared"],
                   "sqrt": ["square_root"],
                   "qb": ["cubed", "qubed"],
                   "exp": ["exponent", "exponentiate", "exponentiated"],
                   "(": ["open"],
                   ")": ["close"]}
    noise_words = ["by", "and", "the", "in", "at", "a", "for", "an", "to",
                   "with", "off", "of", "is", "are", "can", "be"]
    string = normalize(string)
    # Pad operator symbols with spaces so they tokenize on split().
    for op in expressions:
        string = string.replace(op, " " + op + " ")
    words = string.replace(",", "").replace("'", "").replace('"', "") \
        .replace("square root", "square_root").split(" ")
    # Replace natural-language math vocabulary with operator tokens.
    for idx, word in enumerate(words):
        for operation in expressions:
            if word in expressions[operation]:
                words[idx] = operation
    # Convert all numbers to canonical float strings.
    for idx, word in enumerate(words):
        if not word:
            continue
        if extractnumber(word):
            words[idx] = str(float(extractnumber(word)))
        # Join a number with a following unknown variable: "3 x" -> "3x".
        if idx + 1 < len(words) and words[idx + 1] not in math_operations:
            # Fixed: the closing parenthesis of is_numeric() was misplaced,
            # which put the noise-word test inside is_numeric's argument
            # and made this branch effectively dead.
            if is_numeric(word) and not is_numeric(words[idx + 1]) and \
                    words[idx + 1] not in noise_words:
                # words[idx] = str(float(words[idx])) + " * " + words[idx + 1]
                words[idx] = str(float(words[idx])) + words[idx + 1]
                words[idx + 1] = ""
        if idx - 1 >= 0 and word not in math_operations:
            # "1 2 x" -> "1 2x"
            if not is_numeric(word) and is_numeric(words[idx - 1]) and words[
                    idx] not in noise_words:
                # words[idx] = words[idx - 1] + " * " + words[idx]
                words[idx] = words[idx - 1] + words[idx]
                words[idx - 1] = ""
    words = [word for word in words if word]
    # Remove noise words.
    for idx, word in enumerate(words):
        if word in noise_words:
            words[idx] = ""
    words = [word for word in words if word]
    exps = []
    # Extract [x, operator, y] triples around each operator token.
    for idx, word in enumerate(words):
        if not word:
            continue
        if word in expressions:
            operation = word
            if operation == "(" or operation == ")":
                exps.append(["prev", operation, "next"])
                continue
            if idx > 0:
                # Window of interest: operand, operator, operand.
                woi = words[idx - 1:idx + 2]
                words[idx - 1] = ""
                if idx + 1 < len(words):
                    words[idx + 1] = ""
                words[idx] = ""
                x = woi[0]
                try:
                    y = woi[2]
                except IndexError:
                    # Operator at end of sentence; chain into next triple.
                    y = "next"
                if x == "":
                    x = "prev"
                if operation == "sqrt":
                    x = y
                exps.append([x, operation, y])
            else:
                # Operator in first position is a sign (or unary sqrt).
                y = words[idx + 1]
                words[idx + 1] = ""
                words[idx] = ""
                if operation == "-":
                    x = "-" + y
                    y = 0
                    operation = "+"
                    exps.append([x, operation, y])
                elif operation == "+":
                    exps.append(["0", operation, y])
                elif operation == "sqrt":
                    x = y
                    y = "next"
                    exps.append([x, operation, y])
    # TODO exponent, log
    if not exps and extractnumber(string):
        # No operators found but the sentence contains a number.
        exps = [["0.0", "+", str(extractnumber(string))]]
    words = [word for word in words if not is_numeric(word)]
    if words == []:
        words = ["___"]
    leftover = ""
    for word in words:
        if not word and "___" not in leftover:
            leftover += " ___"
        elif word:
            leftover += " " + word
    leftover = leftover[1:]
    return exps, leftover
|
jezdez/kuma | refs/heads/master | kuma/core/urlresolvers.py | 25 | import threading
from django.conf import settings
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse as django_reverse
from django.utils.translation.trans_real import parse_accept_lang_header
# Thread-local storage for URL prefixes. Access with (get|set)_url_prefix.
_locals = threading.local()
def get_best_language(accept_lang):
    """Given an Accept-Language header, return the best-matching language.

    Returns False (via find_supported) when no supported locale matches.
    """
    ranked = parse_accept_lang_header(accept_lang)
    return find_supported(ranked)
def set_url_prefixer(prefixer):
    """Set the Prefixer for the current thread."""
    _locals.prefixer = prefixer
def reset_url_prefixer():
    """Reset the thread-local storage, clearing any set Prefixer."""
    global _locals
    _locals = threading.local()
def get_url_prefixer():
    """Get the Prefixer for the current thread, or None."""
    return getattr(_locals, 'prefixer', None)
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None,
            current_app=None, force_locale=False, locale=None, unprefixed=False):
    """Wraps Django's reverse to prepend the correct locale.

    force_locale -- Ordinarily, if get_url_prefixer() returns None, we return
        an unlocalized URL, which will be localized via redirect when visited.
        Set force_locale to True to force the insertion of a default locale
        when there is no set prefixer. If you are writing a test and simply
        wish to avoid LocaleURLMiddleware's initial 301 when passing in an
        unprefixed URL, it is probably easier to substitute LocalizingClient
        for any uses of django.test.client.Client and forgo this kwarg.

    locale -- By default, reverse prepends the current locale (if set) or
        the default locale if force_locale == True. To override this behavior
        and have it prepend a different locale, pass in the locale parameter
        with the desired locale. When passing a locale, the force_locale is
        not used and is implicitly True.

    unprefixed -- Skip locale-prefixing entirely and return the bare URL.
    """
    if locale:
        # An explicit locale overrides any thread-local prefixer.
        prefixer = Prefixer(locale=locale)
    else:
        prefixer = get_url_prefixer()
    if unprefixed:
        prefixer = None
    elif not prefixer and force_locale:
        prefixer = Prefixer()
    if prefixer:
        prefix = prefix or '/'
    url = django_reverse(viewname, urlconf=urlconf, args=args, kwargs=kwargs,
                         prefix=prefix, current_app=current_app)
    if prefixer:
        # Prepend the locale segment (e.g. '/en-US') to the resolved URL.
        return prefixer.fix(url)
    else:
        return url
def find_supported(ranked):
    """Given a ranked language list, return the best-matching locale.

    Returns False when nothing in ``ranked`` (nor any derived base
    language tag appended as a fallback) is a supported locale.
    """
    langs = dict(settings.LANGUAGE_URL_MAP)
    for lang, _ in ranked:
        lang = lang.lower()
        if lang in langs:
            return langs[lang]
        # Add derived language tags to the end of the list as a fallback.
        # (Appending while iterating is deliberate: the shortened tag is
        # visited later in this same loop.)
        pre = '-'.join(lang.split('-')[0:-1])
        if pre:
            ranked.append((pre, None))
    # Couldn't find any acceptable locale.
    return False
def split_path(path):
    """Split a request path into (locale, rest-of-path).

    The locale is '' when the first path segment is not a supported
    language code.
    """
    trimmed = path.lstrip('/')
    # partition always yields exactly 3 parts, unlike split.
    leading, _, remainder = trimmed.partition('/')
    # find_supported expects a ranked list; rank the lone candidate 1.0.
    locale = find_supported([(leading, 1.0)])
    if not locale:
        return '', trimmed
    return locale, remainder
class Prefixer(object):
    """Computes and applies the locale URL prefix for a request."""
    def __init__(self, request=None, locale=None):
        """If request is omitted, fall back to a default locale."""
        self.request = request or RequestFactory(REQUEST_METHOD='bogus').request()
        # Split e.g. '/en-US/docs' into locale 'en-US' and path 'docs'.
        self.locale, self.shortened_path = split_path(self.request.path_info)
        if locale:
            # An explicitly passed locale overrides the one in the path.
            self.locale = locale
    def get_language(self):
        """
        Return a locale code we support on the site using the
        user's Accept-Language header to determine which is best. This
        mostly follows the RFCs but read bug 439568 for details.
        """
        # A ?lang= query parameter takes precedence over the header.
        if 'lang' in self.request.GET:
            lang = self.request.GET['lang'].lower()
            if lang in settings.LANGUAGE_URL_MAP:
                return settings.LANGUAGE_URL_MAP[lang]
        if self.request.META.get('HTTP_ACCEPT_LANGUAGE'):
            best = get_best_language(
                self.request.META['HTTP_ACCEPT_LANGUAGE'])
            if best:
                return best
        # Nothing matched; use the site default.
        return settings.LANGUAGE_CODE
    def fix(self, path):
        """Return ``path`` with SCRIPT_NAME and the locale prefix applied."""
        path = path.lstrip('/')
        url_parts = [self.request.META['SCRIPT_NAME']]
        # Compare with a trailing slash so ignored-path prefixes match
        # whole segments.
        if path.endswith('/'):
            check_path = path
        else:
            check_path = path + '/'
        if not check_path.startswith(settings.LANGUAGE_URL_IGNORED_PATHS):
            locale = self.locale if self.locale else self.get_language()
            url_parts.append(locale)
        url_parts.append(path)
        return '/'.join(url_parts)
|
chubbymaggie/LSHBOX | refs/heads/master | include/eigen/debug/gdb/printers.py | 143 | # -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <bschindler@inf.ethz.ch>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Pretty printers for Eigen::Matrix
# This is still pretty basic as the python extension to gdb is still pretty basic.
# It cannot handle complex eigen types and it doesn't support any of the other eigen types
# Such as quaternion or some other type.
# This code supports fixed size as well as dynamic size matrices
# To use it:
#
# * Create a directory and put the file as well as an empty __init__.py in
# that directory.
# * Create a ~/.gdbinit file, that contains the following:
# python
# import sys
# sys.path.insert(0, '/path/to/eigen/printer/directory')
# from printers import register_eigen_printers
# register_eigen_printers (None)
# end
import gdb
import re
import itertools
class EigenMatrixPrinter:
    "Print Eigen Matrix or Array of some kind"

    def __init__(self, variety, val):
        "Extract all the necessary information"
        # Save the variety (presumably "Matrix" or "Array") for later usage
        self.variety = variety
        # The gdb extension does not support value template arguments - need to extract them by hand
        type = val.type
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        tag = self.type.tag
        # Pull the template argument list out of the type tag text.
        regex = re.compile('\<.*\>')
        m = regex.findall(tag)[0][1:-1]
        template_params = m.split(',')
        # NOTE(review): this targets gdb's Python 2; under Python 3 map()
        # returns an iterator and the indexing below would fail.
        template_params = map(lambda x:x.replace(" ", ""), template_params)
        # A rows/cols template argument of -1 (Eigen::Dynamic) means the
        # size is stored at runtime in m_storage.
        if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001' or template_params[1] == '-1':
            self.rows = val['m_storage']['m_rows']
        else:
            self.rows = int(template_params[1])
        if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001' or template_params[2] == '-1':
            self.cols = val['m_storage']['m_cols']
        else:
            self.cols = int(template_params[2])
        self.options = 0 # default value
        if len(template_params) > 3:
            self.options = template_params[3];
        # Bit 0 of the Options template parameter selects row-major storage.
        self.rowMajor = (int(self.options) & 0x1)
        self.innerType = self.type.template_argument(0)
        self.val = val
        # Fixed size matrices have a struct as their storage, so we need to walk through this
        self.data = self.val['m_storage']['m_data']
        if self.data.type.code == gdb.TYPE_CODE_STRUCT:
            self.data = self.data['array']
        self.data = self.data.cast(self.innerType.pointer())
    class _iterator:
        "Walk the coefficients in storage order, labelling each with [row,col]."
        def __init__ (self, rows, cols, dataPtr, rowMajor):
            self.rows = rows
            self.cols = cols
            self.dataPtr = dataPtr
            self.currentRow = 0
            self.currentCol = 0
            self.rowMajor = rowMajor
        def __iter__ (self):
            return self
        def next(self):
            # Python 2 iterator protocol (would be __next__ in Python 3).
            row = self.currentRow
            col = self.currentCol
            if self.rowMajor == 0:
                # Column-major: advance down the current column first.
                if self.currentCol >= self.cols:
                    raise StopIteration
                self.currentRow = self.currentRow + 1
                if self.currentRow >= self.rows:
                    self.currentRow = 0
                    self.currentCol = self.currentCol + 1
            else:
                # Row-major: advance along the current row first.
                if self.currentRow >= self.rows:
                    raise StopIteration
                self.currentCol = self.currentCol + 1
                if self.currentCol >= self.cols:
                    self.currentCol = 0
                    self.currentRow = self.currentRow + 1
            item = self.dataPtr.dereference()
            self.dataPtr = self.dataPtr + 1
            if (self.cols == 1): #if it's a column vector
                return ('[%d]' % (row,), item)
            elif (self.rows == 1): #if it's a row vector
                return ('[%d]' % (col,), item)
            return ('[%d,%d]' % (row, col), item)
    def children(self):
        """Yield (label, value) pairs for gdb's pretty-printer protocol."""
        return self._iterator(self.rows, self.cols, self.data, self.rowMajor)
    def to_string(self):
        """One-line summary: type, dimensions, storage order, data pointer."""
        return "Eigen::%s<%s,%d,%d,%s> (data ptr: %s)" % (self.variety, self.innerType, self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data)
class EigenQuaternionPrinter:
    "Print an Eigen Quaternion"

    def __init__(self, val):
        "Extract all the necessary information"
        # The gdb extension does not support value template arguments - need to extract them by hand
        type = val.type
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        self.innerType = self.type.template_argument(0)
        self.val = val
        # Quaternions have a struct as their storage, so we need to walk through this
        self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
        self.data = self.data.cast(self.innerType.pointer())
    class _iterator:
        "Yield the four coefficients labelled x, y, z, w."
        def __init__ (self, dataPtr):
            self.dataPtr = dataPtr
            self.currentElement = 0
            self.elementNames = ['x', 'y', 'z', 'w']
        def __iter__ (self):
            return self
        def next(self):
            # Python 2 iterator protocol (would be __next__ in Python 3).
            element = self.currentElement
            if self.currentElement >= 4: #there are 4 elements in a quanternion
                raise StopIteration
            self.currentElement = self.currentElement + 1
            item = self.dataPtr.dereference()
            self.dataPtr = self.dataPtr + 1
            return ('[%s]' % (self.elementNames[element],), item)
    def children(self):
        """Yield (label, value) pairs for gdb's pretty-printer protocol."""
        return self._iterator(self.data)
    def to_string(self):
        """One-line summary: scalar type and data pointer."""
        return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data)
def build_eigen_dictionary ():
    """Populate pretty_printers_dict with the supported Eigen types."""
    pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = lambda val: EigenQuaternionPrinter(val)
    pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = lambda val: EigenMatrixPrinter("Matrix", val)
    pretty_printers_dict[re.compile('^Eigen::Array<.*>$')]  = lambda val: EigenMatrixPrinter("Array",  val)
def register_eigen_printers(obj):
    """Register eigen pretty-printers with objfile Obj.

    Pass None to register with gdb globally.
    """
    # 'is None' (PEP 8) avoids invoking a custom __eq__ on objfile wrappers.
    if obj is None:
        obj = gdb
    obj.pretty_printers.append(lookup_function)
def lookup_function(val):
    """Look up and return a pretty-printer that can print ``val``.

    Returns None when no registered Eigen printer matches the value's
    type name, letting gdb fall through to other printers.
    """
    type = val.type
    # Follow references to the referenced type.
    if type.code == gdb.TYPE_CODE_REF:
        type = type.target()
    type = type.unqualified().strip_typedefs()
    typename = type.tag
    # 'is None' (PEP 8) instead of '== None'; anonymous types have no tag.
    if typename is None:
        return None
    for function in pretty_printers_dict:
        if function.search(typename):
            return pretty_printers_dict[function](val)
    return None
# Registry mapping compiled type-name regexes to printer factories;
# filled in at import time by build_eigen_dictionary().
pretty_printers_dict = {}
build_eigen_dictionary ()
|
eusoubrasileiro/fatiando | refs/heads/sim-class-improvements | fatiando/gravmag/euler.py | 2 | """
Euler deconvolution methods for potential fields.
**Implementations**
* :class:`~fatiando.gravmag.euler.Classic`: The classic 3D solution to Euler's
equation for potential fields (Reid et al., 1990). Runs on the whole dataset.
**Solution selection procedures**
* :class:`~fatiando.gravmag.euler.ExpandingWindow`: Run a given Euler
deconvolution on an expanding window and keep the best estimate.
* :class:`~fatiando.gravmag.euler.MovingWindow`: Run a given Euler
deconvolution on a moving window to produce a set of estimates.
**References**
Reid, A. B., J. M. Allsop, H. Granser, A. J. Millett, and I. W. Somerton
(1990), Magnetic interpretation in three dimensions using Euler deconvolution,
Geophysics, 55(1), 80-91, doi:10.1190/1.1442774.
----
"""
from __future__ import division
import numpy
from .. import gridder
from ..inversion.base import Misfit
from ..utils import safe_inverse, safe_dot, safe_diagonal
class Classic(Misfit):
    """
    Classic 3D Euler deconvolution of potential field data.

    Follows the formulation of Reid et al. (1990). Performs the deconvolution
    on the whole data set. For windowed approaches, use
    :class:`~fatiando.gravmag.euler.ExpandingWindow`.

    Works on any potential field that satisfies Euler's homogeneity equation.

    .. note::

        The data does **not** need to be gridded for this! So long as you
        can calculate the derivatives of non-gridded data (using an Equivalent
        Layer, for example).

    .. note:: x is North, y is East, and z is down.

    .. warning::

        Units of the input data (x, y, z, field, derivatives) must be in SI
        units! Otherwise, the results will be in strange units. Use functions
        in :mod:`fatiando.utils` to convert between units.

    Parameters:

    * x, y, z : 1d-arrays
        The x, y, and z coordinates of the observation points
    * field : 1d-array
        The potential field measured at the observation points
    * xderiv, yderiv, zderiv : 1d-arrays
        The x-, y-, and z-derivatives of the potential field (measured or
        calculated) at the observation points
    * index : float
        The structural index of the source

    """

    def __init__(self, x, y, z, field, xderiv, yderiv, zderiv,
                 structural_index):
        # Bug fix: the original chained comparison
        # ``len(x) != len(y) != ... != len(zderiv)`` only compares adjacent
        # pairs and short-circuits to False as soon as two neighbors match,
        # so some mismatched inputs slipped through. Compare all lengths.
        sizes = set(len(i) for i in (x, y, z, field, xderiv, yderiv, zderiv))
        if len(sizes) > 1:
            raise ValueError("x, y, z, field, xderiv, yderiv, zderiv must " +
                             "have the same number of elements")
        if structural_index < 0:
            raise ValueError("Invalid structural index '%g'. Should be >= 0"
                             % (structural_index))
        super(Classic, self).__init__(
            # Right-hand side of Euler's homogeneity equation (the "data")
            data=-x * xderiv - y * yderiv - z *
            zderiv - structural_index * field,
            positional=dict(x=x, y=y, z=z, field=field, xderiv=xderiv,
                            yderiv=yderiv, zderiv=zderiv),
            model=dict(structural_index=structural_index),
            nparams=4, islinear=True)

    def _get_jacobian(self, p):
        """Sensitivity matrix with columns [-dF/dx, -dF/dy, -dF/dz, -eta]."""
        jac = numpy.transpose(
            [-self.positional['xderiv'], -self.positional['yderiv'],
             -self.positional['zderiv'],
             -self.model['structural_index'] * numpy.ones(self.ndata)])
        return jac

    def _get_predicted(self, p):
        """Predicted data: the problem is linear, so just Jacobian times p."""
        return safe_dot(self.jacobian(p), p)

    def fit(self):
        """
        Solve the deconvolution on the whole data set.

        Estimates an (x, y, z) point (stored in ``estimate_``) and a base level
        (stored in ``baselevel_``).
        """
        super(Classic, self).fit()
        self._estimate = self.p_[:3]
        self.baselevel_ = self.p_[3]
        return self
class ExpandingWindow(object):
    """
    Solve an Euler deconvolution problem using an expanding window scheme.

    Runs the given Euler deconvolution on data inside windows of growing
    size, all centered on the same point, and keeps the single best result,
    judged by the estimated uncertainty.

    Like any other Euler solver, use the
    :meth:`~fatiando.gravmag.euler.ExpandingWindow.fit` method to produce an
    estimate. The estimated point is stored in ``estimate_``, the base level
    in ``baselevel_``.

    Parameters:

    * euler : Euler solver
        An instance of an Euler deconvolution solver, like
        :class:`~fatiando.gravmag.euler.Classic`.
    * center : [x, y]
        The x, y coordinates of the center of the expanding windows.
    * sizes : list or 1d-array
        The sizes of the windows.

    """

    def __init__(self, euler, center, sizes):
        self.euler = euler
        self.center = center
        self.sizes = sizes
        self.estimate_ = None
        self.p_ = None

    def fit(self):
        """
        Perform the Euler deconvolution with expanding windows.

        The estimated point is stored in ``estimate_``, the base level in
        ``baselevel_``.
        """
        xc, yc = self.center
        base_solver = self.euler
        x = base_solver.positional['x']
        y = base_solver.positional['y']
        # One (uncertainty, parameter vector) pair per non-empty window.
        fits = []
        for size in self.sizes:
            half = 0.5 * size
            inside = ((x >= xc - half) & (x <= xc + half) &
                      (y >= yc - half) & (y <= yc + half))
            if not numpy.any(inside):
                continue
            solver = base_solver.subset(inside).fit()
            # Quality measure: norm of the standard deviations of x, y, z.
            cov = safe_inverse(solver.hessian(solver.p_))
            uncertainty = numpy.sqrt(safe_diagonal(cov)[:3])
            fits.append((numpy.linalg.norm(uncertainty), solver.p_))
        best = min(range(len(fits)), key=lambda i: fits[i][0])
        self.p_ = fits[best][1]
        self.estimate_ = self.p_[:3]
        self.baselevel_ = self.p_[3]
        return self
class MovingWindow(object):
    """
    Solve an Euler deconvolution problem using a moving window scheme.

    Uses data inside a window moving to perform the Euler deconvolution. Keeps
    the estimate from all windows.

    Like any other Euler solver, use the
    :meth:`~fatiando.gravmag.euler.MovingWindow.fit` method to produce an
    estimate. The estimated points are stored in ``estimate_``, the base levels
    in ``baselevel_``.

    Parameters:

    * euler : Euler solver
        An instance of an Euler deconvolution solver, like
        :class:`~fatiando.gravmag.euler.Classic`.
    * windows : (ny, nx)
        The number of windows in the y and x directions
    * size : (dy, dx)
        The size of the windows in the y and x directions
    * keep : float
        Decimal percentage of solutions to keep. Will rank the solutions by
        increasing error and keep only the first *keep* percent.

    """

    def __init__(self, euler, windows, size, keep=0.2):
        self.euler = euler
        self.windows = windows
        self.size = size
        self.keep = keep
        # Filled by fit(): the [x, y] center of every window that was visited
        self.window_centers = None
        self.estimate_ = None
        self.p_ = None

    def fit(self):
        """
        Perform the Euler deconvolution on a moving window.

        The estimated points are stored in ``estimate_``, the base levels in
        ``baselevel_``.
        """
        ny, nx = self.windows
        dy, dx = self.size
        euler = self.euler
        x, y = euler.positional['x'], euler.positional['y']
        x1, x2, y1, y2 = x.min(), x.max(), y.min(), y.max()
        paramvecs = []
        estimates = []
        baselevels = []
        errors = []
        # Thank you Saulinho for the solution!
        # Calculate the mid-points of the windows
        self.window_centers = []
        xmidpoints = numpy.linspace(x1 + 0.5 * dx, x2 - 0.5 * dx, nx)
        ymidpoints = numpy.linspace(y1 + 0.5 * dy, y2 - 0.5 * dy, ny)
        for yc in ymidpoints:
            for xc in xmidpoints:
                self.window_centers.append([xc, yc])
                # Separate the indices that fall inside the window with center
                # (xc, yc)
                indices = ((x >= xc - 0.5 * dx) & (x <= xc + 0.5 * dx) &
                           (y >= yc - 0.5 * dy) & (y <= yc + 0.5 * dy))
                if not numpy.any(indices):
                    continue
                solver = euler.subset(indices).fit()
                # Rank this window's solution by the norm of the estimated
                # standard deviations of the (x, y, z) position
                cov = safe_inverse(solver.hessian(solver.p_))
                uncertainty = numpy.sqrt(safe_diagonal(cov)[0:3])
                mean_error = numpy.linalg.norm(uncertainty)
                errors.append(mean_error)
                paramvecs.append(solver.p_)
                estimates.append(solver.estimate_)
                baselevels.append(solver.baselevel_)
        # Keep only the fraction 'keep' of solutions with the lowest error
        best = numpy.argsort(errors)[:int(self.keep * len(errors))]
        self.p_ = numpy.array(paramvecs)[best]
        self.estimate_ = numpy.array(estimates)[best]
        self.baselevel_ = numpy.array(baselevels)[best]
        return self
|
thejdeep/CoAPthon | refs/heads/master | coapthon/messages/option.py | 4 | from coapthon import defines
from coapthon.utils import byte_len
__author__ = 'Giacomo Tanganelli'
class Option(object):
    """
    Class to handle the CoAP Options.
    """
    def __init__(self):
        """
        Data structure to store options.
        """
        self._number = None
        self._value = None

    @property
    def number(self):
        """
        Return the number of the option.

        :return: the option number
        """
        return self._number

    @number.setter
    def number(self, value):
        """
        Set the option number.

        :type value: int
        :param value: the option number
        """
        self._number = value

    @property
    def value(self):
        """
        Return the option value.

        :return: the option value in the correct format depending on the option
        """
        # Bug fix: the original tested ``type(self._value) is None`` which is
        # never true (type() always returns a class, never None). The intent
        # is to normalize an unset value to an empty bytearray.
        if self._value is None:
            self._value = bytearray()
        opt_type = defines.OptionRegistry.LIST[self._number].value_type
        if opt_type == defines.INTEGER:
            if byte_len(self._value) > 0:
                return int(self._value)
            else:
                # Zero-length integer option: fall back to the registry default
                return defines.OptionRegistry.LIST[self._number].default
        return self._value

    @value.setter
    def value(self, value):
        """
        Set the value of the option.

        :param value: the option value
        """
        if type(value) is str:
            value = bytearray(value, "utf-8")
        elif type(value) is int and byte_len(value) == 0:
            # Zero-length integers are stored as the integer 0.
            # (Non-zero-length ints are stored as-is; the original had a
            # redundant no-op branch for that case.)
            value = 0
        self._value = value

    @property
    def length(self):
        """
        Return the value length.

        :rtype : int
        """
        if isinstance(self._value, int):
            return byte_len(self._value)
        if self._value is None:
            return 0
        return len(self._value)

    def is_safe(self):
        """
        Check if the option is safe-to-forward by a proxy.

        :rtype : bool
        :return: True, if option is safe
        """
        # These options are unsafe and must be understood by a forwarding
        # proxy (RFC 7252, Section 5.4.2).
        unsafe = (defines.OptionRegistry.URI_HOST.number,
                  defines.OptionRegistry.URI_PORT.number,
                  defines.OptionRegistry.URI_PATH.number,
                  defines.OptionRegistry.MAX_AGE.number,
                  defines.OptionRegistry.URI_QUERY.number,
                  defines.OptionRegistry.PROXY_URI.number,
                  defines.OptionRegistry.PROXY_SCHEME.number)
        return self._number not in unsafe

    @property
    def name(self):
        """
        Return option name.

        :rtype : String
        :return: the option name
        """
        return defines.OptionRegistry.LIST[self._number].name

    def __str__(self):
        """
        Return a string representing the option.

        :rtype : String
        :return: a message with the option name and the value
        """
        return self.name + ": " + str(self.value) + "\n"

    def __eq__(self, other):
        """
        Return True if two options are equal.

        :type other: Option
        :param other: the option to be compared against
        :rtype : Boolean
        :return: True, if options are equal
        """
        return self.__dict__ == other.__dict__
|
paulmathews/nova | refs/heads/stable/folsom | nova/virt/vmwareapi/vim_util.py | 21 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API utility module.
"""
def build_selection_spec(client_factory, name):
    """Create a named SelectionSpec used to reference a traversal spec."""
    spec = client_factory.create('ns0:SelectionSpec')
    spec.name = name
    return spec
def build_traversal_spec(client_factory, name, spec_type, path, skip,
                         select_set):
    """Create a TraversalSpec describing one hop through the inventory.

    ``spec_type`` and ``path`` name the managed object type and the property
    to follow; ``select_set`` lists the specs to apply at the target.
    """
    spec = client_factory.create('ns0:TraversalSpec')
    spec.name = name
    spec.type = spec_type
    spec.path = path
    spec.skip = skip
    spec.selectSet = select_set
    return spec
def build_recursive_traversal_spec(client_factory):
    """
    Builds the Recursive Traversal Spec to traverse the object managed
    object hierarchy.

    The spec starts at a Folder and follows datacenters, compute resources,
    hosts, resource pools, datastores and virtual machines, so one
    PropertyCollector call can reach any object in the inventory.
    """
    visit_folders_select_spec = build_selection_spec(client_factory,
                                                     "visitFolders")
    # For getting to hostFolder from datacenter
    dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
                                    "hostFolder", False,
                                    [visit_folders_select_spec])
    # For getting to vmFolder from datacenter
    dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
                                     "vmFolder", False,
                                     [visit_folders_select_spec])
    # For getting Host System to virtual machine
    h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
                                   "vm", False,
                                   [visit_folders_select_spec])
    # For getting to Host System from Compute Resource
    cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
                                   "ComputeResource", "host", False, [])
    # For getting to datastore from Compute Resource
    cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
                                    "ComputeResource", "datastore", False, [])
    # Plain selection specs: the matching traversal specs are defined below
    # and found by name at traversal time.
    rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
    rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
    # For getting to resource pool from Compute Resource
    cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
                                    "ComputeResource", "resourcePool", False,
                                    [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # For getting to child res pool from the parent res pool
    rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
                                    "resourcePool", False,
                                    [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # For getting to Virtual Machine from the Resource Pool
    rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
                                    "vm", False,
                                    [rp_to_rp_select_spec, rp_to_vm_select_spec])
    # Get the assorted traversal spec which takes care of the objects to
    # be searched for from the root folder
    traversal_spec = build_traversal_spec(client_factory, "visitFolders",
                                          "Folder", "childEntity", False,
                                          [visit_folders_select_spec, dc_to_hf,
                                           dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,
                                           rp_to_rp, h_to_vm, rp_to_vm])
    return traversal_spec
def build_property_spec(client_factory, type="VirtualMachine",
                        properties_to_collect=None,
                        all_properties=False):
    """Create a PropertySpec selecting which properties to fetch.

    Defaults to collecting just ``name`` from ``VirtualMachine`` objects;
    ``all_properties`` asks the server for every property instead.
    """
    spec = client_factory.create('ns0:PropertySpec')
    spec.all = all_properties
    spec.pathSet = properties_to_collect if properties_to_collect else ["name"]
    spec.type = type
    return spec
def build_object_spec(client_factory, root_folder, traversal_specs):
    """Create an ObjectSpec rooted at *root_folder* with the given traversals."""
    spec = client_factory.create('ns0:ObjectSpec')
    spec.obj = root_folder
    spec.skip = False
    spec.selectSet = traversal_specs
    return spec
def build_property_filter_spec(client_factory, property_specs, object_specs):
    """Combine property specs and object specs into a PropertyFilterSpec."""
    spec = client_factory.create('ns0:PropertyFilterSpec')
    spec.propSet = property_specs
    spec.objectSet = object_specs
    return spec
def get_object_properties(vim, collector, mobj, type, properties):
    """Retrieve *properties* of the managed object *mobj*.

    Returns None when *mobj* is None. When *collector* is None the session's
    default PropertyCollector is used. An empty/None *properties* list asks
    for all properties.
    """
    client_factory = vim.client.factory
    if mobj is None:
        return None
    prop_collector = collector
    if prop_collector is None:
        prop_collector = vim.get_service_content().propertyCollector
    prop_spec = client_factory.create('ns0:PropertySpec')
    prop_spec.all = (properties is None or len(properties) == 0)
    prop_spec.pathSet = properties
    prop_spec.type = type
    obj_spec = client_factory.create('ns0:ObjectSpec')
    obj_spec.obj = mobj
    obj_spec.skip = False
    filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    filter_spec.propSet = [prop_spec]
    filter_spec.objectSet = [obj_spec]
    return vim.RetrieveProperties(prop_collector, specSet=[filter_spec])
def get_dynamic_property(vim, mobj, type, property_name):
    """Return the value of a single property of *mobj*, or None if absent."""
    contents = get_object_properties(vim, None, mobj, type, [property_name])
    if not contents:
        return None
    prop_set = contents[0].propSet
    if not prop_set:
        return None
    return prop_set[0].val
def get_objects(vim, type, properties_to_collect=None, all=False):
    """List the managed objects of the given *type* in the whole inventory.

    Collects ``name`` by default; pass *all* to fetch every property.
    """
    props = properties_to_collect if properties_to_collect else ["name"]
    factory = vim.client.factory
    traversal = build_recursive_traversal_spec(factory)
    obj_spec = build_object_spec(factory,
                                 vim.get_service_content().rootFolder,
                                 [traversal])
    prop_spec = build_property_spec(factory, type=type,
                                    properties_to_collect=props,
                                    all_properties=all)
    filter_spec = build_property_filter_spec(factory, [prop_spec], [obj_spec])
    return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
                                  specSet=[filter_spec])
def get_prop_spec(client_factory, spec_type, properties):
    """Create a PropertySpec for *spec_type* listing *properties*."""
    spec = client_factory.create('ns0:PropertySpec')
    spec.type = spec_type
    spec.pathSet = properties
    return spec
def get_obj_spec(client_factory, obj, select_set=None):
    """Create an ObjectSpec for *obj*; selectSet is only set when given."""
    spec = client_factory.create('ns0:ObjectSpec')
    spec.obj = obj
    spec.skip = False
    if select_set is not None:
        spec.selectSet = select_set
    return spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
    """Create a PropertyFilterSpec from object specs and property specs."""
    spec = client_factory.create('ns0:PropertyFilterSpec')
    spec.propSet = prop_spec
    spec.objectSet = obj_spec
    return spec
def get_properties_for_a_collection_of_objects(vim, type,
                                               obj_list, properties):
    """
    Retrieve *properties* for every managed object in *obj_list*.

    Returns an empty list immediately when *obj_list* is empty.
    """
    client_factory = vim.client.factory
    if not obj_list:
        return []
    prop_spec = get_prop_spec(client_factory, type, properties)
    obj_specs = [get_obj_spec(client_factory, obj) for obj in obj_list]
    prop_filter_spec = get_prop_filter_spec(client_factory,
                                            obj_specs, [prop_spec])
    return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
                                  specSet=[prop_filter_spec])
|
mfherbst/spack | refs/heads/develop | var/spack/repos/builtin/packages/perl-graph-readwrite/package.py | 5 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlGraphReadwrite(PerlPackage):
    """Write out directed graph in Dot format"""

    # CPAN page for the Graph::ReadWrite distribution's Dot writer module.
    homepage = "http://search.cpan.org/~neilb/Graph-ReadWrite/lib/Graph/Writer/Dot.pm"
    url = "http://search.cpan.org/CPAN/authors/id/N/NE/NEILB/Graph-ReadWrite-2.09.tar.gz"

    # MD5 checksum of the 2.09 release tarball.
    version('2.09', '5cd9191eadd2fe8fe8bb431575434f67')
|
ddiss/ceph-deploy | refs/heads/master | ceph_deploy/hosts/suse/uninstall.py | 3 | from ceph_deploy.lib import remoto
def uninstall(conn, purge=False):
    """Remove the Ceph packages from a remote SUSE host via zypper.

    :param conn: remoto connection to the target host
    :param purge: accepted for API parity with the other distro backends;
        zypper has no separate purge mode, so it is currently unused here
    """
    ceph_packages = [
        'ceph',
        'libcephfs1',
        'librados2',
        'librbd1',
        'ceph-radosgw',
    ]
    remoto.process.run(
        conn,
        ['zypper', '--non-interactive', '--quiet', 'remove'] + ceph_packages,
    )
|
jalaziz/django-guardian | refs/heads/master | utils.py | 85 |
def show_settings(settings, action):
    """Print the guardian module path and the default database config.

    :param settings: Django settings module/object with a ``DATABASES`` dict
    :param action: short description of what is about to run (e.g. "tests"),
        included in the banner
    """
    # Imported lazily so the script can tweak DJANGO_SETTINGS_MODULE first.
    import guardian
    from django.utils.termcolors import colorize

    guardian_path = guardian.__path__[0]
    msg = "django-guardian module's path: %r" % guardian_path
    print(colorize(msg, fg='magenta'))

    db_conf = settings.DATABASES['default']
    output = ["Starting %s for db backend: %s" % (action, db_conf['ENGINE'])]
    for key in sorted(db_conf.keys()):
        # Never echo credentials to the console.
        if key == 'PASSWORD':
            value = '****************'
        else:
            value = db_conf[key]
        output.append(' %s: "%s"' % (key, value))
    # The original computed a throwaway embracer from the first message and
    # immediately overwrote it; compute the final width once instead.
    embracer = colorize('=' * max(len(line) for line in output),
                        fg='green', opts=['bold'])
    output = [colorize(line, fg='blue') for line in output]
    output.insert(0, embracer)
    output.append(embracer)
    print('\n'.join(output))
|
lhilt/scipy | refs/heads/master | scipy/linalg/tests/test_solvers.py | 4 | from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from pytest import raises as assert_raises
from scipy.linalg import solve_sylvester
from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
from scipy.linalg import solve_continuous_are, solve_discrete_are
from scipy.linalg import block_diag, solve, LinAlgError
from scipy.sparse.sputils import matrix
def _load_data(name):
    """
    Load npz data file under data/

    Returns a copy of the data, rather than keeping the npz file open.
    """
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
    with np.load(os.path.join(data_dir, name)) as npz:
        return {key: value for key, value in npz.items()}
class TestSolveLyapunov(object):
    """Tests for solve_continuous_lyapunov and solve_discrete_lyapunov.

    Each entry of ``cases`` is an (a, q) pair; the solvers must satisfy the
    continuous equation a x + x a^H = q and the discrete equation
    a x a^H - x = -q for every pair.
    """

    cases = [
        (np.array([[1, 2], [3, 4]]),
         np.array([[9, 10], [11, 12]])),
        # a, q all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a real; q complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a complex; q real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # An example from Kitagawa, 1977
        (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
                   [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
         np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
                   [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
        # Companion matrix example. a complex; q real; a.shape[0] = 11
        (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
                    0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
                    0.010+0.j],
                   [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j]]),
         np.eye(11)),
        # https://github.com/scipy/scipy/issues/4176
        (matrix([[0, 1], [-1/2, -1]]),
         (matrix([0, 3]).T * matrix([0, 3]).T.T)),
        # https://github.com/scipy/scipy/issues/4176
        (matrix([[0, 1], [-1/2, -1]]),
         (np.array(matrix([0, 3]).T * matrix([0, 3]).T.T))),
        ]

    def test_continuous_squareness_and_shape(self):
        """Non-square or shape-mismatched inputs must raise ValueError."""
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))

    def check_continuous_case(self, a, q):
        """Verify the residual of the continuous Lyapunov equation is ~0."""
        x = solve_continuous_lyapunov(a, q)
        assert_array_almost_equal(
                          np.dot(a, x) + np.dot(x, a.conj().transpose()), q)

    def check_discrete_case(self, a, q, method=None):
        """Verify the residual of the discrete Lyapunov equation is ~0."""
        x = solve_discrete_lyapunov(a, q, method=method)
        assert_array_almost_equal(
                      np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)

    def test_cases(self):
        """Run all cases through the continuous and both discrete methods."""
        for case in self.cases:
            self.check_continuous_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1], method='direct')
            self.check_discrete_case(case[0], case[1], method='bilinear')
def test_solve_continuous_are():
    """Check solve_continuous_are against the CAREX benchmark collection.

    For each case the residual X A + A^H X - X B R^{-1} B^H X + Q must be
    (near) zero; the number of decimals that must agree per case is listed
    in ``min_decimal`` below. Larger cases are loaded from .npz files.
    """
    mat6 = _load_data('carex_6_data.npz')
    mat15 = _load_data('carex_15_data.npz')
    mat18 = _load_data('carex_18_data.npz')
    mat19 = _load_data('carex_19_data.npz')
    mat20 = _load_data('carex_20_data.npz')
    cases = [
        # Carex examples taken from (with default parameters):
        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # Test Case 0: carex #1
        (np.diag([1.], 1),
         np.array([[0], [1]]),
         block_diag(1., 2.),
         1,
         None),
        # Test Case 1: carex #2
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4.]]),
         1,
         None),
        # Test Case 2: carex #3
        (np.array([[0, 1, 0, 0],
                   [0, -1.89, 0.39, -5.53],
                   [0, -0.034, -2.98, 2.43],
                   [0.034, -0.0011, -0.99, -0.21]]),
         np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
         np.array([[2.313, 2.727, 0.688, 0.023],
                   [2.727, 4.271, 1.148, 0.323],
                   [0.688, 1.148, 0.313, 0.102],
                   [0.023, 0.323, 0.102, 0.083]]),
         np.eye(2),
         None),
        # Test Case 3: carex #4
        (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
                   [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
                   [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
                   [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
                   [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
                   [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
                   [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
                   [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
         np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
                   [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
                  ).T * 0.001,
         np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
                   [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                   [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
                   [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
         np.eye(2),
         None),
        # Test Case 4: carex #5
        (np.array(
          [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
           [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
           [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
           [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
           [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
           [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
           [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
           [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
           [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
         np.array([[0.010, -0.011, -0.151],
                   [0.003, -0.021, 0.000],
                   [0.009, -0.059, 0.000],
                   [0.024, -0.162, 0.000],
                   [0.068, -0.445, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000]]),
         np.eye(9),
         np.eye(3),
         None),
        # Test Case 5: carex #6
        (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
        # Test Case 6: carex #7
        (np.array([[1, 0], [0, -2.]]),
         np.array([[1e-6], [0]]),
         np.ones((2, 2)),
         1.,
         'Bad residual accuracy'),
        # Test Case 7: carex #8
        (block_diag(-0.1, -0.02),
         np.array([[0.100, 0.000], [0.001, 0.010]]),
         np.array([[100, 1000], [1000, 10000]]),
         np.ones((2, 2)) + block_diag(1e-6, 0),
         None),
        # Test Case 8: carex #9
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1.]]),
         np.eye(2),
         1.,
         None),
        # Test Case 9: carex #10
        (np.array([[1.0000001, 1], [1., 1.0000001]]),
         np.eye(2),
         np.eye(2),
         np.eye(2),
         None),
        # Test Case 10: carex #11
        (np.array([[3, 1.], [4, 2]]),
         np.array([[1], [1]]),
         np.array([[-11, -5], [-5, -2.]]),
         1.,
         None),
        # Test Case 11: carex #12
        (np.array([[7000000., 2000000., -0.],
                   [2000000., 6000000., -2000000.],
                   [0., -2000000., 5000000.]]) / 3,
         np.eye(3),
         np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
            np.diag([1e-6, 1, 1e6])).dot(
            np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
         np.eye(3) * 1e6,
         'Bad Residual Accuracy'),
        # Test Case 12: carex #13
        (np.array([[0, 0.4, 0, 0],
                   [0, 0, 0.345, 0],
                   [0, -0.524e6, -0.465e6, 0.262e6],
                   [0, 0, 0, -1e6]]),
         np.array([[0, 0, 0, 1e6]]).T,
         np.diag([1, 0, 1, 0]),
         1.,
         None),
        # Test Case 13: carex #14
        (np.array([[-1e-6, 1, 0, 0],
                   [-1, -1e-6, 0, 0],
                   [0, 0, 1e-6, 1],
                   [0, 0, -1, 1e-6]]),
         np.ones((4, 1)),
         np.ones((4, 4)),
         1.,
         None),
        # Test Case 14: carex #15
        (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
        # Test Case 15: carex #16
        (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
            block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
         np.eye(64),
         np.eye(64),
         np.eye(64),
         None),
        # Test Case 16: carex #17
        (np.diag(np.ones((20, )), 1),
         np.flipud(np.eye(21, 1)),
         np.eye(21, 1) * np.eye(21, 1).T,
         1,
         'Bad Residual Accuracy'),
        # Test Case 17: carex #18
        (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
        # Test Case 18: carex #19
        (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
         'Bad Residual Accuracy'),
        # Test Case 19: carex #20
        (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
         'Bad Residual Accuracy')
        ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
                   None, 9, 14, 13, 14, None, 12, None, None)

    def _test_factory(case, dec):
        """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)

        x = solve_continuous_are(a, b, q, r)
        res = x.dot(a) + a.conj().T.dot(x) + q
        out_fact = x.dot(b)
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)

    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])
def test_solve_discrete_are():
    # Benchmark the discrete-time algebraic Riccati equation (DARE) solver
    # against the DAREX collection plus a few mixed real/complex cases.
    # Each case is validated by substituting the returned X back into the
    # DARE residual and requiring it to vanish to `min_decimal` decimals.
    cases = [
        # Darex examples taken from (with default parameters):
        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        # [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the
        #     Discrete-Time Algebraic Riccati Equation to Enhance Stability
        #     of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # TEST CASE 0 : Complex a; real b, q, r
        (np.array([[2, 1-2j], [0, -3j]]),
         np.array([[0], [1]]),
         np.array([[1, 0], [0, 2]]),
         np.array([[1]]),
         None),
        # TEST CASE 1 :Real a, q, r; complex b
        (np.array([[2, 1], [0, -1]]),
         np.array([[-2j], [1j]]),
         np.array([[1, 0], [0, 2]]),
         np.array([[1]]),
         None),
        # TEST CASE 2 : Real a, b; complex q, r
        (np.array([[3, 1], [0, -1]]),
         np.array([[1, 2], [1, 3]]),
         np.array([[1, 1+1j], [1-1j, 2]]),
         np.array([[2, -2j], [2j, 3]]),
         None),
        # TEST CASE 3 : User-reported gh-2251 (Trac #1732)
        (np.array([[0.63399379, 0.54906824, 0.76253406],
                   [0.5404729, 0.53745766, 0.08731853],
                   [0.27524045, 0.84922129, 0.4681622]]),
         np.array([[0.96861695], [0.05532739], [0.78934047]]),
         np.eye(3),
         np.eye(1),
         None),
        # TEST CASE 4 : darex #1
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4]]),
         np.array([[1]]),
         None),
        # TEST CASE 5 : darex #2
        (np.array([[0.9512, 0], [0, 0.9048]]),
         np.array([[4.877, 4.877], [-1.1895, 3.569]]),
         np.array([[0.005, 0], [0, 0.02]]),
         np.array([[1/3, 0], [0, 3]]),
         None),
        # TEST CASE 6 : darex #3
        (np.array([[2, -1], [1, 0]]),
         np.array([[1], [0]]),
         np.array([[0, 0], [0, 1]]),
         np.array([[0]]),
         None),
        # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
        (np.array([[0, 1], [0, -1]]),
         np.array([[1, 0], [2, 1]]),
         np.array([[-4, -4], [-4, 7]]) * (1/11),
         np.array([[9, 3], [3, 1]]),
         None),
        # TEST CASE 8 : darex #5
        (np.array([[0, 1], [0, 0]]),
         np.array([[0], [1]]),
         np.array([[1, 2], [2, 4]]),
         np.array([[1]]),
         None),
        # TEST CASE 9 : darex #6
        (np.array([[0.998, 0.067, 0, 0],
                   [-.067, 0.998, 0, 0],
                   [0, 0, 0.998, 0.153],
                   [0, 0, -.153, 0.998]]),
         np.array([[0.0033, 0.0200],
                   [0.1000, -.0007],
                   [0.0400, 0.0073],
                   [-.0028, 0.1000]]),
         np.array([[1.87, 0, 0, -0.244],
                   [0, 0.744, 0.205, 0],
                   [0, 0.205, 0.589, 0],
                   [-0.244, 0, 0, 1.048]]),
         np.eye(2),
         None),
        # TEST CASE 10 : darex #7
        (np.array([[0.984750, -.079903, 0.0009054, -.0010765],
                   [0.041588, 0.998990, -.0358550, 0.0126840],
                   [-.546620, 0.044916, -.3299100, 0.1931800],
                   [2.662400, -.100450, -.9245500, -.2632500]]),
         np.array([[0.0037112, 0.0007361],
                   [-.0870510, 9.3411e-6],
                   [-1.198440, -4.1378e-4],
                   [-3.192700, 9.2535e-4]]),
         np.eye(4)*1e-2,
         np.eye(2),
         None),
        # TEST CASE 11 : darex #8
        (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
                   [1.0000000, 0.6000000, 0.8000000, 3.3999820],
                   [0.0000000, 1.0000000, 1.8000000, 3.7999820],
                   [0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
         np.array([[1.0, -1.0, -1.0, -1.0],
                   [0.0, 1.0, -1.0, -1.0],
                   [0.0, 0.0, 1.0, -1.0],
                   [0.0, 0.0, 0.0, 1.0]]),
         np.array([[2, 1, 3, 6],
                   [1, 2, 2, 5],
                   [3, 2, 6, 11],
                   [6, 5, 11, 22]]),
         np.eye(4),
         None),
        # TEST CASE 12 : darex #9
        (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
                   [40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
                   [12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
                   [4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
                   [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
         np.array([[0.0434, -0.0122],
                   [2.6606, -1.0453],
                   [3.7530, -5.5100],
                   [3.6076, -6.6000],
                   [0.4617, -0.9148]]) * 0.01,
         np.eye(5),
         np.eye(2),
         None),
        # TEST CASE 13 : darex #10
        (np.kron(np.eye(2), np.diag([1, 1], k=1)),
         np.kron(np.eye(2), np.array([[0], [0], [1]])),
         np.array([[1, 1, 0, 0, 0, 0],
                   [1, 1, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 1, -1, 0],
                   [0, 0, 0, -1, 1, 0],
                   [0, 0, 0, 0, 0, 0]]),
         np.array([[3, 0], [0, 1]]),
         None),
        # TEST CASE 14 : darex #11
        (0.001 * np.array(
         [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
          [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
          [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
          [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
          [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
          [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
          [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
          [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
          [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
         np.array([[4.7600, -0.5701, -83.6800],
                   [0.8790, -4.7730, -2.7300],
                   [1.4820, -13.1200, 8.8760],
                   [3.8920, -35.1300, 24.8000],
                   [10.3400, -92.7500, 66.8000],
                   [7.2030, -61.5900, 38.3400],
                   [4.4540, -36.8300, 20.2900],
                   [1.9710, -15.5400, 6.9370],
                   [3.7730, -30.2800, 14.6900]]) * 0.001,
         np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
         np.eye(3),
         None),
        # TEST CASE 15 : darex #12 - numerically least accurate example
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1]]),
         np.eye(2),
         np.array([[1]]),
         None),
        # TEST CASE 16 : darex #13
        (np.array([[16, 10, -2],
                   [10, 13, -8],
                   [-2, -8, 7]]) * (1/9),
         np.eye(3),
         1e6 * np.eye(3),
         1e6 * np.eye(3),
         None),
        # TEST CASE 17 : darex #14
        (np.array([[1 - 1/1e8, 0, 0, 0],
                   [1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0]]),
         np.array([[1e-08], [0], [0], [0]]),
         np.diag([0, 0, 0, 1]),
         np.array([[0.25]]),
         None),
        # TEST CASE 18 : darex #15
        (np.eye(100, k=1),
         np.flipud(np.eye(100, 1)),
         np.eye(100),
         np.array([[1]]),
         None)
        ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 15, 13,
                   14, 13, 13, 14, 12, 2, 5, 6, 10)
    def _test_factory(case, dec):
        """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_discrete_are(a, b, q, r)
        res = a.conj().T.dot(x.dot(a)) - x + q
        res -= a.conj().T.dot(x.dot(b)).dot(
                    solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
                    )
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])
    # An infeasible example taken from https://arxiv.org/abs/1505.04861v1
    # NOTE(review): this exercises solve_continuous_are, not the discrete
    # solver, so it arguably belongs in test_solve_continuous_are — confirm
    # before moving, since it only checks that LinAlgError is raised.
    A = np.triu(np.ones((3, 3)))
    A[0, 1] = -1
    B = np.array([[1, 1, 0], [0, 0, 1]]).T
    Q = np.full_like(A, -2) + np.diag([8, -1, -1.9])
    R = np.diag([-10, 0.1])
    assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
def test_solve_generalized_continuous_are():
    # Exercises the generalized continuous-time ARE solver (with the E and S
    # terms) on two random cases that differ only in the S matrix.
    cases = [
        # Two random examples differ by s term
        # in the absence of any literature for demanding examples.
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.zeros((3, 2)),
         None),
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.ones((3, 2)),
         None)
        ]
    min_decimal = (10, 10)
    def _test_factory(case, dec):
        """Checks if 0 = A'XE + E'XA - (E'XB + S) R^-1 (B'XE + S') + Q is true"""
        a, b, q, r, e, s, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_continuous_are(a, b, q, r, e, s)
        res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
        out_fact = e.conj().T.dot(x).dot(b) + s
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])
def test_solve_generalized_discrete_are():
    # Exercises the generalized discrete-time ARE solver (with the E and S
    # terms), including a regression case where E is None but S is given.
    mat20170120 = _load_data('gendare_20170120_data.npz')
    cases = [
        # Two random examples differ by s term
        # in the absence of any literature for demanding examples.
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.zeros((3, 2)),
         None),
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.ones((3, 2)),
         None),
        # user-reported (under PR-6616) 20-Jan-2017
        # tests against the case where E is None but S is provided
        (mat20170120['A'],
         mat20170120['B'],
         mat20170120['Q'],
         mat20170120['R'],
         None,
         mat20170120['S'],
         None),
        ]
    min_decimal = (11, 11, 16)
    def _test_factory(case, dec):
        """Checks if 0 = A'XA - E'XE - (A'XB+S)(R+B'XB)^-1(B'XA+S') + Q is true"""
        a, b, q, r, e, s, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_discrete_are(a, b, q, r, e, s)
        # The solver treats omitted E as identity and omitted S as zero;
        # substitute the same defaults before forming the residual.
        if e is None:
            e = np.eye(a.shape[0])
        if s is None:
            s = np.zeros_like(b)
        res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
        res -= (a.conj().T.dot(x.dot(b)) + s).dot(
                    solve(r+b.conj().T.dot(x.dot(b)),
                          (b.conj().T.dot(x.dot(a)) + s.conj().T)
                          )
                    )
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])
def test_are_validate_args():
    """Input-validation checks for solve_continuous_are/solve_discrete_are.

    Each nested helper exercises one validation rule (shape, compatibility,
    symmetry, singularity, finiteness) and asserts that ValueError is raised.
    """
    def test_square_shape():
        # a, q, r and e must all be square matrices.
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, nsq, 1, 1, 1)
            assert_raises(ValueError, x, sq, sq, nsq, 1)
            assert_raises(ValueError, x, sq, sq, sq, nsq)
            assert_raises(ValueError, x, sq, sq, sq, sq, nsq)

    def test_compatible_sizes():
        # b, q, r, e and s must be dimensionally consistent with a.
        nsq = np.ones((3, 2))
        sq = np.eye(4)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sq, nsq, 1, 1)
            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
            assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
            assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
            assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))

    def test_symmetry():
        # q and r must be symmetric/Hermitian.
        nsym = np.arange(9).reshape(3, 3)
        sym = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sym, sym, nsym, sym)
            assert_raises(ValueError, x, sym, sym, sym, nsym)

    def test_singularity():
        # e (both solvers) and r (continuous solver) must be nonsingular;
        # `sing` has two identical rows, hence is exactly singular.
        sing = np.full((3, 3), 1e12)
        sing[2, 2] -= 1
        sq = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sq, sq, sq, sq, sing)
        assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)

    def test_finiteness():
        # NaN/inf entries in any argument must be rejected.
        nm = np.full((2, 2), np.nan)
        sq = np.eye(2)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, nm, sq, sq, sq)
            assert_raises(ValueError, x, sq, nm, sq, sq)
            assert_raises(ValueError, x, sq, sq, nm, sq)
            assert_raises(ValueError, x, sq, sq, sq, nm)
            assert_raises(ValueError, x, sq, sq, sq, sq, nm)
            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)

    # Bug fix: the nested helpers above were previously defined but never
    # invoked, so this test silently verified nothing. Call them explicitly.
    test_square_shape()
    test_compatible_sizes()
    test_symmetry()
    test_singularity()
    test_finiteness()
class TestSolveSylvester(object):
    # Tests scipy.linalg.solve_sylvester: each (a, b, c) case is validated by
    # substituting the solution X back into A X + X B = C.
    cases = [
        # a, b, c all real.
        (np.array([[1, 2], [0, 4]]),
         np.array([[5, 6], [0, 8]]),
         np.array([[9, 10], [11, 12]])),
        # a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their
        # quasi-triangular form.
        (np.array([[1.0, 0, 0, 0],
                   [0, 1.0, 2.0, 0.0],
                   [0, 0, 3.0, -4],
                   [0, 0, 2, 5]]),
         np.array([[2.0, 0, 0, 1.0],
                   [0, 1.0, 0.0, 0.0],
                   [0, 0, 1.0, -1],
                   [0, 0, 1, 1]]),
         np.array([[1.0, 0, 0, 0],
                   [0, 1.0, 0, 0],
                   [0, 0, 1.0, 0],
                   [0, 0, 0, 1.0]])),
        # a, b, c all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 2j], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and b real; c complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and c complex; b real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a complex; b and c real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # not square matrices, real
        (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5]]),
         np.array([[1, 2], [3, 4], [5, 6]])),
        # not square matrices, complex
        (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5-1j]]),
         np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
        ]
    def check_case(self, a, b, c):
        """Solve A X + X B = C and verify the residual vanishes."""
        x = solve_sylvester(a, b, c)
        assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
    def test_cases(self):
        # Run every case in the table above.
        for case in self.cases:
            self.check_case(case[0], case[1], case[2])
    def test_trivial(self):
        """A = I, B = [[1]], C = [2, 2]' has the obvious solution X = [1, 1]'."""
        a = np.array([[1.0, 0.0], [0.0, 1.0]])
        b = np.array([[1.0]])
        c = np.array([2.0, 2.0]).reshape(-1, 1)
        x = solve_sylvester(a, b, c)
        assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
|
taichatha/youtube-dl | refs/heads/master | youtube_dl/extractor/libsyn.py | 106 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate
class LibsynIE(InfoExtractor):
    """Extractor for podcast episodes embedded with the html5-player.libsyn.com player."""
    _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'

    _TESTS = [{
        'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
        'md5': '443360ee1b58007bc3dcf09b41d093bb',
        'info_dict': {
            'id': '3377616',
            'ext': 'mp3',
            'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
            'description': 'md5:601cb790edd05908957dae8aaa866465',
            'upload_date': '20150220',
            'thumbnail': 're:^https?://.*',
        },
    }, {
        'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
        'md5': '6c5cb21acd622d754d3b1a92b582ce42',
        'info_dict': {
            'id': '3727166',
            'ext': 'mp3',
            'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
            'upload_date': '20150818',
            'thumbnail': 're:^https?://.*',
        }
    }]

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        # Drop any trailing player options so the canonical embed page is fetched.
        url = m.group('mainurl')
        webpage = self._download_webpage(url, video_id)

        # Bug fix: the pattern is now a raw string; previously '\s' was an
        # invalid escape sequence in a plain string literal (DeprecationWarning
        # today, a SyntaxError in future Python versions).
        formats = [{
            'url': media_url,
        } for media_url in set(re.findall(r'var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]

        podcast_title = self._search_regex(
            r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None)
        episode_title = self._search_regex(
            r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title')

        # Podcast name may be absent from the page; fall back to episode title.
        title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title

        description = self._html_search_regex(
            r'<div id="info_text_body">(.+?)</div>', webpage,
            'description', default=None)
        thumbnail = self._search_regex(
            r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
            webpage, 'thumbnail', fatal=False)
        release_date = unified_strdate(self._search_regex(
            r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': release_date,
            'formats': formats,
        }
|
xaviercobain88/framework-python | refs/heads/master | openerp/addons/l10n_fr/wizard/fr_report_bilan.py | 45 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_bilan_report(osv.osv_memory):
    # Transient (wizard) model: lets the user pick a fiscal year and launch
    # the French "bilan" (balance sheet) report for it.
    _name = 'account.bilan.report'
    _description = 'Account Bilan Report'
    def _get_default_fiscalyear(self, cr, uid, context=None):
        """Return the id of the fiscal year currently in effect."""
        fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
        return fiscalyear_id
    _columns = {
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
    }
    _defaults = {
        'fiscalyear_id':_get_default_fiscalyear
    }
    def print_bilan_report(self, cr, uid, ids, context=None):
        """Collect the selected fiscal year and return the report action
        that renders the 'l10n.fr.bilan' report.
        """
        # NOTE(review): crashes with AttributeError if context is None despite
        # the default; presumably the web client always passes a dict — confirm.
        active_ids = context.get('active_ids', [])
        data = {}
        data['form'] = {}
        data['ids'] = active_ids
        data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
        return {'type': 'ir.actions.report.xml', 'report_name': 'l10n.fr.bilan', 'datas': data}
# Instantiation registers the model with the OSV pool (OpenERP 6/7 convention).
account_bilan_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
justasabc/python_tutorials | refs/heads/master | tutorial/thread/thread.py | 1 | import threading
import time
exitFlag = 0
def print_time(threadName, delay, counter):
    """Print the current time `counter` times, sleeping `delay` seconds
    before each print. Exits the calling thread early if the module-level
    `exitFlag` becomes truthy.
    """
    while counter:
        if exitFlag:
            # Bug fix: the original called thread.exit(), but the `thread`
            # module is never imported (and was removed in Python 3), so this
            # branch raised NameError. Raising SystemExit terminates only the
            # calling thread, which is exactly what thread.exit() did.
            raise SystemExit
        time.sleep(delay)
        print("{0}: {1}".format(threadName, time.ctime(time.time())))
        counter -= 1
class MyThread(threading.Thread):
    """
    A simple worker thread that prints the current time `counter` times,
    pausing `delay` seconds between prints (see print_time above).
    """
    def __init__(self,threadID,threadName,delay,counter):
        threading.Thread.__init__(self)
        self.threadID = threadID      # numeric identifier (informational only)
        self.threadName = threadName  # label used in the printed output
        self.delay = delay            # seconds to sleep between prints
        self.counter = counter        # number of times to print
    def run(self):
        # Entry point executed in the new thread when start() is called.
        print("Starting {0}".format(self.threadName))
        print_time(self.threadName,self.delay,self.counter)
        print("Exiting {0}".format(self.threadName))
def main():
    """Spawn two timer threads and return without waiting for them."""
    workers = (
        MyThread(1, 'Thread-1', 1, 5),
        MyThread(2, 'Thread-2', 2, 5),
    )
    for worker in workers:
        worker.start()
    print('Exiting Main Thread')
if __name__ == "__main__":
main()
|
mluo613/osf.io | refs/heads/develop | api/citations/views.py | 4 | from api.base import permissions as base_permissions
from api.base.filters import ODMFilterMixin
from api.base.pagination import NoMaxPageSizePagination
from api.base.utils import get_object_or_error
from api.base.views import JSONAPIBaseView
from api.citations.serializers import CitationSerializer
from framework.auth.oauth_scopes import CoreScopes
from rest_framework import permissions as drf_permissions
from rest_framework import generics
from website.models import CitationStyle
class CitationStyleList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin):
    '''List of standard citation styles available for rendering citations. *Read-only*

    ##Note
    **This API endpoint is under active development, and is subject to change in the future**

    ##Citation Attributes

        name           type     description
        =========================================================================
        date_parsed    string   date the citation style was first added to the database
        summary        string   summary of the citation style
        short_title    string   a short name or nickname for the citation style
        title          string   official name of the citation style

    Citation style may be filtered by their 'title', 'short_title', 'summary', and 'id'
    '''
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope
    )
    # Publicly readable; no write scope (read-only endpoint).
    required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = CitationSerializer
    pagination_class = NoMaxPageSizePagination
    view_category = 'citations'
    view_name = 'citation-list'
    # overrides ListAPIView
    def get_default_odm_query(self):
        # Deliberately returns None: with no default query, the request
        # filters alone determine the result set (no filters -> all styles).
        return
    def get_queryset(self):
        """Return all CitationStyle records matching the request's filters."""
        return CitationStyle.find(self.get_query_from_request())
class CitationStyleDetail(JSONAPIBaseView, generics.RetrieveAPIView):
    '''Detail for a citation style *Read-only*

    ##Note
    **This API endpoint is under active development, and is subject to change in the future**

    ##Citation Attributes

        name           type     description
        =========================================================================
        date_parsed    string   date the citation style was first added to the database
        summary        string   summary of the citation style
        short_title    string   a short name or nickname for the citation style
        title          string   official name of the citation style
    '''
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope
    )
    # Publicly readable; no write scope (read-only endpoint).
    required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = CitationSerializer
    view_category = 'citations'
    view_name = 'citation-detail'
    def get_object(self):
        """Fetch the CitationStyle for the URL's citation_id (404 on miss)
        and enforce object-level permissions before returning it.
        """
        cit = get_object_or_error(CitationStyle, self.kwargs['citation_id'])
        self.check_object_permissions(self.request, cit)
        return cit
|
levenlabs/ansible | refs/heads/stable-2.1 | test/units/parsing/test_unquote.py | 80 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.quoting import unquote
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestUnquote:
    # Table of (quoted, expected) pairs fed to ansible.parsing.quoting.unquote.
    UNQUOTE_DATA = (
            (u'1', u'1'),
            (u'\'1\'', u'1'),
            (u'"1"', u'1'),
            (u'"1 \'2\'"', u'1 \'2\''),
            (u'\'1 "2"\'', u'1 "2"'),
            (u'\'1 \'2\'\'', u'1 \'2\''),
            (u'"1\\"', u'"1\\"'),
            (u'\'1\\\'', u'\'1\\\''),
            (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
            (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
            (u'"', u'"'),
            (u'\'', u'\''),
            # Not entirely sure these are good but they match the current
            # behaviour
            (u'"1""2"', u'1""2'),
            (u'\'1\'\'2\'', u'1\'\'2'),
            (u'"1" 2 "3"', u'1" 2 "3'),
            (u'"1"\'2\'"3"', u'1"\'2\'"3'),
            )
    def check_unquote(self, quoted, expected):
        """Assert that unquote(quoted) yields exactly `expected`."""
        tools.eq_(unquote(quoted), expected)
    def test_unquote(self):
        # Nose test generator: yields (callable, *args) once per data row so
        # each pair is reported as its own test case.
        for datapoint in self.UNQUOTE_DATA:
            yield self.check_unquote, datapoint[0], datapoint[1]
|
cafecivet/django_girls_tutorial | refs/heads/master | Lib/encodings/iso8859_16.py | 593 | """ Python Character Mapping Codec iso8859_16 generated from 'MAPPINGS/ISO8859/8859-16.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless whole-string codec backed by the charmap tables defined below.
    # (This file is generated by gencodec.py; edits are lost on regeneration.)
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Incremental (chunked) encoding; charmap codecs keep no state, so each
    # chunk is encoded independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Incremental (chunked) decoding; single-byte charmaps need no buffering.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream interface; inherits all behaviour from Codec/StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream interface; inherits all behaviour from Codec/StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry that registers this codec as 'iso8859-16'."""
    return codecs.CodecInfo(
        name='iso8859-16',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0105' # 0xA2 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\u20ac' # 0xA4 -> EURO SIGN
u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0218' # 0xAA -> LATIN CAPITAL LETTER S WITH COMMA BELOW
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017a' # 0xAE -> LATIN SMALL LETTER Z WITH ACUTE
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u010c' # 0xB2 -> LATIN CAPITAL LETTER C WITH CARON
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
u'\u201d' # 0xB5 -> RIGHT DOUBLE QUOTATION MARK
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
u'\u010d' # 0xB9 -> LATIN SMALL LETTER C WITH CARON
u'\u0219' # 0xBA -> LATIN SMALL LETTER S WITH COMMA BELOW
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0106' # 0xC5 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015a' # 0xD7 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u0170' # 0xD8 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0118' # 0xDD -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0107' # 0xE5 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u015b' # 0xF7 -> LATIN SMALL LETTER S WITH ACUTE
u'\u0171' # 0xF8 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0119' # 0xFD -> LATIN SMALL LETTER E WITH OGONEK
u'\u021b' # 0xFE -> LATIN SMALL LETTER T WITH COMMA BELOW
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
lcpt/xc | refs/heads/master | verif/tests/materials/uniaxial/test_concrete02_02.py | 1 | # -*- coding: utf-8 -*-
__author__= "Ana Ortega (AO_O) "
__copyright__= "Copyright 2016, AO_O"
__license__= "GPL"
__version__= "3.0"
__email__= "ana.ortega@ciccp.es "

''' Test of material concrete02 constitutive model.
In the tensile Branch, the post-cracking range is initially approximated with a exponential
decay curve, according to the model proposed by Stramandinoli and La Rovere, to take into account
the tension-stiffening effect. Since the concrete02 implementation requires a straight line, a
linear regression of this curve is used for this purpose.
'''

import os
import xc_base
import geom
import xc
from materials import concrete_base
from materials import typical_materials
from materials.ehe import EHE_materials
from materials import concrete_base
import matplotlib.pyplot as plt
import math
import numpy as np

# Cross-section geometry and experimental material data driving the
# tension-stiffening calculation below.
width=0.3 #width (cross-section coordinate Y)
depth=0.5 #depth (cross-section coordinate Z)
ro_exp=0.67*1e-2 #longitudinal steel ratio
fct_exp=1.17*1e6 # concrete tensile strength [Pa]
Ec_exp=10*1e9  # concrete elastic modulus [Pa]
fy_exp=526e6  # reinforcing-steel yield strength [Pa]
Es_exp=197*1e9  # reinforcing-steel elastic modulus [Pa]
As_calc=ro_exp*width*depth #reinforcement area in the cross-section

feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor

# Materials definition
concrAux= EHE_materials.HA25 #parameters only for the compression branch

#Reinforcing steel.
rfSteel=concrete_base.ReinforcingSteel(steelName='rfSteel', fyk=fy_exp, emax=0.08, gammaS=1.15,k=1.05)
rfSteel.Es=Es_exp
steelDiagram= rfSteel.defDiagK(preprocessor) #Definition of steel stress-strain diagram in XC.

#Parameters for tension stiffening of concrete
paramTS=concrete_base.paramTensStiffness(concrMat=concrAux,reinfMat=rfSteel,reinfRatio=ro_exp,diagType='K')
paramTS.E_c=Ec_exp #concrete elastic modulus
paramTS.f_ct=fct_exp #concrete tensile strength
paramTS.E_ct=Ec_exp #concrete elastic modulus in the tensile linear-elastic range
paramTS.E_s=Es_exp # steel elastic modulus
paramTS.eps_y=fy_exp/Es_exp # steel yield strain

#regression line type 1 (linear approximation of the exponential
#post-cracking decay curve)
ftdiag=paramTS.pointOnsetCracking()['ft']
ectdiag=paramTS.pointOnsetCracking()['eps_ct']
eydiag=paramTS.eps_y
######Etsdiag=ftdiag/(eydiag-ectdiag)
Etsdiag=paramTS.regresLine()['slope']
#Material for making concrete fibers: concrete02 with tension stiffening
concr= typical_materials.defConcrete02(preprocessor=preprocessor,name='concr',epsc0=concrAux.epsilon0(),fpc=concrAux.fmaxK(),fpcu=0.85*concrAux.fmaxK(),epscu=concrAux.epsilonU(),ratioSlope=0.1,ft=ftdiag,Ets=abs(Etsdiag))

#regression line passing through point (optional approximation)
# ftdiag=paramTS.f_ct
# Etsdiag=-paramTS.slopeRegresLineFixedPoint()
# concr= typical_materials.defConcrete02(preprocessor=preprocessor,name='concr',epsc0=concrAux.epsilon0(),fpc=concrAux.fmaxK(),fpcu=0.85*concrAux.fmaxK(),epscu=concrAux.epsilonU(),ratioSlope=0.1,ft=ftdiag,Ets=Etsdiag)
#

# Sweep 25 tensile strain values and record the concrete02 stress response.
epsMin=0.0
epsMax=0.0028
incEps=(epsMax-epsMin)/25.0
strains=np.arange(epsMin,epsMax,incEps)
stress=list()
for eps in strains:
    concr.setTrialStrain(eps, 0.0)
    stress.append(concr.getStress())

#Test comparison values
stressComp=[0.0, 1029185.6272346211, 997696.6777423947, 966207.7282501684, 934718.778757942, 903229.8292657157, 871740.8797734893, 840251.930281263, 808762.9807890366, 777274.0312968101, 745785.0818045839, 714296.1323123574, 682807.1828201312, 651318.2333279047, 619829.2838356785, 588340.334343452, 556851.3848512258, 525362.4353589994, 493873.48586677294, 462384.5363745466, 430895.58688232035, 399406.6373900939, 367917.6878978675, 336428.73840564117, 304939.78891341493]

# ratio1: normalized residual between computed and reference stresses;
# the test passes when it is negligible.
residStresses= (np.array(stress) - np.array(stressComp))
ratio1= np.linalg.norm(residStresses)/concrAux.fmaxK()

import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if(ratio1<1e-5):
    print "test ",fname,": ok."
else:
    lmsg.error(fname+' ERROR.')

# ### FIGURES & REPORTS
# plt.plot(strains,stress)
# #exponential curve
# strainsCurve=paramTS.ptosExpCurvPostCracking()['strainPts']
# stressCurve=paramTS.ptosExpCurvPostCracking()['stressPts']
# plt.plot(strainsCurve,stressCurve)
# plt.show()
# #report concrete material
# from postprocess.reports import report_material
# report_material.report_concrete02(concrDiag=concr,paramTensStiffening=paramTS,grTitle='concrete $\sigma-\epsilon$ curve',grFileName='conc2',texFileName='conc2.tex')
|
Qinusty/rethinkdb | refs/heads/next | test/rql_test/connections/http_support/werkzeug/wsgi.py | 146 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
    """Mark a function as a responder.

    The decorated function is expected to *return* a WSGI application;
    the wrapper then immediately invokes that application with the last
    two positional arguments (``environ`` and ``start_response``)::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapper(*args):
        return f(*args)(*args[-2:])
    return update_wrapper(wrapper, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """Recreate the full URL of the current request as an IRI, or parts
    of it.

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    When `trusted_hosts` is given the host is validated against it and a
    :exc:`~werkzeug.exceptions.SecurityError` is raised for untrusted
    hosts.  The return value is an IRI and may contain unicode
    characters; pass it through :func:`~werkzeug.urls.iri_to_uri` for an
    ASCII-only representation.

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` to drop the query string.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see
                          :func:`host_is_trusted` for more information.
    """
    pieces = [environ['wsgi.url_scheme'], '://',
              get_host(environ, trusted_hosts)]
    if host_only:
        return uri_to_iri(''.join(pieces) + '/')
    script = url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', '')))
    pieces.append(script.rstrip('/'))
    pieces.append('/')
    if not root_only:
        raw_path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
        pieces.append(url_quote(raw_path.lstrip(b'/')))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                pieces.append('?' + qs)
    return uri_to_iri(''.join(pieces))
def host_is_trusted(hostname, trusted_list):
    """Check whether `hostname` matches a list of trusted host names,
    normalizing ports and IDNA encoding along the way.

    .. versionadded:: 0.9

    :param hostname: the hostname to check.
    :param trusted_list: a list of hostnames to check against.  An entry
                         starting with a dot also matches all subdomains.
    """
    if not hostname:
        return False
    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _strip_port(name):
        # drop an explicit port, then IDNA-encode for comparison
        if ':' in name:
            name = name.rsplit(':', 1)[0]
        return _encode_idna(name)

    hostname = _strip_port(hostname)
    for candidate in trusted_list:
        wildcard = candidate.startswith('.')
        if wildcard:
            candidate = candidate[1:]
        candidate = _strip_port(candidate)
        if candidate == hostname:
            return True
        if wildcard and hostname.endswith('.' + candidate):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.

    The ``X-Forwarded-Host`` header takes precedence, then ``Host``,
    then ``SERVER_NAME`` (with a port appended unless it is the default
    port for the scheme).  If `trusted_hosts` is given and the host is
    not in it a :exc:`~werkzeug.exceptions.SecurityError` is raised.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see
                          :func:`host_is_trusted` for more information.
    """
    if 'HTTP_X_FORWARDED_HOST' in environ:
        host = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
    elif 'HTTP_HOST' in environ:
        host = environ['HTTP_HOST']
    else:
        host = environ['SERVER_NAME']
        scheme_and_port = (environ['wsgi.url_scheme'],
                           environ['SERVER_PORT'])
        if scheme_and_port not in (('https', '443'), ('http', '80')):
            host += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None:
        if not host_is_trusted(host, trusted_hosts):
            from werkzeug.exceptions import SecurityError
            raise SecurityError('Host "%s" is not trusted' % host)
    return host
def get_content_length(environ):
    """Return the content length from the WSGI environment as an
    integer, clamped to zero.  Returns `None` when the header is absent
    or unparseable.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    raw = environ.get('CONTENT_LENGTH')
    if raw is None:
        return None
    try:
        length = int(raw)
    except (ValueError, TypeError):
        return None
    return length if length > 0 else 0
def get_input_stream(environ, safe_fallback=True):
    """Return the input stream from the WSGI environment, wrapped so it
    is safe to read without tracking the content length yourself.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: when no content length is available, return an
                          empty stream (`True`, the default) instead of
                          the raw, unbounded WSGI input stream.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # A server that sets this extension promises the stream terminates,
    # so it can be read to the end as-is.
    if environ.get('wsgi.input_terminated'):
        return stream

    # Without a content length reading the raw stream may hang; fall
    # back to an empty stream unless explicitly told otherwise.
    if content_length is None:
        if safe_fallback:
            return _empty_stream
        return stream

    # Otherwise cap reads at the declared content length.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Return the `QUERY_STRING` from the WSGI environment as a native
    string restricted to ASCII, handling the WSGI decoding dance on
    Python 3.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string
                    from.
    """
    raw_qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # QUERY_STRING really should be ascii safe but some browsers
    # will send us some unicode stuff (I am looking at you IE).
    # In that case we want to urllib quote it badly.
    return try_coerce_native(url_quote(raw_qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Return the `PATH_INFO` from the WSGI environment, properly
    decoded.  Handles the WSGI decoding dance on Python 3; with
    ``charset=None`` a bytestring is returned instead.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
    return to_unicode(raw_path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Return the `SCRIPT_NAME` from the WSGI environment, properly
    decoded.  Handles the WSGI decoding dance on Python 3; with
    ``charset=None`` a bytestring is returned instead.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no decoding
                    should be performed.
    :param errors: the decoding error handling.
    """
    raw_name = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
    return to_unicode(raw_name, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Remove and return the next segment of `PATH_INFO`, pushing it
    onto `SCRIPT_NAME`.  Returns `None` if `PATH_INFO` is empty.  With
    ``charset=None`` a bytestring is returned.

    Empty segments (``'/foo//bar'``) are skipped but their slashes are
    still pushed to `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None

    script_name = environ.get('SCRIPT_NAME', '')

    # carry any leading slashes over to SCRIPT_NAME
    stripped = path.lstrip('/')
    script_name += '/' * (len(path) - len(stripped))

    if '/' in stripped:
        segment, remainder = stripped.split('/', 1)
        environ['PATH_INFO'] = '/' + remainder
        environ['SCRIPT_NAME'] = script_name + segment
    else:
        segment = stripped
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + segment

    return to_unicode(wsgi_get_bytes(segment), charset, errors,
                      allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Return the next segment on `PATH_INFO` without modifying the
    environment (the non-destructive sibling of :func:`pop_path_info`):

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    With ``charset=None`` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    parts = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    if parts:
        return to_unicode(wsgi_get_bytes(parts[0]), charset, errors,
                          allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment. The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI. This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL. Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # Strip any userinfo ("user:pass@") and drop the default port for
        # the scheme (80 for http, 443 for https) so equivalent netlocs
        # compare equal.
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # resolve the given path relative to the base so relative paths work
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        if not (base_scheme in (u'http', u'https') and
                base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None

    # remainder below the application root is the path info
    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):

    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.wsgi import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``. This is pretty useful during development
    because a standalone media server is not required. One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })

    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module. If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap. If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """

    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        # Build a loader per export: a tuple selects package data, an
        # existing file path a single file, any other string a directory.
        for key, value in iteritems(exports):
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        if disallow is not None:
            from fnmatch import fnmatch
            # NOTE(review): fnmatch() expects a single pattern string here,
            # while the docstring advertises a list of rules -- confirm the
            # intended usage before relying on list support.
            self.is_allowed = lambda x: not fnmatch(x, disallow)
        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename):
        # Deferred open: returns (file object, mtime, size) when invoked.
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )

    def get_file_loader(self, filename):
        # Loader for a single exported file; the requested sub-path is
        # ignored.
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package, package_path):
        # Loader serving files from a Python package's data via
        # pkg_resources.
        from pkg_resources import DefaultProvider, ResourceManager, \
            get_provider
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        filesystem_bound = isinstance(provider, DefaultProvider)

        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                # resource backed by a real file: use the regular opener
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            # zipped/abstract resource: stream it, use import time as mtime
            # and an unknown (0) size
            return basename, lambda: (
                provider.get_resource_stream(manager, path),
                loadtime,
                0
            )
        return loader

    def get_directory_loader(self, directory):
        # Loader mapping the requested sub-path into `directory`.
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader

    def generate_etag(self, mtime, file_size, real_filename):
        # Weak content fingerprint from mtime, size and filename checksum.
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(sys.getfilesystemencoding())
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )

    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)
        if PY2:
            cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
        # sanitize the path for non unix systems
        cleaned_path = cleaned_path.strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # drop empty segments and '..' to prevent directory traversal
        path = '/'.join([''] + [x for x in cleaned_path.split('/')
                                if x and x != '..'])
        file_loader = None
        # find the first export whose mount point matches the request
        for search_path, loader in iteritems(self.exports):
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        # no match (or disallowed): forward to the wrapped application
        if file_loader is None or not self.is_allowed(real_filename):
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()
        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            # conditional request: answer 304 without a body
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))
        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        return wrap_file(environ, f)
class DispatcherMiddleware(object):

    """Mount multiple WSGI applications under path prefixes of a single
    application::

        app = DispatcherMiddleware(app, {
            '/app2':    app2,
            '/app3':    app3
        })

    The longest matching prefix wins; unmatched requests go to the
    wrapped default application.
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        script = environ.get('PATH_INFO', '')
        path_info = ''
        app = None
        # peel segments off the right end until a mount point matches
        while '/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                break
            script, _, tail = script.rpartition('/')
            path_info = '/%s%s' % (tail, path_info)
        if app is None:
            app = self.mounts.get(script, self.app)
        environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + script
        environ['PATH_INFO'] = path_info
        return app(environ, start_response)
@implements_iterator
class ClosingIterator(object):

    """Wrap a WSGI response iterable so that additional cleanup
    callbacks run when it is closed.

    The WSGI specification requires middlewares and gateways to respect
    the ``close`` callback of the returned iterator; this helper chains
    the iterable's own ``close`` (if any) with extra callbacks::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    A single callable may be passed instead of a list.  No closing
    iterator is needed if the application uses response objects and
    finishes the processing when the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        inner = iter(iterable)
        self._next = partial(next, inner)
        if callbacks is None:
            cleanups = []
        elif callable(callbacks):
            cleanups = [callbacks]
        else:
            cleanups = list(callbacks)
        # the iterable's own close (if present) must run first
        own_close = getattr(inner, 'close', None)
        if own_close:
            cleanups.insert(0, own_close)
        self._callbacks = cleanups

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        for cb in self._callbacks:
            cb()
def wrap_file(environ, file, buffer_size=8192):
    """Wrap a file-like object with the WSGI server's file wrapper if
    one is available, otherwise with the generic :class:`FileWrapper`.

    .. versionadded:: 0.5

    If the server's file wrapper is used it must be passed through
    unchanged rather than iterated inside the application.  To return a
    file wrapper inside a response object, set
    :attr:`~BaseResponse.direct_passthrough` to `True`.  More
    information about file wrappers is available in :pep:`333`.

    :param file: a :class:`file`-like object with a :meth:`~file.read`
                 method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper = environ.get('wsgi.file_wrapper', FileWrapper)
    return wrapper(file, buffer_size)
@implements_iterator
class FileWrapper(object):

    """Iterate over a :class:`file`-like object in ``buffer_size``-byte
    chunks until it is fully read.

    Prefer :func:`wrap_file`, which uses the WSGI server's native file
    wrapper support when available.  When combined with a
    :class:`BaseResponse` the ``direct_passthrough`` mode must be used.

    .. versionadded:: 0.5

    :param file: a :class:`file`-like object with a :meth:`~file.read`
                 method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        if hasattr(self.file, 'close'):
            self.file.close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
def _make_chunk_iter(stream, limit, buffer_size):
    """Shared helper for :func:`make_line_iter` and
    :func:`make_chunk_iter`: yield non-empty chunks from either a plain
    iterable or a readable stream (bounded by `limit`)."""
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    if not hasattr(stream, 'read'):
        # already an iterable of chunks; drop empty items
        for piece in stream:
            if piece:
                yield piece
        return
    if limit is not None and not isinstance(stream, LimitedStream):
        stream = LimitedStream(stream, limit)
    read = stream.read
    while True:
        chunk = read(buffer_size)
        if not chunk:
            return
        yield chunk
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # peek at the first chunk to learn whether we are dealing with text
    # or bytes, then build matching sentinel values
    first_item = next(_iter, '')
    if not first_item:
        return

    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')

    _iter = chain((first_item,), _iter)

    def _iter_basic_lines():
        # yield newline-terminated lines; a partial line at a chunk
        # boundary is buffered until the next chunk (or EOF) completes it
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
            buffer = new_buf
        if buffer:
            yield _join(buffer)

    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # peek at the first chunk to decide between text and bytes handling
    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        # capturing group keeps the separators in the split result so
        # they can be detected below
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        # splice the leftover from the previous read in front so a
        # separator spanning a chunk boundary is still honored
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes. If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string. The return value of that function is forwarded
    to the reader function. So if it returns an empty string
    :meth:`read` will return an empty string as well.
    The limit however must never be higher than what the stream can
    output. Otherwise :meth:`readlines` will try to read past the
    limit.
    .. admonition:: Note on WSGI compliance
    calls to :meth:`readline` and :meth:`readlines` are not
    WSGI compliant because it passes a size argument to the
    readline methods. Unfortunately the WSGI PEP is not safely
    implementable without a size argument to :meth:`readline`
    because there is no EOF marker in the stream. As a result
    of that the use of :meth:`readline` is discouraged.
    For the same reason iterating over the :class:`LimitedStream`
    is not portable. It internally calls :meth:`readline`.
    We strongly suggest using :meth:`read` only or using the
    :func:`make_line_iter` which safely iterates line-based
    over a WSGI input stream.
    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """
    def __init__(self, stream, limit):
        # Bind the underlying reader callables once; after this the wrapper
        # never touches the wrapped stream object directly again.
        self._read = stream.read
        self._readline = stream.readline
        # Number of bytes consumed so far; compared against `limit` to
        # decide exhaustion.
        self._pos = 0
        self.limit = limit
    def __iter__(self):
        return self
    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit
    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)
    def on_disconnect(self):
        """What should happen if a disconnect is detected? The return
        value of this function is returned from read functions in case
        the client went away. By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()
    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream. This consumes all the data left until the
        limit is reached.
        :param chunk_size: the size for a chunk. It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            # Never request more than the remaining byte budget.
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk
    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.
        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1: # -1 is for consistence with file
            size = self.limit
        # Clamp the request so we never read past the declared limit.
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        if to_read and len(read) != to_read:
            # A short read means the peer stopped sending before delivering
            # everything the limit promised: treat as a disconnect.
            return self.on_disconnect()
        self._pos += len(read)
        return read
    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            # Cap the caller's size by what is left within the limit.
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        if size and not line:
            # We asked for bytes but got nothing: the client went away.
            return self.on_disconnect()
        self._pos += len(line)
        return line
    def readlines(self, size=None):
        """Reads a file into a list of strings. It calls :meth:`readline`
        until the file is read to the end. It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # NOTE(review): `last_pos` is re-synced to `self._pos` at the
                # end of every iteration, so this subtraction always yields
                # zero and `size` never shrinks. Looks suspicious (expected
                # `self._pos - last_pos`?) -- confirm against upstream intent.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result
    def tell(self):
        """Returns the position of the stream.
        .. versionadded:: 0.9
        """
        return self._pos
    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
|
mldbai/mldb | refs/heads/master | ext/hoedown/test/runner.py | 11 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import difflib
import json
import os
import re
import subprocess
import unittest
# Directory layout: this file lives in <project>/test/, the hoedown binary
# is built at the project root.
TEST_ROOT = os.path.dirname(__file__)
PROJECT_ROOT = os.path.dirname(TEST_ROOT)
HOEDOWN = [os.path.abspath(os.path.join(PROJECT_ROOT, 'hoedown'))]
# HTML Tidy invocation used to normalize both actual and expected output
# before comparison, so formatting differences don't fail the tests.
TIDY = ['tidy', '--show-body-only', '1', '--show-warnings', '0',
        '--quiet', '1']
CONFIG_PATH = os.path.join(TEST_ROOT, 'config.json')
# Collapses any non-word character when deriving test_* method names.
SLUGIFY_PATTERN = re.compile(r'\W')
def with_metaclass(meta, *bases):
    """Metaclass injection utility from six.

    Returns a throwaway base class; when a user class inherits from it,
    Python re-creates that class through ``meta`` with ``bases`` as the
    real base classes.

    See: https://pythonhosted.org/six/
    """
    class _shim(meta):
        def __new__(mcs, name, this_bases, namespace):
            # Discard the temporary base and build the real class.
            return meta(name, bases, namespace)
    return type.__new__(_shim, 'temporary_class', (), {})
class TestFailed(AssertionError):
    """Raised when rendered output differs from the expected document.

    The stringified exception is the test name followed by a unified
    diff between the expected and actual output.
    """

    def __init__(self, name, expected, got):
        super(TestFailed, self).__init__(self)
        delta = difflib.unified_diff(
            expected.splitlines(), got.splitlines(),
            fromfile='Expected', tofile='Got',
        )
        self.description = '{name}\n{diff}'.format(
            name=name, diff='\n'.join(delta),
        )

    def __str__(self):
        return self.description
def _test_func(test_case):
    """Render one markdown input with hoedown, tidy both the actual and the
    expected HTML, and raise :class:`TestFailed` on any difference.
    """
    flags = test_case.get('flags') or []
    # Render the markdown input to HTML.
    render_proc = subprocess.Popen(
        HOEDOWN + flags + [os.path.join(TEST_ROOT, test_case['input'])],
        stdout=subprocess.PIPE,
    )
    raw_html = render_proc.communicate()[0]
    # Normalize the rendered HTML through tidy (fed via stdin).
    got_proc = subprocess.Popen(
        TIDY, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
    )
    got = got_proc.communicate(input=raw_html)[0].strip()
    # Normalize the reference HTML through tidy (read from file).
    expected_proc = subprocess.Popen(
        TIDY + [os.path.join(TEST_ROOT, test_case['output'])],
        stdout=subprocess.PIPE,
    )
    expected = expected_proc.communicate()[0].strip()
    # Cleanup.
    for proc in (render_proc, got_proc, expected_proc):
        proc.stdout.close()
    try:
        assert expected == got
    except AssertionError:
        raise TestFailed(test_case['input'], expected, got)
def _make_test(test_case):
return lambda self: _test_func(test_case)
class MarkdownTestsMeta(type):
    """Meta class for ``MarkdownTestCase`` to inject test cases on the fly.

    Reads ``config.json`` and adds one ``test_*`` method per declared test,
    honoring per-test ``skip`` and ``fail`` (expected failure) flags.
    """
    def __new__(meta, name, bases, attrs):
        with open(CONFIG_PATH) as f:
            config = json.load(f)
        for test in config['tests']:
            input_name = test['input']
            # Derive a valid identifier from the input filename.
            attr_name = 'test_' + SLUGIFY_PATTERN.sub(
                '_', os.path.splitext(input_name)[0].lower(),
            )
            func = _make_test(test)
            func.__doc__ = input_name
            if test.get('skip', False):
                func = unittest.skip(input_name)(func)
            if test.get('fail', False):
                # Fixed: the original called unittest.expectsFailure, which
                # does not exist; the correct decorator is expectedFailure.
                func = unittest.expectedFailure(func)
            attrs[attr_name] = func
        return type.__new__(meta, name, bases, attrs)
class MarkdownTests(with_metaclass(MarkdownTestsMeta, unittest.TestCase)):
    """Test case whose test_* methods are generated from config.json."""
    pass
# Allow running the suite directly: `python runner.py`.
if __name__ == '__main__':
    unittest.main()
|
ledusledus/elevator | refs/heads/master | dxf_writer.py | 1 | from dxfwrite import DXFEngine
def WriteDXF(filename, lines, elevations, scaler):
    """Write each polyline in *lines* to a DXF file at *filename*.

    Points are mapped back to world coordinates via ``scaler.inverse`` and
    each polyline gets the Z elevation from the matching entry of
    *elevations* (indexed in lockstep with *lines*).
    """
    drawing = DXFEngine.drawing(filename)
    for idx in xrange(len(lines)):
        vertices = [scaler.inverse((pt[0], pt[1])) for pt in lines[idx]]
        drawing.add(DXFEngine.polyline(vertices,
                                       polyline_elevation=(0, 0, elevations[idx])))
    drawing.save()
#dxf = dxfgrabber.readfile(filename)
#print len(dxf.header)
#layer_count = len(dxf.layers) # collection of layer definitions
#block_definition_count = len(dxf.blocks) # dict like collection of block definitions
#entity_count = len(dxf.entities) # list like collection of entities
#print layer_count, block_definition_count, entitiy_count
|
pnorman/mapnik | refs/heads/master | plugins/input/csv/build.py | 2 | #
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2015 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
Import ('env')
# The CSV plugin needs boost >= 1.56 (for boost.spirit features); detect the
# boost version parsed out of the headers and skip the build otherwise.
can_build = False
if env.get('BOOST_LIB_VERSION_FROM_HEADER'):
    boost_version_from_header = int(env['BOOST_LIB_VERSION_FROM_HEADER'].split('_')[1])
    if boost_version_from_header >= 56:
        can_build = True
if not can_build:
    print 'WARNING: skipping building the optional CSV datasource plugin which requires boost >= 1.56'
else:
    Import ('plugin_base')
    PLUGIN_NAME = 'csv'
    plugin_env = plugin_base.Clone()
    plugin_sources = Split(
        """
        %(PLUGIN_NAME)s_utils.cpp
        %(PLUGIN_NAME)s_datasource.cpp
        %(PLUGIN_NAME)s_featureset.cpp
        %(PLUGIN_NAME)s_inline_featureset.cpp
        %(PLUGIN_NAME)s_index_featureset.cpp
        """ % locals()
        )
    # Link Library to Dependencies
    libraries = []
    libraries.append('mapnik-json')
    libraries.append('mapnik-wkt')
    # When plugins are built as shared .input modules they must link mapnik
    # and its support libraries explicitly.
    if env['PLUGIN_LINKING'] == 'shared':
        libraries.append('boost_system%s' % env['BOOST_APPEND'])
        libraries.insert(0,env['MAPNIK_NAME'])
        libraries.append(env['ICU_LIB_NAME'])
        TARGET = plugin_env.SharedLibrary('../%s' % PLUGIN_NAME,
                                          SHLIBPREFIX='',
                                          SHLIBSUFFIX='.input',
                                          source=plugin_sources,
                                          LIBS=libraries)
        # if the plugin links to libmapnik ensure it is built first
        Depends(TARGET, env.subst('../../../src/%s' % env['MAPNIK_LIB_NAME']))
        Depends(TARGET, env.subst('../../../src/json/libmapnik-json${LIBSUFFIX}'))
        Depends(TARGET, env.subst('../../../src/wkt/libmapnik-wkt${LIBSUFFIX}'))
        if 'uninstall' not in COMMAND_LINE_TARGETS:
            env.Install(env['MAPNIK_INPUT_PLUGINS_DEST'], TARGET)
            env.Alias('install', env['MAPNIK_INPUT_PLUGINS_DEST'])
    # Exported so the top-level SConstruct can also build the plugin
    # statically from the same source/library lists.
    plugin_obj = {
        'LIBS': libraries,
        'SOURCES': plugin_sources,
    }
    Return('plugin_obj')
|
Jonekee/chromium.src | refs/heads/nw12 | third_party/typ/typ/fakes/host_fake.py | 36 | # Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import logging
import sys
from typ.host import _TeedStream
# On Python 3 alias `unicode` to `str` so the rest of the module can stay
# version-agnostic.
is_python3 = bool(sys.version_info.major == 3)
if is_python3: # pragma: python3
    # redefining built-in 'unicode' pylint: disable=W0622
    unicode = str
class FakeHost(object):
    """In-memory stand-in for typ's real Host object.

    Fakes the filesystem (``self.files``/``self.dirs`` dicts keyed by
    absolute path), subprocess execution (commands are recorded in
    ``self.cmds`` and always "succeed"), environment, std streams and URL
    fetches, so tests run hermetically and can inspect every side effect.
    """
    # "too many instance attributes" pylint: disable=R0902
    # "redefining built-in" pylint: disable=W0622
    # "unused arg" pylint: disable=W0613
    python_interpreter = 'python'
    is_python3 = bool(sys.version_info.major == 3)
    def __init__(self):
        self.logger = logging.getLogger()
        self.stdin = io.StringIO()
        self.stdout = io.StringIO()
        self.stderr = io.StringIO()
        self.platform = 'linux2'
        self.env = {}
        self.sep = '/'
        # dirs: set of absolute paths; files: path -> contents (None marks a
        # deleted file); written_files mirrors every write/remove for tests.
        self.dirs = set([])
        self.files = {}
        self.fetches = []
        self.fetch_responses = {}
        self.written_files = {}
        self.last_tmpdir = None
        self.current_tmpno = 0
        self.mtimes = {}
        self.cmds = []
        self.cwd = '/tmp'
        self._orig_logging_handlers = []
    def __getstate__(self):
        # Streams and the logger are not picklable; drop them so the fake
        # can cross a multiprocessing boundary.
        d = copy.copy(self.__dict__)
        del d['stderr']
        del d['stdout']
        del d['stdin']
        del d['logger']
        del d['_orig_logging_handlers']
        return d
    def __setstate__(self, d):
        # Recreate the unpicklable members dropped in __getstate__.
        for k, v in d.items():
            setattr(self, k, v)
        self.logger = logging.getLogger()
        self.stdin = io.StringIO()
        self.stdout = io.StringIO()
        self.stderr = io.StringIO()
    def abspath(self, *comps):
        relpath = self.join(*comps)
        if relpath.startswith('/'):
            return relpath
        return self.join(self.cwd, relpath)
    def add_to_path(self, *comps):
        # NOTE(review): mutates the real sys.path, not a fake -- this leaks
        # into the host process; confirm that is intended for these tests.
        absolute_path = self.abspath(*comps)
        if absolute_path not in sys.path:
            sys.path.append(absolute_path)
    def basename(self, path):
        return path.split(self.sep)[-1]
    def call(self, argv, stdin=None, env=None):
        # Record the command and pretend it succeeded with empty output.
        self.cmds.append(argv)
        return 0, '', ''
    def call_inline(self, argv):
        return self.call(argv)[0]
    def chdir(self, *comps):
        path = self.join(*comps)
        if not path.startswith('/'):
            path = self.join(self.cwd, path)
        self.cwd = path
    def cpu_count(self):
        return 1
    def dirname(self, path):
        return '/'.join(path.split('/')[:-1])
    def exists(self, *comps):
        path = self.abspath(*comps)
        return ((path in self.files and self.files[path] is not None) or
                path in self.dirs)
    def files_under(self, top):
        # Returns paths relative to `top` for every live (non-deleted) file.
        files = []
        top = self.abspath(top)
        for f in self.files:
            if self.files[f] is not None and f.startswith(top):
                files.append(self.relpath(f, top))
        return files
    def for_mp(self):
        return self
    def getcwd(self):
        return self.cwd
    def getenv(self, key, default=None):
        return self.env.get(key, default)
    def getpid(self):
        return 1
    def isdir(self, *comps):
        path = self.abspath(*comps)
        return path in self.dirs
    def isfile(self, *comps):
        path = self.abspath(*comps)
        return path in self.files and self.files[path] is not None
    def join(self, *comps):
        # Hand-rolled POSIX-style join + normalization of './' and '/..'
        # segments (no real filesystem involved).
        p = ''
        for c in comps:
            if c in ('', '.'):
                continue
            elif c.startswith('/'):
                p = c
            elif p:
                p += '/' + c
            else:
                p = c
        # Handle ./
        p = p.replace('/./', '/')
        # Handle ../
        while '/..' in p:
            comps = p.split('/')
            idx = comps.index('..')
            comps = comps[:idx-1] + comps[idx+1:]
            p = '/'.join(comps)
        return p
    def maybe_mkdir(self, *comps):
        path = self.abspath(self.join(*comps))
        if path not in self.dirs:
            self.dirs.add(path)
    def mktempfile(self, delete=True):
        # `delete` is accepted for API parity with the real host but has no
        # effect here: the "file" is just an in-memory StringIO.
        curno = self.current_tmpno
        self.current_tmpno += 1
        f = io.StringIO()
        f.name = '__im_tmp/tmpfile_%u' % curno
        return f
    def mkdtemp(self, suffix='', prefix='tmp', dir=None, **_kwargs):
        if dir is None:
            dir = self.sep + '__im_tmp'
        curno = self.current_tmpno
        self.current_tmpno += 1
        self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
        self.dirs.add(self.last_tmpdir)
        return self.last_tmpdir
    def mtime(self, *comps):
        return self.mtimes.get(self.join(*comps), 0)
    def print_(self, msg='', end='\n', stream=None):
        stream = stream or self.stdout
        stream.write(msg + end)
        stream.flush()
    def read_binary_file(self, *comps):
        return self._read(comps)
    def read_text_file(self, *comps):
        return self._read(comps)
    def _read(self, comps):
        return self.files[self.abspath(*comps)]
    def realpath(self, *comps):
        return self.abspath(*comps)
    def relpath(self, path, start):
        return path.replace(start + '/', '')
    def remove(self, *comps):
        # Deletion is modeled by storing None rather than removing the key,
        # so written_files keeps a record of the removal.
        path = self.abspath(*comps)
        self.files[path] = None
        self.written_files[path] = None
    def rmtree(self, *comps):
        # Marks every file under `path` deleted; only the exact directory
        # entry is discarded (subdirectory entries are left in self.dirs).
        path = self.abspath(*comps)
        for f in self.files:
            if f.startswith(path):
                self.files[f] = None
                self.written_files[f] = None
        self.dirs.remove(path)
    def terminal_width(self):
        return 80
    def splitext(self, path):
        idx = path.rfind('.')
        if idx == -1:
            return (path, '')
        return (path[:idx], path[idx:])
    def time(self):
        return 0
    def write_binary_file(self, path, contents):
        self._write(path, contents)
    def write_text_file(self, path, contents):
        self._write(path, contents)
    def _write(self, path, contents):
        full_path = self.abspath(path)
        self.maybe_mkdir(self.dirname(full_path))
        self.files[full_path] = contents
        self.written_files[full_path] = contents
    def fetch(self, url, data=None, headers=None):
        # Canned responses come from fetch_responses; every call is logged.
        resp = self.fetch_responses.get(url, FakeResponse(unicode(''), url))
        self.fetches.append((url, data, headers, resp))
        return resp
    def _tap_output(self):
        # NOTE(review): the real Host guards the sys.stdout/stderr swap on a
        # condition; here `if True:` always swaps -- confirm intentional.
        self.stdout = _TeedStream(self.stdout)
        self.stderr = _TeedStream(self.stderr)
        if True:
            sys.stdout = self.stdout
            sys.stderr = self.stderr
    def _untap_output(self):
        assert isinstance(self.stdout, _TeedStream)
        self.stdout = self.stdout.stream
        self.stderr = self.stderr.stream
        if True:
            sys.stdout = self.stdout
            sys.stderr = self.stderr
    def capture_output(self, divert=True):
        # Route both std streams (and logging) through tees so tests can
        # assert on what was printed.
        self._tap_output()
        self._orig_logging_handlers = self.logger.handlers
        if self._orig_logging_handlers:
            self.logger.handlers = [logging.StreamHandler(self.stderr)]
        self.stdout.capture(divert=divert)
        self.stderr.capture(divert=divert)
    def restore_output(self):
        # Undo capture_output() and return everything captured so far.
        assert isinstance(self.stdout, _TeedStream)
        out, err = (self.stdout.restore(), self.stderr.restore())
        self.logger.handlers = self._orig_logging_handlers
        self._untap_output()
        return out, err
return out, err
class FakeResponse(io.StringIO):
    """In-memory stand-in for a urllib response object.

    Behaves like a readable stream over *response* and additionally
    exposes the source URL and HTTP status code.
    """

    def __init__(self, response, url, code=200):
        super(FakeResponse, self).__init__(response)
        self._url = url
        self.code = code

    def geturl(self):
        """Return the URL this response was (pretend-)fetched from."""
        return self._url

    def getcode(self):
        """Return the HTTP status code (200 unless overridden)."""
        return self.code
|
LiFengPro/trazip | refs/heads/dev | trazip/urls.py | 1 | """trazip URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Catch-all prefix: the frontier app handles the site's main routes.
    url(r'', include('frontier.urls')),
    # url(r'^api/', include('foundation.urls')),
    # url(r'^api/', include('hotel.urls')),
    # url(r'^api/', include('travel.urls')),
    url(r'^admin/', admin.site.urls),
]
|
suncycheng/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/contrib/localflavor/in_/forms.py | 309 | """
India-specific Form helpers.
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import gettext
import re
class INZipCodeField(RegexField):
    """A form field that validates input as a 6-digit Indian zip (PIN) code."""
    default_error_messages = {
        # Fixed: the regex below accepts exactly 6 digits, but the message
        # previously showed seven placeholder X's ("XXXXXXX").
        'invalid': gettext(u'Enter a zip code in the format XXXXXX.'),
    }
    def __init__(self, *args, **kwargs):
        super(INZipCodeField, self).__init__(r'^\d{6}$',
            max_length=None, min_length=None, *args, **kwargs)
class INStateField(Field):
    """
    A form field that validates its input is a Indian state name or
    abbreviation. It normalizes the input to the standard two-letter vehicle
    registration abbreviation for the given state or union territory
    """
    default_error_messages = {
        'invalid': u'Enter a Indian state or territory.',
    }
    def clean(self, value):
        """Return the normalized two-letter code, u'' for empty input, or
        raise ValidationError for anything unrecognized."""
        # Imported lazily to avoid loading the (large) state table at
        # module import time.
        from in_states import STATES_NORMALIZED
        super(INStateField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            value = value.strip().lower()
        except AttributeError:
            # Non-string input falls through to the invalid error below.
            pass
        else:
            try:
                # Fixed: `value` was already stripped/lowered above; the
                # original redundantly re-applied .strip().lower() here.
                return smart_unicode(STATES_NORMALIZED[value])
            except KeyError:
                pass
        raise ValidationError(self.error_messages['invalid'])
class INStateSelect(Select):
    """
    A Select widget that uses a list of Indian states/territories as its
    choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the choice table is only loaded when the
        # widget is actually instantiated.
        from in_states import STATE_CHOICES
        super(INStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
|
OkBuilds/buck | refs/heads/master | third-party/py/pex/pex/environment.py | 52 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import itertools
import os
import site
import sys
import uuid
from pkg_resources import (
DistributionNotFound,
Environment,
Requirement,
WorkingSet,
find_distributions
)
from .common import die, open_zip, safe_mkdir, safe_rmtree
from .interpreter import PythonInterpreter
from .package import distribution_compatible
from .pex_builder import PEXBuilder
from .pex_info import PexInfo
from .tracer import TRACER
from .util import CacheHelper, DistributionHelper
class PEXEnvironment(Environment):
  """A pkg_resources ``Environment`` specialized for a PEX file.

  Locates the distributions bundled inside the PEX (or previously extracted
  into its caches), resolves the PEX's requirements against them, and
  activates the result onto ``sys.path``.
  """
  @classmethod
  def force_local(cls, pex, pex_info):
    # Explode a zip-unsafe PEX onto disk and return the exploded directory.
    if pex_info.code_hash is None:
      # Do not support force_local if code_hash is not set. (It should always be set.)
      return pex
    explode_dir = os.path.join(pex_info.zip_unsafe_cache, pex_info.code_hash)
    TRACER.log('PEX is not zip safe, exploding to %s' % explode_dir)
    if not os.path.exists(explode_dir):
      # Extract into a unique temp dir, then rename into place so a
      # concurrent process never observes a half-extracted tree.
      explode_tmp = explode_dir + '.' + uuid.uuid4().hex
      with TRACER.timed('Unzipping %s' % pex):
        try:
          safe_mkdir(explode_tmp)
          with open_zip(pex) as pex_zip:
            # Skip the bootstrap machinery and the dependency cache; only
            # user code needs to live in the exploded tree.
            pex_files = (x for x in pex_zip.namelist()
                         if not x.startswith(PEXBuilder.BOOTSTRAP_DIR) and
                         not x.startswith(PexInfo.INTERNAL_CACHE))
            pex_zip.extractall(explode_tmp, pex_files)
        except: # noqa: T803
          safe_rmtree(explode_tmp)
          raise
      TRACER.log('Renaming %s to %s' % (explode_tmp, explode_dir))
      os.rename(explode_tmp, explode_dir)
    return explode_dir
  @classmethod
  def update_module_paths(cls, new_code_path):
    # Force subsequent imports to come from the .pex directory rather than the .pex file.
    TRACER.log('Adding to the head of sys.path: %s' % new_code_path)
    sys.path.insert(0, new_code_path)
    for name, module in sys.modules.items():
      # Already-imported packages also need their __path__ redirected so
      # submodule imports resolve against the exploded tree.
      if hasattr(module, "__path__"):
        module_dir = os.path.join(new_code_path, *name.split("."))
        TRACER.log('Adding to the head of %s.__path__: %s' % (module.__name__, module_dir))
        module.__path__.insert(0, module_dir)
  @classmethod
  def write_zipped_internal_cache(cls, pex, pex_info):
    """Extract non-zip-safe bundled distributions into the install cache.

    Returns three lists of distributions: already cached, newly cached,
    and zip-safe (left inside the zip).
    """
    prefix_length = len(pex_info.internal_cache) + 1
    existing_cached_distributions = []
    newly_cached_distributions = []
    zip_safe_distributions = []
    with open_zip(pex) as zf:
      # Distribution names are the first element after ".deps/" and before the next "/"
      distribution_names = set(filter(None, (filename[prefix_length:].split('/')[0]
          for filename in zf.namelist() if filename.startswith(pex_info.internal_cache))))
      # Create Distribution objects from these, and possibly write to disk if necessary.
      for distribution_name in distribution_names:
        internal_dist_path = '/'.join([pex_info.internal_cache, distribution_name])
        # First check if this is already cached
        dist_digest = pex_info.distributions.get(distribution_name) or CacheHelper.zip_hash(
            zf, internal_dist_path)
        cached_location = os.path.join(pex_info.install_cache, '%s.%s' % (
          distribution_name, dist_digest))
        if os.path.exists(cached_location):
          dist = DistributionHelper.distribution_from_path(cached_location)
          existing_cached_distributions.append(dist)
          continue
        else:
          dist = DistributionHelper.distribution_from_path(os.path.join(pex, internal_dist_path))
          if DistributionHelper.zipsafe(dist) and not pex_info.always_write_cache:
            zip_safe_distributions.append(dist)
            continue
        with TRACER.timed('Caching %s' % dist):
          newly_cached_distributions.append(
            CacheHelper.cache_distribution(zf, internal_dist_path, cached_location))
    return existing_cached_distributions, newly_cached_distributions, zip_safe_distributions
  @classmethod
  def load_internal_cache(cls, pex, pex_info):
    """Possibly cache out the internal cache."""
    internal_cache = os.path.join(pex, pex_info.internal_cache)
    with TRACER.timed('Searching dependency cache: %s' % internal_cache, V=2):
      if os.path.isdir(pex):
        # Already exploded on disk: scan the cache directory directly.
        for dist in find_distributions(internal_cache):
          yield dist
      else:
        for dist in itertools.chain(*cls.write_zipped_internal_cache(pex, pex_info)):
          yield dist
  def __init__(self, pex, pex_info, interpreter=None, **kw):
    self._internal_cache = os.path.join(pex, pex_info.internal_cache)
    self._pex = pex
    self._pex_info = pex_info
    self._activated = False
    self._working_set = None
    self._interpreter = interpreter or PythonInterpreter.get()
    super(PEXEnvironment, self).__init__(
        search_path=sys.path if pex_info.inherit_path else [], **kw)
  def update_candidate_distributions(self, distribution_iter):
    # Register every platform/interpreter-compatible distribution as a
    # resolution candidate.
    for dist in distribution_iter:
      if self.can_add(dist):
        with TRACER.timed('Adding %s' % dist, V=2):
          self.add(dist)
  def can_add(self, dist):
    return distribution_compatible(dist, self._interpreter, self.platform)
  def activate(self):
    # Idempotent: the working set is built once and then memoized.
    if not self._activated:
      with TRACER.timed('Activating PEX virtual environment from %s' % self._pex):
        self._working_set = self._activate()
      self._activated = True
    return self._working_set
  def _resolve(self, working_set, reqs):
    reqs = reqs[:]
    unresolved_reqs = set()
    resolveds = set()
    # Resolve them one at a time so that we can figure out which ones we need to elide should
    # there be an interpreter incompatibility.
    for req in reqs:
      with TRACER.timed('Resolving %s' % req, V=2):
        try:
          resolveds.update(working_set.resolve([req], env=self))
        except DistributionNotFound as e:
          TRACER.log('Failed to resolve a requirement: %s' % e)
          unresolved_reqs.add(e.args[0].project_name)
          # Older versions of pkg_resources just call `DistributionNotFound(req)` instead of the
          # modern `DistributionNotFound(req, requirers)` and so we may not have the 2nd requirers
          # slot at all.
          if len(e.args) >= 2 and e.args[1]:
            unresolved_reqs.update(e.args[1])
    unresolved_reqs = set([req.lower() for req in unresolved_reqs])
    if unresolved_reqs:
      TRACER.log('Unresolved requirements:')
      for req in unresolved_reqs:
        TRACER.log('  - %s' % req)
      TRACER.log('Distributions contained within this pex:')
      if not self._pex_info.distributions:
        TRACER.log('  None')
      else:
        for dist in self._pex_info.distributions:
          TRACER.log('  - %s' % dist)
      if not self._pex_info.ignore_errors:
        die('Failed to execute PEX file, missing compatible dependencies for:\n%s' % (
            '\n'.join(map(str, unresolved_reqs))))
    return resolveds
  def _activate(self):
    # Gather candidates from the internal cache, explode if needed, then
    # resolve the PEX's declared requirements and activate each match.
    self.update_candidate_distributions(self.load_internal_cache(self._pex, self._pex_info))
    if not self._pex_info.zip_safe and os.path.isfile(self._pex):
      self.update_module_paths(self.force_local(self._pex, self._pex_info))
    all_reqs = [Requirement.parse(req) for req in self._pex_info.requirements]
    working_set = WorkingSet([])
    resolved = self._resolve(working_set, all_reqs)
    for dist in resolved:
      with TRACER.timed('Activating %s' % dist, V=2):
        working_set.add(dist)
        if os.path.isdir(dist.location):
          with TRACER.timed('Adding sitedir', V=2):
            # Honors any .pth files the distribution ships.
            site.addsitedir(dist.location)
        dist.activate()
    return working_set
|
anders94/mitcoin | refs/heads/master | qa/rpc-tests/receivedby.py | 1 | #!/usr/bin/env python2
# Copyright (c) 2014 The mitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework.test_framework import mitcoinTestFramework
from test_framework.util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Find and return the first dict in object_array whose entries match
    every key/value pair in to_match.

    to_match should be a unique identifier of a sub array; a KeyError
    propagates if a candidate lacks one of its keys.
    Returns [] when nothing matches (kept for backward compatibility
    with existing callers that test the result's truthiness/length).
    '''
    # The original carried an unused `num_matched` counter and a manual
    # all_match flag; `all()` expresses the same test directly.
    for item in object_array:
        if all(item[key] == value for key, value in to_match.items()):
            return item
    return []
def check_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Search *object_array* (a list of JSON-style dicts) for entries whose
    fields match every key/value pair in *to_match*, then verify each such
    entry also carries the key/value pairs in *expected*.

    If *should_not_find* is True, instead verify that no entry matches
    *to_match* at all. Raises AssertionError on any violation.
    """
    def _filter_hit(entry):
        # Compare every constraint (no short-circuit, mirroring the
        # original's full-scan comparison).
        hit = True
        for key, value in to_match.items():
            if entry[key] != value:
                hit = False
        return hit

    if should_not_find == True:
        # Nothing to verify on a match that is expected not to exist.
        expected = { }
    num_matched = 0
    for entry in object_array:
        if not _filter_hit(entry):
            continue
        for key, value in expected.items():
            if entry[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(entry), str(key), str(value)))
        num_matched = num_matched+1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(mitcoinTestFramework):
    """Functional test exercising the listreceivedbyaddress,
    getreceivedbyaddress, listreceivedbyaccount and getreceivedbyaccount
    RPCs across a small node cluster."""
    def run_test(self):
        '''
        listreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        #Check not listed in listreceivedbyaddress because has 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           { },
                            True)
        #Bury Tx under 10 block so it will be returned by listreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence < 10
        check_array_result(self.nodes[1].listreceivedbyaddress(5),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence > 10, should not find Tx
        check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
        #Empty Tx
        addr = self.nodes[1].getnewaddress()
        check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
        '''
        getreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        #Check balance is 0 because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Check balance is 0.1
        balance = self.nodes[1].getreceivedbyaddress(addr,0)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        '''
        listreceivedbyaccount + getreceivedbyaccount Test
        '''
        #set pre-state
        addrArr = self.nodes[1].getnewaddress()
        account = self.nodes[1].getaccount(addrArr)
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        # NOTE(review): `rec_by_accountArr` is assigned but never read.
        balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
        # NOTE(review): this sends to `addr` (from the previous section), not
        # the fresh `addrArr`; both live in the default "" account, so the
        # account totals below still work -- confirm this is intentional.
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           received_by_account_json)
        # getreceivedbyaddress should return same balance because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account:
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        self.nodes[1].generate(10)
        self.sync_all()
        # listreceivedbyaccount should return updated account balance
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaddress should return updates balance
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account + Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        #Create a new account named "mynewaccount" that has a 0 balance
        self.nodes[1].getaccountaddress("mynewaccount")
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        # Test includeempty of listreceivedbyaccount
        if received_by_account_json["amount"] != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
        # Test getreceivedbyaccount for 0 amount accounts
        balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
# Allow running the test directly: `python receivedby.py`.
if __name__ == '__main__':
    ReceivedByTest().main()
|
ddraj/hbase-trunk-mttr | refs/heads/master | hbase-examples/src/main/python/gen-py/hbase/ttypes.py | 43 | #
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
# The C-accelerated codec is optional; fall back to the pure-Python
# (de)serialization paths in read()/write() when it isn't built.
try:
  from thrift.protocol import fastbinary
except:
  fastbinary = None
# NOTE: Thrift-generated code (thrift 0.9.0); regenerate from the .thrift IDL
# rather than hand-editing.
class TCell:
  """
  TCell - Used to transport a cell value (byte[]) and the timestamp it was
  stored with together as a result for get and getRow methods. This promotes
  the timestamp of a cell to a first-class value, making it easy to take
  note of temporal data. Cell is used all the way from HStore up to HTable.
  Attributes:
   - value
   - timestamp
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'value', None, None, ), # 1
    (2, TType.I64, 'timestamp', None, None, ), # 2
  )
  def __init__(self, value=None, timestamp=None,):
    self.value = value
    self.timestamp = timestamp
  def read(self, iprot):
    # Fast path: decode with the C extension when the accelerated binary
    # protocol and a C-readable transport are in use.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields one by one, skipping unknown ids.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.value = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.timestamp = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path mirrors read(): C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TCell')
    if self.value is not None:
      oprot.writeFieldBegin('value', TType.STRING, 1)
      oprot.writeString(self.value)
      oprot.writeFieldEnd()
    if self.timestamp is not None:
      oprot.writeFieldBegin('timestamp', TType.I64, 2)
      oprot.writeI64(self.timestamp)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ColumnDescriptor:
    """
    An HColumnDescriptor contains information about a column family
    such as the number of versions, compression settings, etc. It is
    used as input when creating a table or adding a column.

    Attributes:
     - name
     - maxVersions
     - compression
     - inMemory
     - bloomFilterType
     - bloomFilterVectorSize
     - bloomFilterNbHashes
     - blockCacheEnabled
     - timeToLive
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    # The defaults in column 4 are also what __init__ picks up below.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'name', None, None, ), # 1
        (2, TType.I32, 'maxVersions', None, 3, ), # 2
        (3, TType.STRING, 'compression', None, "NONE", ), # 3
        (4, TType.BOOL, 'inMemory', None, False, ), # 4
        (5, TType.STRING, 'bloomFilterType', None, "NONE", ), # 5
        (6, TType.I32, 'bloomFilterVectorSize', None, 0, ), # 6
        (7, TType.I32, 'bloomFilterNbHashes', None, 0, ), # 7
        (8, TType.BOOL, 'blockCacheEnabled', None, False, ), # 8
        (9, TType.I32, 'timeToLive', None, -1, ), # 9
    )

    # Defaults are read from thrift_spec so the IDL defaults stay the single
    # source of truth.
    def __init__(self, name=None, maxVersions=thrift_spec[2][4], compression=thrift_spec[3][4], inMemory=thrift_spec[4][4], bloomFilterType=thrift_spec[5][4], bloomFilterVectorSize=thrift_spec[6][4], bloomFilterNbHashes=thrift_spec[7][4], blockCacheEnabled=thrift_spec[8][4], timeToLive=thrift_spec[9][4],):
        self.name = name
        self.maxVersions = maxVersions
        self.compression = compression
        self.inMemory = inMemory
        self.bloomFilterType = bloomFilterType
        self.bloomFilterVectorSize = bloomFilterVectorSize
        self.bloomFilterNbHashes = bloomFilterNbHashes
        self.blockCacheEnabled = blockCacheEnabled
        self.timeToLive = timeToLive

    def read(self, iprot):
        # Fast path via the C extension; otherwise generic field-by-field
        # decode with unknown fields skipped for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.name = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.maxVersions = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.compression = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.BOOL:
                    self.inMemory = iprot.readBool();
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.bloomFilterType = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.I32:
                    self.bloomFilterVectorSize = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.I32:
                    self.bloomFilterNbHashes = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.BOOL:
                    self.blockCacheEnabled = iprot.readBool();
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                if ftype == TType.I32:
                    self.timeToLive = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ColumnDescriptor')
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 1)
            oprot.writeString(self.name)
            oprot.writeFieldEnd()
        if self.maxVersions is not None:
            oprot.writeFieldBegin('maxVersions', TType.I32, 2)
            oprot.writeI32(self.maxVersions)
            oprot.writeFieldEnd()
        if self.compression is not None:
            oprot.writeFieldBegin('compression', TType.STRING, 3)
            oprot.writeString(self.compression)
            oprot.writeFieldEnd()
        if self.inMemory is not None:
            oprot.writeFieldBegin('inMemory', TType.BOOL, 4)
            oprot.writeBool(self.inMemory)
            oprot.writeFieldEnd()
        if self.bloomFilterType is not None:
            oprot.writeFieldBegin('bloomFilterType', TType.STRING, 5)
            oprot.writeString(self.bloomFilterType)
            oprot.writeFieldEnd()
        if self.bloomFilterVectorSize is not None:
            oprot.writeFieldBegin('bloomFilterVectorSize', TType.I32, 6)
            oprot.writeI32(self.bloomFilterVectorSize)
            oprot.writeFieldEnd()
        if self.bloomFilterNbHashes is not None:
            oprot.writeFieldBegin('bloomFilterNbHashes', TType.I32, 7)
            oprot.writeI32(self.bloomFilterNbHashes)
            oprot.writeFieldEnd()
        if self.blockCacheEnabled is not None:
            oprot.writeFieldBegin('blockCacheEnabled', TType.BOOL, 8)
            oprot.writeBool(self.blockCacheEnabled)
            oprot.writeFieldEnd()
        if self.timeToLive is not None:
            oprot.writeFieldBegin('timeToLive', TType.I32, 9)
            oprot.writeI32(self.timeToLive)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TRegionInfo:
    """
    A TRegionInfo contains information about an HTable region.

    Attributes:
     - startKey
     - endKey
     - id
     - name
     - version
     - serverName
     - port
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'startKey', None, None, ), # 1
        (2, TType.STRING, 'endKey', None, None, ), # 2
        (3, TType.I64, 'id', None, None, ), # 3
        (4, TType.STRING, 'name', None, None, ), # 4
        (5, TType.BYTE, 'version', None, None, ), # 5
        (6, TType.STRING, 'serverName', None, None, ), # 6
        (7, TType.I32, 'port', None, None, ), # 7
    )

    # NOTE(review): the field is named `id` in the IDL, so this parameter
    # shadows the builtin id() within __init__ only.
    def __init__(self, startKey=None, endKey=None, id=None, name=None, version=None, serverName=None, port=None,):
        self.startKey = startKey
        self.endKey = endKey
        self.id = id
        self.name = name
        self.version = version
        self.serverName = serverName
        self.port = port

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.startKey = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.endKey = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I64:
                    self.id = iprot.readI64();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.name = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.BYTE:
                    self.version = iprot.readByte();
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.serverName = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.I32:
                    self.port = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TRegionInfo')
        if self.startKey is not None:
            oprot.writeFieldBegin('startKey', TType.STRING, 1)
            oprot.writeString(self.startKey)
            oprot.writeFieldEnd()
        if self.endKey is not None:
            oprot.writeFieldBegin('endKey', TType.STRING, 2)
            oprot.writeString(self.endKey)
            oprot.writeFieldEnd()
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.I64, 3)
            oprot.writeI64(self.id)
            oprot.writeFieldEnd()
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 4)
            oprot.writeString(self.name)
            oprot.writeFieldEnd()
        if self.version is not None:
            oprot.writeFieldBegin('version', TType.BYTE, 5)
            oprot.writeByte(self.version)
            oprot.writeFieldEnd()
        if self.serverName is not None:
            oprot.writeFieldBegin('serverName', TType.STRING, 6)
            oprot.writeString(self.serverName)
            oprot.writeFieldEnd()
        if self.port is not None:
            oprot.writeFieldBegin('port', TType.I32, 7)
            oprot.writeI32(self.port)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Mutation:
    """
    A Mutation object is used to either update or delete a column-value.

    Attributes:
     - isDelete
     - column
     - value
     - writeToWAL
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    # Note the IDL defaults: isDelete=False, writeToWAL=True.
    thrift_spec = (
        None, # 0
        (1, TType.BOOL, 'isDelete', None, False, ), # 1
        (2, TType.STRING, 'column', None, None, ), # 2
        (3, TType.STRING, 'value', None, None, ), # 3
        (4, TType.BOOL, 'writeToWAL', None, True, ), # 4
    )

    # Defaults are read from thrift_spec so the IDL stays the source of truth.
    def __init__(self, isDelete=thrift_spec[1][4], column=None, value=None, writeToWAL=thrift_spec[4][4],):
        self.isDelete = isDelete
        self.column = column
        self.value = value
        self.writeToWAL = writeToWAL

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.BOOL:
                    self.isDelete = iprot.readBool();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.column = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.value = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.BOOL:
                    self.writeToWAL = iprot.readBool();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Mutation')
        if self.isDelete is not None:
            oprot.writeFieldBegin('isDelete', TType.BOOL, 1)
            oprot.writeBool(self.isDelete)
            oprot.writeFieldEnd()
        if self.column is not None:
            oprot.writeFieldBegin('column', TType.STRING, 2)
            oprot.writeString(self.column)
            oprot.writeFieldEnd()
        if self.value is not None:
            oprot.writeFieldBegin('value', TType.STRING, 3)
            oprot.writeString(self.value)
            oprot.writeFieldEnd()
        if self.writeToWAL is not None:
            oprot.writeFieldBegin('writeToWAL', TType.BOOL, 4)
            oprot.writeBool(self.writeToWAL)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class BatchMutation:
    """
    A BatchMutation object is used to apply a number of Mutations to a single row.

    Attributes:
     - row
     - mutations
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    # `mutations` is a list<Mutation> on the wire.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'row', None, None, ), # 1
        (2, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 2
    )

    def __init__(self, row=None, mutations=None,):
        self.row = row
        self.mutations = mutations

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.row = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # Decode each list element as a nested Mutation struct.
                    self.mutations = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in xrange(_size0):
                        _elem5 = Mutation()
                        _elem5.read(iprot)
                        self.mutations.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('BatchMutation')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeString(self.row)
            oprot.writeFieldEnd()
        if self.mutations is not None:
            oprot.writeFieldBegin('mutations', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.mutations))
            for iter6 in self.mutations:
                iter6.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TIncrement:
    """
    For increments that are not incrementColumnValue
    equivalents.

    Attributes:
     - table
     - row
     - column
     - ammount
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    # NOTE(review): the misspelling 'ammount' comes from the IDL and is part
    # of the wire/API contract; it cannot be renamed here.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'table', None, None, ), # 1
        (2, TType.STRING, 'row', None, None, ), # 2
        (3, TType.STRING, 'column', None, None, ), # 3
        (4, TType.I64, 'ammount', None, None, ), # 4
    )

    def __init__(self, table=None, row=None, column=None, ammount=None,):
        self.table = table
        self.row = row
        self.column = column
        self.ammount = ammount

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.table = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.row = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.column = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I64:
                    self.ammount = iprot.readI64();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TIncrement')
        if self.table is not None:
            oprot.writeFieldBegin('table', TType.STRING, 1)
            oprot.writeString(self.table)
            oprot.writeFieldEnd()
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 2)
            oprot.writeString(self.row)
            oprot.writeFieldEnd()
        if self.column is not None:
            oprot.writeFieldBegin('column', TType.STRING, 3)
            oprot.writeString(self.column)
            oprot.writeFieldEnd()
        if self.ammount is not None:
            oprot.writeFieldBegin('ammount', TType.I64, 4)
            oprot.writeI64(self.ammount)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TRowResult:
    """
    Holds row name and then a map of columns to cells.

    Attributes:
     - row
     - columns
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    # `columns` is a map<string, TCell> on the wire (column name -> cell).
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'row', None, None, ), # 1
        (2, TType.MAP, 'columns', (TType.STRING,None,TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 2
    )

    def __init__(self, row=None, columns=None,):
        self.row = row
        self.columns = columns

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.row = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.MAP:
                    # Decode map entries: string key, nested TCell value.
                    self.columns = {}
                    (_ktype8, _vtype9, _size7 ) = iprot.readMapBegin()
                    for _i11 in xrange(_size7):
                        _key12 = iprot.readString();
                        _val13 = TCell()
                        _val13.read(iprot)
                        self.columns[_key12] = _val13
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TRowResult')
        if self.row is not None:
            oprot.writeFieldBegin('row', TType.STRING, 1)
            oprot.writeString(self.row)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.MAP, 2)
            oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.columns))
            for kiter14,viter15 in self.columns.items():
                oprot.writeString(kiter14)
                viter15.write(oprot)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TScan:
    """
    A Scan object is used to specify scanner parameters when opening a scanner.

    Attributes:
     - startRow
     - stopRow
     - timestamp
     - columns
     - caching
     - filterString
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    # `columns` is a list<string> on the wire.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'startRow', None, None, ), # 1
        (2, TType.STRING, 'stopRow', None, None, ), # 2
        (3, TType.I64, 'timestamp', None, None, ), # 3
        (4, TType.LIST, 'columns', (TType.STRING,None), None, ), # 4
        (5, TType.I32, 'caching', None, None, ), # 5
        (6, TType.STRING, 'filterString', None, None, ), # 6
    )

    def __init__(self, startRow=None, stopRow=None, timestamp=None, columns=None, caching=None, filterString=None,):
        self.startRow = startRow
        self.stopRow = stopRow
        self.timestamp = timestamp
        self.columns = columns
        self.caching = caching
        self.filterString = filterString

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.startRow = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.stopRow = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I64:
                    self.timestamp = iprot.readI64();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.LIST:
                    # Decode each list element as a plain string.
                    self.columns = []
                    (_etype19, _size16) = iprot.readListBegin()
                    for _i20 in xrange(_size16):
                        _elem21 = iprot.readString();
                        self.columns.append(_elem21)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I32:
                    self.caching = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.filterString = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TScan')
        if self.startRow is not None:
            oprot.writeFieldBegin('startRow', TType.STRING, 1)
            oprot.writeString(self.startRow)
            oprot.writeFieldEnd()
        if self.stopRow is not None:
            oprot.writeFieldBegin('stopRow', TType.STRING, 2)
            oprot.writeString(self.stopRow)
            oprot.writeFieldEnd()
        if self.timestamp is not None:
            oprot.writeFieldBegin('timestamp', TType.I64, 3)
            oprot.writeI64(self.timestamp)
            oprot.writeFieldEnd()
        if self.columns is not None:
            oprot.writeFieldBegin('columns', TType.LIST, 4)
            oprot.writeListBegin(TType.STRING, len(self.columns))
            for iter22 in self.columns:
                oprot.writeString(iter22)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.caching is not None:
            oprot.writeFieldBegin('caching', TType.I32, 5)
            oprot.writeI32(self.caching)
            oprot.writeFieldEnd()
        if self.filterString is not None:
            oprot.writeFieldBegin('filterString', TType.STRING, 6)
            oprot.writeString(self.filterString)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class IOError(TException):
    """
    An IOError exception signals that an error occurred communicating
    to the Hbase master or an Hbase region server. Also used to return
    more general Hbase error conditions.

    Attributes:
     - message
    """
    # NOTE(review): this generated name shadows the builtin IOError within
    # this module; client code should import it under an alias.

    # (id, wire type, name, nested type args, default) indexed by field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'message', None, None, ), # 1
    )

    def __init__(self, message=None,):
        self.message = message

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('IOError')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class IllegalArgument(TException):
    """
    An IllegalArgument exception indicates an illegal or invalid
    argument was passed into a procedure.

    Attributes:
     - message
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'message', None, None, ), # 1
    )

    def __init__(self, message=None,):
        self.message = message

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('IllegalArgument')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class AlreadyExists(TException):
    """
    An AlreadyExists exceptions signals that a table with the specified
    name already exists

    Attributes:
     - message
    """

    # (id, wire type, name, nested type args, default) indexed by field id.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'message', None, None, ), # 1
    )

    def __init__(self, message=None,):
        self.message = message

    def read(self, iprot):
        # Fast path via the C extension; otherwise field-by-field decode,
        # skipping unknown fields for forward compatibility.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path via the C extension; slow path writes only non-None fields.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('AlreadyExists')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so there is nothing to check.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
|
inspirehep/sqlalchemy | refs/heads/inspire | test/dialect/postgresql/test_reflection.py | 5 | # coding: utf-8
from sqlalchemy.engine import reflection
from sqlalchemy.testing.assertions import eq_, assert_raises, \
AssertsExecutionResults
from sqlalchemy.testing import fixtures
from sqlalchemy import testing
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, String, \
PrimaryKeyConstraint, ForeignKey, join, Sequence, UniqueConstraint, \
Index
from sqlalchemy import exc
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import base as postgresql
from sqlalchemy.dialects.postgresql import ARRAY
class ForeignTableReflectionTest(fixtures.TablesTest, AssertsExecutionResults):
    """Test reflection on foreign tables"""

    # Needs a configured dblink target host and the postgres_fdw extension.
    __requires__ = 'postgresql_test_dblink',
    __only_on__ = 'postgresql >= 9.3'
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        """Create a local table plus an FDW server/user-mapping and a
        foreign table that mirrors it.

        SQLAlchemy has no constructs for FDW objects, so the raw DDL is
        attached to the metadata create/drop events; drop order is the
        reverse of creation.
        """
        from sqlalchemy.testing import config
        dblink = config.file_config.get(
            'sqla_testing', 'postgres_test_db_link')

        # Registered with `metadata` as a side effect of construction;
        # the foreign table below points back at it by name.
        testtable = Table(
            'testtable', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', String(30)))

        for ddl in [
            "CREATE SERVER test_server FOREIGN DATA WRAPPER postgres_fdw "
            "OPTIONS (dbname 'test', host '%s')" % dblink,
            "CREATE USER MAPPING FOR public \
SERVER test_server options (user 'scott', password 'tiger')",
            "CREATE FOREIGN TABLE test_foreigntable ( "
            " id INT, "
            " data VARCHAR(30) "
            ") SERVER test_server OPTIONS (table_name 'testtable')",
        ]:
            sa.event.listen(metadata, "after_create", sa.DDL(ddl))

        for ddl in [
            'DROP FOREIGN TABLE test_foreigntable',
            'DROP USER MAPPING FOR public SERVER test_server',
            "DROP SERVER test_server"
        ]:
            sa.event.listen(metadata, "before_drop", sa.DDL(ddl))

    def test_foreign_table_is_reflected(self):
        # Autoload should see the foreign table's columns like a real table.
        metadata = MetaData(testing.db)
        table = Table('test_foreigntable', metadata, autoload=True)
        eq_(set(table.columns.keys()), set(['id', 'data']),
            "Columns of reflected foreign table didn't equal expected columns")

    def test_get_foreign_table_names(self):
        inspector = inspect(testing.db)
        # NOTE(review): `conn` is unused; the with-block only scopes a
        # connection around the inspector call.
        with testing.db.connect() as conn:
            ft_names = inspector.get_foreign_table_names()
        eq_(ft_names, ['test_foreigntable'])

    def test_get_table_names_no_foreign(self):
        # Foreign tables must NOT appear in the plain table-name listing.
        inspector = inspect(testing.db)
        with testing.db.connect() as conn:
            names = inspector.get_table_names()
        eq_(names, ['testtable'])
class MaterializedViewReflectionTest(
        fixtures.TablesTest, AssertsExecutionResults):
    """Test reflection on materialized views"""

    __only_on__ = 'postgresql >= 9.3'
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        """Create a base table plus one plain view and one materialized view.

        A row is inserted via an after_create hook *before* the materialized
        view DDL runs, so the view's snapshot already contains the row
        without needing a REFRESH.
        """
        testtable = Table(
            'testtable', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', String(30)))

        # insert data before we create the view
        @sa.event.listens_for(testtable, "after_create")
        def insert_data(target, connection, **kw):
            connection.execute(
                target.insert(),
                {"id": 89, "data": 'd1'}
            )

        materialized_view = sa.DDL(
            "CREATE MATERIALIZED VIEW test_mview AS "
            "SELECT * FROM testtable")

        plain_view = sa.DDL(
            "CREATE VIEW test_regview AS "
            "SELECT * FROM testtable")

        sa.event.listen(testtable, 'after_create', plain_view)
        sa.event.listen(testtable, 'after_create', materialized_view)
        # Views must be dropped before the table they select from.
        sa.event.listen(
            testtable, 'before_drop',
            sa.DDL("DROP MATERIALIZED VIEW test_mview")
        )
        sa.event.listen(
            testtable, 'before_drop',
            sa.DDL("DROP VIEW test_regview")
        )

    def test_mview_is_reflected(self):
        # Autoload should see the materialized view's columns.
        metadata = MetaData(testing.db)
        table = Table('test_mview', metadata, autoload=True)
        eq_(set(table.columns.keys()), set(['id', 'data']),
            "Columns of reflected mview didn't equal expected columns")

    def test_mview_select(self):
        # The row inserted before CREATE MATERIALIZED VIEW is visible.
        metadata = MetaData(testing.db)
        table = Table('test_mview', metadata, autoload=True)
        eq_(
            table.select().execute().fetchall(),
            [(89, 'd1',)]
        )

    def test_get_view_names(self):
        # Materialized views are returned alongside plain views.
        insp = inspect(testing.db)
        eq_(set(insp.get_view_names()), set(['test_mview', 'test_regview']))
class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults):
    """Test PostgreSQL domains"""
    __only_on__ = 'postgresql > 8.3'
    __backend__ = True
    @classmethod
    def setup_class(cls):
        """Create the domains/enum type and tables that use them."""
        con = testing.db.connect()
        for ddl in \
                'CREATE DOMAIN testdomain INTEGER NOT NULL DEFAULT 42', \
                'CREATE DOMAIN test_schema.testdomain INTEGER DEFAULT 0', \
                "CREATE TYPE testtype AS ENUM ('test')", \
                'CREATE DOMAIN enumdomain AS testtype':
            try:
                con.execute(ddl)
            except exc.DBAPIError as e:
                # Tolerate leftovers from a previous aborted run.
                if 'already exists' not in str(e):
                    raise e
        con.execute('CREATE TABLE testtable (question integer, answer '
                    'testdomain)')
        con.execute('CREATE TABLE test_schema.testtable(question '
                    'integer, answer test_schema.testdomain, anything '
                    'integer)')
        con.execute('CREATE TABLE crosschema (question integer, answer '
                    'test_schema.testdomain)')
        con.execute('CREATE TABLE enum_test (id integer, data enumdomain)')
    @classmethod
    def teardown_class(cls):
        """Drop tables first, then the domains/types they depend on."""
        con = testing.db.connect()
        con.execute('DROP TABLE testtable')
        con.execute('DROP TABLE test_schema.testtable')
        con.execute('DROP TABLE crosschema')
        con.execute('DROP DOMAIN testdomain')
        con.execute('DROP DOMAIN test_schema.testdomain')
        con.execute("DROP TABLE enum_test")
        con.execute("DROP DOMAIN enumdomain")
        con.execute("DROP TYPE testtype")
    def test_table_is_reflected(self):
        """A domain column reflects as its underlying base type."""
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True)
        eq_(set(table.columns.keys()), set(['question', 'answer']),
            "Columns of reflected table didn't equal expected columns")
        assert isinstance(table.c.answer.type, Integer)
    def test_domain_is_reflected(self):
        """Domain defaults and NOT NULL propagate to the reflected column."""
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True)
        eq_(str(table.columns.answer.server_default.arg), '42',
            "Reflected default value didn't equal expected value")
        assert not table.columns.answer.nullable, \
            'Expected reflected column to not be nullable.'
    def test_enum_domain_is_reflected(self):
        """A domain over an ENUM reflects the enum's labels."""
        metadata = MetaData(testing.db)
        table = Table('enum_test', metadata, autoload=True)
        eq_(
            table.c.data.type.enums,
            ('test', )
        )
    def test_table_is_reflected_test_schema(self):
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True,
                      schema='test_schema')
        eq_(set(table.columns.keys()), set(['question', 'answer',
                                            'anything']),
            "Columns of reflected table didn't equal expected columns")
        assert isinstance(table.c.anything.type, Integer)
    def test_schema_domain_is_reflected(self):
        metadata = MetaData(testing.db)
        table = Table('testtable', metadata, autoload=True,
                      schema='test_schema')
        eq_(str(table.columns.answer.server_default.arg), '0',
            "Reflected default value didn't equal expected value")
        assert table.columns.answer.nullable, \
            'Expected reflected column to be nullable.'
    def test_crosschema_domain_is_reflected(self):
        """A table in the default schema using a domain from test_schema."""
        metadata = MetaData(testing.db)
        table = Table('crosschema', metadata, autoload=True)
        eq_(str(table.columns.answer.server_default.arg), '0',
            "Reflected default value didn't equal expected value")
        assert table.columns.answer.nullable, \
            'Expected reflected column to be nullable.'
    def test_unknown_types(self):
        """Unknown column types warn and come back as NullType."""
        from sqlalchemy.databases import postgresql
        ischema_names = postgresql.PGDialect.ischema_names
        # Blank out the type lookup table so every type is "unknown".
        postgresql.PGDialect.ischema_names = {}
        try:
            m2 = MetaData(testing.db)
            assert_raises(exc.SAWarning, Table, 'testtable', m2,
                          autoload=True)
            # NOTE(review): warns() is defined but never invoked here --
            # confirm whether a call was lost; as written the decorated
            # block does not execute.
            @testing.emits_warning('Did not recognize type')
            def warns():
                m3 = MetaData(testing.db)
                t3 = Table('testtable', m3, autoload=True)
                assert t3.c.answer.type.__class__ == sa.types.NullType
        finally:
            # Always restore the real lookup table for later tests.
            postgresql.PGDialect.ischema_names = ischema_names
class ReflectionTest(fixtures.TestBase):
    """Assorted PostgreSQL reflection tests: primary key ordering, renamed
    sequences/columns, cross-schema foreign keys and search_path handling,
    index reflection (partial/expression/storage options/access method),
    foreign key options, enums and unique constraints."""
    __only_on__ = 'postgresql'
    __backend__ = True
    @testing.fails_if("postgresql < 8.4",
                      "Better int2vector functions not available")
    @testing.provide_metadata
    def test_reflected_primary_key_order(self):
        # PK column order comes from the constraint, not column position.
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('p1', Integer, primary_key=True),
                        Column('p2', Integer, primary_key=True),
                        PrimaryKeyConstraint('p2', 'p1')
                        )
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True)
        eq_(subject.primary_key.columns.keys(), ['p2', 'p1'])
    @testing.provide_metadata
    def test_pg_weirdchar_reflection(self):
        # Column names containing '$' must survive FK reflection.
        meta1 = self.metadata
        subject = Table('subject', meta1, Column('id$', Integer,
                                                 primary_key=True))
        referer = Table(
            'referer', meta1,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'ref', Integer, ForeignKey('subject.id$')))
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True)
        referer = Table('referer', meta2, autoload=True)
        self.assert_((subject.c['id$']
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_reflect_default_over_128_chars(self):
        # Server defaults longer than 128 chars must reflect intact.
        Table('t', self.metadata,
              Column('x', String(200), server_default="abcd" * 40)
              ).create(testing.db)
        m = MetaData()
        t = Table('t', m, autoload=True, autoload_with=testing.db)
        eq_(
            t.c.x.server_default.arg.text, "'%s'::character varying" % (
                "abcd" * 40)
        )
    @testing.fails_if("postgresql < 8.1", "schema name leaks in, not sure")
    @testing.provide_metadata
    def test_renamed_sequence_reflection(self):
        # Renaming the implicit PK sequence must be picked up on re-reflect.
        metadata = self.metadata
        t = Table('t', metadata, Column('id', Integer, primary_key=True))
        metadata.create_all()
        m2 = MetaData(testing.db)
        t2 = Table('t', m2, autoload=True, implicit_returning=False)
        eq_(t2.c.id.server_default.arg.text,
            "nextval('t_id_seq'::regclass)")
        r = t2.insert().execute()
        eq_(r.inserted_primary_key, [1])
        testing.db.connect().execution_options(autocommit=True).\
            execute('alter table t_id_seq rename to foobar_id_seq'
                    )
        m3 = MetaData(testing.db)
        t3 = Table('t', m3, autoload=True, implicit_returning=False)
        eq_(t3.c.id.server_default.arg.text,
            "nextval('foobar_id_seq'::regclass)")
        r = t3.insert().execute()
        eq_(r.inserted_primary_key, [2])
    @testing.provide_metadata
    def test_renamed_pk_reflection(self):
        metadata = self.metadata
        t = Table('t', metadata, Column('id', Integer, primary_key=True))
        metadata.create_all()
        testing.db.connect().execution_options(autocommit=True).\
            execute('alter table t rename id to t_id')
        m2 = MetaData(testing.db)
        t2 = Table('t', m2, autoload=True)
        eq_([c.name for c in t2.primary_key], ['t_id'])
    @testing.provide_metadata
    def test_has_temporary_table(self):
        assert not testing.db.has_table("some_temp_table")
        user_tmp = Table(
            "some_temp_table", self.metadata,
            Column("id", Integer, primary_key=True),
            Column('name', String(50)),
            prefixes=['TEMPORARY']
        )
        user_tmp.create(testing.db)
        assert testing.db.has_table("some_temp_table")
    # -- cross-schema reflection scenarios ------------------------------
    @testing.provide_metadata
    def test_cross_schema_reflection_one(self):
        meta1 = self.metadata
        users = Table('users', meta1,
                      Column('user_id', Integer, primary_key=True),
                      Column('user_name', String(30), nullable=False),
                      schema='test_schema')
        addresses = Table(
            'email_addresses', meta1,
            Column(
                'address_id', Integer, primary_key=True),
            Column(
                'remote_user_id', Integer, ForeignKey(
                    users.c.user_id)),
            Column(
                'email_address', String(20)), schema='test_schema')
        meta1.create_all()
        meta2 = MetaData(testing.db)
        addresses = Table('email_addresses', meta2, autoload=True,
                          schema='test_schema')
        users = Table('users', meta2, mustexist=True,
                      schema='test_schema')
        j = join(users, addresses)
        self.assert_((users.c.user_id
                      == addresses.c.remote_user_id).compare(j.onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_two(self):
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True))
        referer = Table('referer', meta1,
                        Column('id', Integer, primary_key=True),
                        Column('ref', Integer, ForeignKey('subject.id')),
                        schema='test_schema')
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True)
        referer = Table('referer', meta2, schema='test_schema',
                        autoload=True)
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_three(self):
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True),
                        schema='test_schema_2')
        referer = Table(
            'referer',
            meta1,
            Column(
                'id',
                Integer,
                primary_key=True),
            Column(
                'ref',
                Integer,
                ForeignKey('test_schema_2.subject.id')),
            schema='test_schema')
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True,
                        schema='test_schema_2')
        referer = Table('referer', meta2, autoload=True,
                        schema='test_schema')
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_four(self):
        meta1 = self.metadata
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True),
                        schema='test_schema_2')
        referer = Table(
            'referer',
            meta1,
            Column(
                'id',
                Integer,
                primary_key=True),
            Column(
                'ref',
                Integer,
                ForeignKey('test_schema_2.subject.id')),
            schema='test_schema')
        meta1.create_all()
        conn = testing.db.connect()
        conn.detach()
        conn.execute("SET search_path TO test_schema, test_schema_2")
        meta2 = MetaData(bind=conn)
        subject = Table('subject', meta2, autoload=True,
                        schema='test_schema_2',
                        postgresql_ignore_search_path=True)
        referer = Table('referer', meta2, autoload=True,
                        schema='test_schema',
                        postgresql_ignore_search_path=True)
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
        conn.close()
    @testing.provide_metadata
    def test_cross_schema_reflection_five(self):
        meta1 = self.metadata
        # we assume 'public'
        default_schema = testing.db.dialect.default_schema_name
        subject = Table('subject', meta1,
                        Column('id', Integer, primary_key=True))
        referer = Table('referer', meta1,
                        Column('id', Integer, primary_key=True),
                        Column('ref', Integer, ForeignKey('subject.id')))
        meta1.create_all()
        meta2 = MetaData(testing.db)
        subject = Table('subject', meta2, autoload=True,
                        schema=default_schema,
                        postgresql_ignore_search_path=True
                        )
        referer = Table('referer', meta2, autoload=True,
                        schema=default_schema,
                        postgresql_ignore_search_path=True
                        )
        assert subject.schema == default_schema
        self.assert_((subject.c.id
                      == referer.c.ref).compare(
            subject.join(referer).onclause))
    @testing.provide_metadata
    def test_cross_schema_reflection_six(self):
        # test that the search path *is* taken into account
        # by default
        meta1 = self.metadata
        Table('some_table', meta1,
              Column('id', Integer, primary_key=True),
              schema='test_schema'
              )
        Table('some_other_table', meta1,
              Column('id', Integer, primary_key=True),
              Column('sid', Integer, ForeignKey('test_schema.some_table.id')),
              schema='test_schema_2'
              )
        meta1.create_all()
        with testing.db.connect() as conn:
            conn.detach()
            conn.execute(
                "set search_path to test_schema_2, test_schema, public")
            m1 = MetaData(conn)
            t1_schema = Table('some_table',
                              m1,
                              schema="test_schema",
                              autoload=True)
            t2_schema = Table('some_other_table',
                              m1,
                              schema="test_schema_2",
                              autoload=True)
            t2_no_schema = Table('some_other_table',
                                 m1,
                                 autoload=True)
            t1_no_schema = Table('some_table',
                                 m1,
                                 autoload=True)
            m2 = MetaData(conn)
            t1_schema_isp = Table('some_table',
                                  m2,
                                  schema="test_schema",
                                  autoload=True,
                                  postgresql_ignore_search_path=True)
            t2_schema_isp = Table('some_other_table',
                                  m2,
                                  schema="test_schema_2",
                                  autoload=True,
                                  postgresql_ignore_search_path=True)
            # t2_schema refers to t1_schema, but since "test_schema"
            # is in the search path, we instead link to t2_no_schema
            assert t2_schema.c.sid.references(
                t1_no_schema.c.id)
            # the two no_schema tables refer to each other also.
            assert t2_no_schema.c.sid.references(
                t1_no_schema.c.id)
            # but if we're ignoring search path, then we maintain
            # those explicit schemas vs. what the "default" schema is
            assert t2_schema_isp.c.sid.references(t1_schema_isp.c.id)
    @testing.provide_metadata
    def test_cross_schema_reflection_seven(self):
        # test that the search path *is* taken into account
        # by default
        meta1 = self.metadata
        Table('some_table', meta1,
              Column('id', Integer, primary_key=True),
              schema='test_schema'
              )
        Table('some_other_table', meta1,
              Column('id', Integer, primary_key=True),
              Column('sid', Integer, ForeignKey('test_schema.some_table.id')),
              schema='test_schema_2'
              )
        meta1.create_all()
        with testing.db.connect() as conn:
            conn.detach()
            conn.execute(
                "set search_path to test_schema_2, test_schema, public")
            meta2 = MetaData(conn)
            meta2.reflect(schema="test_schema_2")
            eq_(set(meta2.tables), set(
                ['test_schema_2.some_other_table', 'some_table']))
            meta3 = MetaData(conn)
            meta3.reflect(
                schema="test_schema_2", postgresql_ignore_search_path=True)
            eq_(set(meta3.tables), set(
                ['test_schema_2.some_other_table', 'test_schema.some_table']))
    @testing.provide_metadata
    def test_uppercase_lowercase_table(self):
        # "a" and "A" are distinct (quoted) table names in PostgreSQL.
        metadata = self.metadata
        a_table = Table('a', metadata, Column('x', Integer))
        A_table = Table('A', metadata, Column('x', Integer))
        a_table.create()
        assert testing.db.has_table("a")
        assert not testing.db.has_table("A")
        A_table.create(checkfirst=True)
        assert testing.db.has_table("A")
    def test_uppercase_lowercase_sequence(self):
        # Same case-sensitivity check for sequences.
        a_seq = Sequence('a')
        A_seq = Sequence('A')
        a_seq.create(testing.db)
        assert testing.db.dialect.has_sequence(testing.db, "a")
        assert not testing.db.dialect.has_sequence(testing.db, "A")
        A_seq.create(testing.db, checkfirst=True)
        assert testing.db.dialect.has_sequence(testing.db, "A")
        a_seq.drop(testing.db)
        A_seq.drop(testing.db)
    # -- index reflection ------------------------------------------------
    @testing.provide_metadata
    def test_index_reflection(self):
        """ Reflecting partial & expression-based indexes should warn
        """
        metadata = self.metadata
        t1 = Table(
            'party', metadata,
            Column(
                'id', String(10), nullable=False),
            Column(
                'name', String(20), index=True),
            Column(
                'aname', String(20)))
        metadata.create_all()
        testing.db.execute("""
          create index idx1 on party ((id || name))
        """)
        testing.db.execute("""
          create unique index idx2 on party (id) where name = 'test'
        """)
        testing.db.execute("""
            create index idx3 on party using btree
                (lower(name::text), lower(aname::text))
        """)
        def go():
            m2 = MetaData(testing.db)
            t2 = Table('party', m2, autoload=True)
            assert len(t2.indexes) == 2
            # Make sure indexes are in the order we expect them in
            tmp = [(idx.name, idx) for idx in t2.indexes]
            tmp.sort()
            r1, r2 = [idx[1] for idx in tmp]
            assert r1.name == 'idx2'
            assert r1.unique == True
            assert r2.unique == False
            assert [t2.c.id] == r1.columns
            assert [t2.c.name] == r2.columns
        testing.assert_warnings(
            go,
            ['Skipped unsupported reflection of '
             'expression-based index idx1',
             'Predicate of partial index idx2 ignored during '
             'reflection',
             'Skipped unsupported reflection of '
             'expression-based index idx3'])
    @testing.provide_metadata
    def test_index_reflection_modified(self):
        """reflect indexes when a column name has changed - PG 9
        does not update the name of the column in the index def.
        [ticket:2141]
        """
        metadata = self.metadata
        t1 = Table('t', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('x', Integer)
                   )
        metadata.create_all()
        conn = testing.db.connect().execution_options(autocommit=True)
        conn.execute("CREATE INDEX idx1 ON t (x)")
        conn.execute("ALTER TABLE t RENAME COLUMN x to y")
        ind = testing.db.dialect.get_indexes(conn, "t", None)
        eq_(ind, [{'unique': False, 'column_names': ['y'], 'name': 'idx1'}])
        conn.close()
    @testing.provide_metadata
    def test_index_reflection_with_storage_options(self):
        """reflect indexes with storage options set"""
        metadata = self.metadata
        Table(
            't', metadata,
            Column('id', Integer, primary_key=True),
            Column('x', Integer)
        )
        metadata.create_all()
        with testing.db.connect().execution_options(autocommit=True) as conn:
            conn.execute("CREATE INDEX idx1 ON t (x) WITH (fillfactor = 50)")
            ind = testing.db.dialect.get_indexes(conn, "t", None)
            eq_(ind, [{'unique': False, 'column_names': ['x'], 'name': 'idx1',
                       'dialect_options':
                       {"postgresql_with": {"fillfactor": "50"}}}])
            m = MetaData()
            t1 = Table('t', m, autoload_with=conn)
            eq_(
                list(t1.indexes)[0].dialect_options['postgresql']['with'],
                {"fillfactor": "50"}
            )
    @testing.provide_metadata
    def test_index_reflection_with_access_method(self):
        """reflect indexes with a non-default access method (USING gin)"""
        metadata = self.metadata
        Table(
            't', metadata,
            Column('id', Integer, primary_key=True),
            Column('x', ARRAY(Integer))
        )
        metadata.create_all()
        with testing.db.connect().execution_options(autocommit=True) as conn:
            conn.execute("CREATE INDEX idx1 ON t USING gin (x)")
            ind = testing.db.dialect.get_indexes(conn, "t", None)
            eq_(ind, [{'unique': False, 'column_names': ['x'], 'name': 'idx1',
                       'dialect_options': {'postgresql_using': 'gin'}}])
            m = MetaData()
            t1 = Table('t', m, autoload_with=conn)
            eq_(
                list(t1.indexes)[0].dialect_options['postgresql']['using'],
                'gin'
            )
    @testing.provide_metadata
    def test_foreign_key_option_inspection(self):
        # All FK options (match/onupdate/ondelete/deferrable/initially)
        # must round-trip through get_foreign_keys(); PG-default values
        # come back as None.
        metadata = self.metadata
        Table(
            'person',
            metadata,
            Column(
                'id',
                String(
                    length=32),
                nullable=False,
                primary_key=True),
            Column(
                'company_id',
                ForeignKey(
                    'company.id',
                    name='person_company_id_fkey',
                    match='FULL',
                    onupdate='RESTRICT',
                    ondelete='RESTRICT',
                    deferrable=True,
                    initially='DEFERRED')))
        Table(
            'company', metadata,
            Column('id', String(length=32), nullable=False, primary_key=True),
            Column('name', String(length=255)),
            Column(
                'industry_id',
                ForeignKey(
                    'industry.id',
                    name='company_industry_id_fkey',
                    onupdate='CASCADE', ondelete='CASCADE',
                    deferrable=False,  # PG default
                    # PG default
                    initially='IMMEDIATE'
                )
            )
        )
        Table('industry', metadata,
              Column('id', Integer(), nullable=False, primary_key=True),
              Column('name', String(length=255))
              )
        fk_ref = {
            'person_company_id_fkey': {
                'name': 'person_company_id_fkey',
                'constrained_columns': ['company_id'],
                'referred_columns': ['id'],
                'referred_table': 'company',
                'referred_schema': None,
                'options': {
                    'onupdate': 'RESTRICT',
                    'deferrable': True,
                    'ondelete': 'RESTRICT',
                    'initially': 'DEFERRED',
                    'match': 'FULL'
                }
            },
            'company_industry_id_fkey': {
                'name': 'company_industry_id_fkey',
                'constrained_columns': ['industry_id'],
                'referred_columns': ['id'],
                'referred_table': 'industry',
                'referred_schema': None,
                'options': {
                    'onupdate': 'CASCADE',
                    'deferrable': None,
                    'ondelete': 'CASCADE',
                    'initially': None,
                    'match': None
                }
            }
        }
        metadata.create_all()
        inspector = inspect(testing.db)
        fks = inspector.get_foreign_keys('person') + \
            inspector.get_foreign_keys('company')
        for fk in fks:
            eq_(fk, fk_ref[fk['name']])
    # -- enum inspection -------------------------------------------------
    @testing.provide_metadata
    def test_inspect_enums_schema(self):
        conn = testing.db.connect()
        enum_type = postgresql.ENUM(
            'sad', 'ok', 'happy', name='mood',
            schema='test_schema',
            metadata=self.metadata)
        enum_type.create(conn)
        inspector = reflection.Inspector.from_engine(conn.engine)
        eq_(
            inspector.get_enums('test_schema'), [{
                'visible': False,
                'name': 'mood',
                'schema': 'test_schema',
                'labels': ['sad', 'ok', 'happy']
            }])
    @testing.provide_metadata
    def test_inspect_enums(self):
        enum_type = postgresql.ENUM(
            'cat', 'dog', 'rat', name='pet', metadata=self.metadata)
        enum_type.create(testing.db)
        inspector = reflection.Inspector.from_engine(testing.db)
        eq_(inspector.get_enums(), [
            {
                'visible': True,
                'labels': ['cat', 'dog', 'rat'],
                'name': 'pet',
                'schema': 'public'
            }])
    @testing.provide_metadata
    def test_inspect_enums_star(self):
        # get_enums() is schema-scoped; get_enums('*') spans all schemas.
        enum_type = postgresql.ENUM(
            'cat', 'dog', 'rat', name='pet', metadata=self.metadata)
        schema_enum_type = postgresql.ENUM(
            'sad', 'ok', 'happy', name='mood',
            schema='test_schema',
            metadata=self.metadata)
        enum_type.create(testing.db)
        schema_enum_type.create(testing.db)
        inspector = reflection.Inspector.from_engine(testing.db)
        eq_(inspector.get_enums(), [
            {
                'visible': True,
                'labels': ['cat', 'dog', 'rat'],
                'name': 'pet',
                'schema': 'public'
            }])
        eq_(inspector.get_enums('*'), [
            {
                'visible': True,
                'labels': ['cat', 'dog', 'rat'],
                'name': 'pet',
                'schema': 'public'
            },
            {
                'visible': False,
                'name': 'mood',
                'schema': 'test_schema',
                'labels': ['sad', 'ok', 'happy']
            }])
    @testing.provide_metadata
    @testing.only_on("postgresql >= 8.5")
    def test_reflection_with_unique_constraint(self):
        insp = inspect(testing.db)
        meta = self.metadata
        uc_table = Table('pgsql_uc', meta,
                         Column('a', String(10)),
                         UniqueConstraint('a', name='uc_a'))
        uc_table.create()
        # PostgreSQL will create an implicit index for a unique
        # constraint.   Separately we get both
        indexes = set(i['name'] for i in insp.get_indexes('pgsql_uc'))
        constraints = set(i['name']
                          for i in insp.get_unique_constraints('pgsql_uc'))
        self.assert_('uc_a' in indexes)
        self.assert_('uc_a' in constraints)
        # reflection corrects for the dupe
        reflected = Table('pgsql_uc', MetaData(testing.db), autoload=True)
        indexes = set(i.name for i in reflected.indexes)
        constraints = set(uc.name for uc in reflected.constraints)
        self.assert_('uc_a' not in indexes)
        self.assert_('uc_a' in constraints)
    @testing.provide_metadata
    def test_reflect_unique_index(self):
        insp = inspect(testing.db)
        meta = self.metadata
        # a unique index OTOH we are able to detect is an index
        # and not a unique constraint
        uc_table = Table('pgsql_uc', meta,
                         Column('a', String(10)),
                         Index('ix_a', 'a', unique=True))
        uc_table.create()
        indexes = dict((i['name'], i) for i in insp.get_indexes('pgsql_uc'))
        constraints = set(i['name']
                          for i in insp.get_unique_constraints('pgsql_uc'))
        self.assert_('ix_a' in indexes)
        assert indexes['ix_a']['unique']
        self.assert_('ix_a' not in constraints)
        reflected = Table('pgsql_uc', MetaData(testing.db), autoload=True)
        indexes = dict((i.name, i) for i in reflected.indexes)
        constraints = set(uc.name for uc in reflected.constraints)
        self.assert_('ix_a' in indexes)
        assert indexes['ix_a'].unique
        self.assert_('ix_a' not in constraints)
class CustomTypeReflectionTest(fixtures.TestBase):
    """Reflection of columns whose type name maps to a user-registered
    entry in PGDialect.ischema_names, with and without type arguments."""
    class CustomType(object):
        # Stand-in type object; records the parsed type arguments.
        def __init__(self, arg1=None, arg2=None):
            self.arg1 = arg1
            self.arg2 = arg2
    ischema_names = None
    def setup(self):
        # Swap in a copy of the class-level lookup table so tests can
        # mutate it without affecting other tests.
        ischema_names = postgresql.PGDialect.ischema_names
        postgresql.PGDialect.ischema_names = ischema_names.copy()
        self.ischema_names = ischema_names
    def teardown(self):
        # Restore the original lookup table saved in setup().
        postgresql.PGDialect.ischema_names = self.ischema_names
        self.ischema_names = None
    def _assert_reflected(self, dialect):
        """Assert the dialect parses 'my_custom_type(...)' into CustomType."""
        for sch, args in [
            ('my_custom_type', (None, None)),
            ('my_custom_type()', (None, None)),
            ('my_custom_type(ARG1)', ('ARG1', None)),
            ('my_custom_type(ARG1, ARG2)', ('ARG1', 'ARG2')),
        ]:
            column_info = dialect._get_column_info(
                'colname', sch, None, False,
                {}, {}, 'public')
            assert isinstance(column_info['type'], self.CustomType)
            eq_(column_info['type'].arg1, args[0])
            eq_(column_info['type'].arg2, args[1])
    def test_clslevel(self):
        # Registration on the dialect class itself.
        postgresql.PGDialect.ischema_names['my_custom_type'] = self.CustomType
        dialect = postgresql.PGDialect()
        self._assert_reflected(dialect)
    def test_instancelevel(self):
        # Registration on a single dialect instance only.
        dialect = postgresql.PGDialect()
        dialect.ischema_names = dialect.ischema_names.copy()
        dialect.ischema_names['my_custom_type'] = self.CustomType
        self._assert_reflected(dialect)
|
cancro7/gem5 | refs/heads/master | src/mem/slicc/symbols/Action.py | 92 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
class Action(Symbol):
    """A SLICC state-machine action: a Symbol carrying the resource
    demands declared for that action."""

    def __init__(self, table, ident, resources, location, pairs):
        # Register with the symbol table first, then record the
        # per-action resource usage on top of the base Symbol state.
        super(Action, self).__init__(table, ident, location, pairs)
        self.resources = resources

    def __repr__(self):
        # e.g. "[Action: z_stall]"
        return "[Action: {0}]".format(self.ident)

__all__ = [ "Action" ]
|
bolabola/PTVS | refs/heads/master | Python/Tests/TestData/Grammar/FromFuture26.py | 18 | from __future__ import print_function
from __future__ import unicode_literals
|
kidaa/convergences | refs/heads/master | generate_dataset.py | 2 | # -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
from math import ceil
from collections import defaultdict
import codecs
import json
from multiprocessing import Pool as ThreadPool
import click
from pync import Notifier
import wekeypedia
from wekeypedia.wikipedia.api import api as api
from generate_stats import compute_stats
data_dir = "data"
options = {
"force": False
}
def api_bunch(page_titles, lang, req):
    """Run the MediaWiki API request *req* over *page_titles* in batches.

    Titles are sent 50 at a time (the API limit), following "continue"
    pagination; the langlinks of every returned page are accumulated
    into a dict keyed by page title.
    """
    results = defaultdict(list)
    param = req
    w = api(lang)
    # print len(page_titles)
    for i in range(0,int(ceil(len(page_titles)/50))):
        # Slice out the next batch of up to 50 titles.
        param["titles"] = "|".join(page_titles[i*50:i*50+50])
        while True:
            r = w.get(param, method="post")
            if hasattr(r, 'status_code'):
                # NOTE(review): a raw response object here signals an HTTP
                # error; the batch is retried forever -- confirm intended.
                click.secho("Request error", bg="red", fg="black")
                print r
                continue
            for pageid, p in r["query"]["pages"].items():
                if "langlinks" in p:
                    results[ p["title"] ].extend(p['langlinks'])
            # results.update({ p["title"]: p['langlinks'] for pageid, p in r["query"]["pages"].items() if 'langlinks' in p })
            if "continue" in r:
                # More results pending for this batch; resume from marker.
                param.update(r["continue"])
            else:
                break
    return results
def get_lang_projection(pages, source, target):
    """Return (source_title, target_title) pairs for *pages*.

    Queries the *source*-language wiki for langlinks restricted to the
    *target* language; pages with no counterpart are simply absent.
    """
    params = {
        "redirects": "",
        "format": "json",
        "action": "query",
        "prop": "info|langlinks",
        "lllimit": 500,
        "lllang": target,
        "continue":""
    }
    r = api_bunch(pages, source, params)
    convergence = [ (page, t["*"]) for page,tt in r.items() for t in tt if t["lang"] == target ]
    return convergence
def replace_redirects(pages, lang, flat=True):
results = set(pages)
results_dict = {}
params = {
"redirects": "",
"format": "json",
"action": "query"
}
w = api(lang)
for i in range(0,int(ceil(len(pages)/50))):
# print i*50,i*50+50-1
params["titles"] = "|".join(pages[i*50:i*50+50])
resp = w.post(params)
if hasattr(resp, 'status_code'):
click.secho("Request error", bg="red", fg="black")
print resp
continue
if "redirects" in resp["query"]:
results = results - set([ r["from"] for r in resp["query"]["redirects"]])
results = results | set([ r["to"] for r in resp["query"]["redirects"]])
results_dict[r["from"]] = r["to"]
if flat:
return list(results)
else:
return results_dict
def from_to(page, source, target, dataset_name, skip=False):
    """Dump *page*'s link set and its projection onto language *target*.

    Writes data/<dataset_name>/<source>.json (unless skip=True) and
    data/<dataset_name>/<source>.<target>.json.
    """
    p = wekeypedia.WikipediaPage(page, source)
    #links = list({ x["title"] for x in p.get_links() })
    # Deduplicate link titles and normalize redirects before projecting.
    links = replace_redirects(list({ x["title"] for x in p.get_links() }), p.lang)
    print( u"[{0}][{3}] {1} ---> {2} links".format(source, p.title, len(links), target) )
    if not skip:
        write(u"data/{0}/{1}.json".format(dataset_name, source), links)
    links_translated = get_lang_projection(links, source, target)
    # Resolve redirects on the target side as well; setdefault keeps the
    # translated title itself when it was not a redirect.
    links_translated_redirects = replace_redirects(list({ x[1] for x in links_translated }), target, False)
    links_translated = { x[0]: links_translated_redirects.setdefault(x[1], x[1]) for x in links_translated }
    write(u"data/{0}/{1}.{2}.json".format(dataset_name, source, target), links_translated)
def write(filename, data):
    """Serialize *data* as pretty-printed JSON to *filename*, encoded as
    UTF-8 with a BOM (utf-8-sig), non-ASCII characters kept literal."""
    payload = json.dumps(data, ensure_ascii=False, indent=2, separators=(',', ': '))
    out = codecs.open(filename, "w", "utf-8-sig")
    try:
        out.write(payload)
    finally:
        out.close()
def m(args):
    """Worker for the thread pool: mirror one language pair.

    *args* is (lang, page, p_lang, p_title, options); downloads the
    projection page<->p_title in both directions unless the JSON dumps
    already exist (or options["force"] is set).
    """
    lang = args[0]
    page = args[1]
    p_lang = args[2]
    p_title = args[3]
    # Shadows the module-level `options` with the copy passed through the
    # pool (multiprocessing workers don't share the parent's globals).
    options = args[4]
    if lang in ignore_langs:
        return
    if options["force"] or not os.path.isfile(u"data/{1}.{0}/{3}.{1}.json".format(p_title, p_lang, page, lang)):
        from_to(page, lang, p_lang, u"{1}.{0}".format(p_title, p_lang))
    if options["force"] or not os.path.isfile(u"data/{1}.{0}/{1}.{3}.json".format(p_title, p_lang, page, lang)):
        # skip=True: the source-language link list was already written above.
        from_to(p_title, p_lang, lang, u"{1}.{0}".format(p_title, p_lang), skip=True)
def compute_source(source):
    """Download the full dataset for one source page.

    *source* is a page title, optionally suffixed with "#<lang>" (default
    language is "en"). Dumps the page's links and every available
    language projection under data/, fanning out over a 4-worker pool.
    """
    # source = "Love" # "Love"
    lang = "en"
    if "#" in source:
        # "Title#fr" -> title "Title" on the "fr" wiki.
        lang = source.split("#")[1]
        source = source.split("#")[0]
    Notifier.notify('begining download for {1} ({0})'.format(lang, source), title='generate_dataset.py')
    p = wekeypedia.WikipediaPage(source, lang)
    links = replace_redirects(list({ x["title"] for x in p.get_links() }), p.lang)
    print("links: {0}".format(len(links)))
    if not os.path.exists(u"data/{1}.{0}".format(source, p.lang)):
        os.makedirs(u"data/{1}.{0}".format(source, p.lang))
    write(u"data/{1}.{0}/{1}.json".format(source, p.lang), links)
    # All languages this page exists in, mapped to the translated title.
    available_langs = { x["lang"]: x["*"] for x in p.get_langlinks() }
    write(u"data/{1}.{0}.json".format(source, p.lang), available_langs)
    # map(m, [ (x[0], x[1], p.lang, p.title) for x in available_langs.items() ] )
    pool = ThreadPool(4)
    pool.map(m, [ (x[0], x[1], p.lang, p.title, options) for x in available_langs.items() ] )
    Notifier.notify('download for {1} ({0}) is finised'.format(lang, source), title='generate_dataset.py')
    pool.close()
    pool.join()
# Language codes to exclude from the download fan-out (checked by m();
# empty by default).
ignore_langs = []
def reset():
    """Remove all datasets -- placeholder for the --reset CLI option;
    currently a no-op."""
    pass
def current_datasets():
    """Scan `data_dir` and map language code -> list of dataset page names.

    Dataset dumps are named "<lang>.<page>.json"; files containing
    "stats" are ignored.

    Improvements over the original: the loop variable no longer shadows
    the builtin `format`, and the filename is split with maxsplit=2 so a
    page name containing extra dots no longer raises ValueError.
    """
    result = defaultdict(list)
    files = [ f for f in os.listdir(data_dir) if os.path.isfile("/".join([ data_dir,f ])) ]
    for name in [ f for f in files if "stats" not in f ]:
        # "en.Love.json" -> ("en", "Love", "json")
        lang, page, _ext = name.split(".", 2)
        result[lang].append(page)
    return result
@click.command()
@click.option("--datasets", help="list of current datasets", is_flag=True)
@click.option("--reset", help="remove all datasets", is_flag=True)
@click.option("--force","-f", help="force dataset creation", is_flag=True)
@click.option("--page", "-p", default=[], help="list of pages", multiple=True)
def cli(page, datasets, reset, force):
    # Command-line entry point: list datasets and/or build the dataset
    # (links + per-language projections + stats) for each --page.
    # NOTE(review): the --reset flag is accepted but never acted upon.
    # No docstring on purpose -- click would surface it as --help text.
    options["force"] = force
    if datasets:
        datasets = current_datasets()
        for l in datasets.keys():
            click.echo(l)
            click.echo("-"*len(l))
            for p in datasets[l]:
                click.echo(p)
            click.echo("")
    if len(page) > 0:
        sources = page
        sources = [ s for s in sources ]
        click.echo(sources)
        # NOTE(review): relies on Python 2's eager map() for side effects;
        # under Python 3 these would be lazy and never run.
        map(compute_source, sources)
        click.echo("computing statistics")
        map(compute_stats, sources)
click.echo("computing statistics")
map(compute_stats, sources)
if __name__ == "__main__":
cli()
|
ormnv/os_final_project | refs/heads/master | django/contrib/gis/db/backends/oracle/creation.py | 620 | from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.util import truncate_name
class OracleCreation(DatabaseCreation):
    """Oracle Spatial-aware database-creation backend for GeoDjango."""
    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField
        output = super(OracleCreation, self).sql_indexes_for_field(model, f, style)
        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            # Register the geometry column and its LONG/LAT dimension
            # bounds + tolerance in USER_SDO_GEOM_METADATA; Oracle
            # requires this row before a spatial index can be built.
            output.append(style.SQL_KEYWORD('INSERT INTO ') +
                          style.SQL_TABLE('USER_SDO_GEOM_METADATA') +
                          ' (%s, %s, %s, %s)\n ' % tuple(map(qn, ['TABLE_NAME', 'COLUMN_NAME', 'DIMINFO', 'SRID'])) +
                          style.SQL_KEYWORD(' VALUES ') + '(\n ' +
                          style.SQL_TABLE(gqn(db_table)) + ',\n ' +
                          style.SQL_FIELD(gqn(f.column)) + ',\n ' +
                          style.SQL_KEYWORD("MDSYS.SDO_DIM_ARRAY") + '(\n ' +
                          style.SQL_KEYWORD("MDSYS.SDO_DIM_ELEMENT") +
                          ("('LONG', %s, %s, %s),\n " % (f._extent[0], f._extent[2], f._tolerance)) +
                          style.SQL_KEYWORD("MDSYS.SDO_DIM_ELEMENT") +
                          ("('LAT', %s, %s, %s)\n ),\n" % (f._extent[1], f._extent[3], f._tolerance)) +
                          ' %s\n );' % f.srid)
            if f.spatial_index:
                # Getting the index name, Oracle doesn't allow object
                # names > 30 characters.
                idx_name = truncate_name('%s_%s_id' % (db_table, f.column), 30)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn(idx_name)) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) + '(' +
                              style.SQL_FIELD(qn(f.column)) + ') ' +
                              style.SQL_KEYWORD('INDEXTYPE IS ') +
                              style.SQL_TABLE('MDSYS.SPATIAL_INDEX') + ';')
        return output
|
yongshengwang/hue | refs/heads/master | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlrd/xldate.py | 73 | # -*- coding: cp1252 -*-
# No part of the content of this file was derived from the works of David Giffin.
##
# <p>Copyright © 2005-2008 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
#
# <p>Provides function(s) for dealing with Microsoft Excel dates.</p>
##
# 2008-10-18 SJM Fix bug in xldate_from_date_tuple (affected some years after 2099)
# The conversion from days to (year, month, day) starts with
# an integral "julian day number" aka JDN.
# FWIW, JDN 0 corresponds to noon on Monday November 24 in Gregorian year -4713.
# More importantly:
# Noon on Gregorian 1900-03-01 (day 61 in the 1900-based system) is JDN 2415080.0
# Noon on Gregorian 1904-01-02 (day 1 in the 1904-based system) is JDN 2416482.0
from timemachine import int_floor_div as ifd
_JDN_delta = (2415080 - 61, 2416482 - 1)
assert _JDN_delta[1] - _JDN_delta[0] == 1462
# Exception hierarchy for Excel date conversion errors.  All of them derive
# from XLDateError (itself a ValueError) so callers can catch the base class.
class XLDateError(ValueError): pass          # base class for all xldate errors
class XLDateNegative(XLDateError): pass      # serial number < 0.0
class XLDateAmbiguous(XLDateError): pass     # 1900 leap-year bug zone (datemode 0, 1.0 <= x < 61.0)
class XLDateTooLarge(XLDateError): pass      # Gregorian year 10000 or later
class XLDateBadDatemode(XLDateError): pass   # datemode argument was neither 0 nor 1
class XLDateBadTuple(XLDateError): pass      # out-of-range (year, month, day) / (h, m, s) tuple
# First day count that is out of range (maps to 10000-01-01), indexed by
# datemode: (1900-based limit, 1904-based limit); the two differ by 1462 days.
_XLDAYS_TOO_LARGE = (2958466, 2958466 - 1462) # This is equivalent to 10000-01-01
##
# Convert an Excel number (presumed to represent a date, a datetime or a time) into
# a tuple suitable for feeding to datetime or mx.DateTime constructors.
# @param xldate The Excel number
# @param datemode 0: 1900-based, 1: 1904-based.
# <br>WARNING: when using this function to
# interpret the contents of a workbook, you should pass in the Book.datemode
# attribute of that workbook. Whether
# the workbook has ever been anywhere near a Macintosh is irrelevant.
# @return Gregorian (year, month, day, hour, minute, nearest_second).
# <br>Special case: if 0.0 <= xldate < 1.0, it is assumed to represent a time;
# (0, 0, 0, hour, minute, second) will be returned.
# <br>Note: 1904-01-01 is not regarded as a valid date in the datemode 1 system; its "serial number"
# is zero.
# @throws XLDateNegative xldate < 0.00
# @throws XLDateAmbiguous The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0)
# @throws XLDateTooLarge Gregorian year 10000 or later
# @throws XLDateBadDatemode datemode arg is neither 0 nor 1
# @throws XLDateError Covers the 4 specific errors
def xldate_as_tuple(xldate, datemode):
    """Convert an Excel serial date number into a
    (year, month, day, hour, minute, nearest_second) tuple.

    datemode is 0 for the 1900-based system, 1 for the 1904-based one.
    A value in [0.0, 1.0) is treated as a pure time-of-day and yields
    (0, 0, 0, hour, minute, second).  Raises the XLDate* exceptions
    documented above for negative, ambiguous, or out-of-range inputs.
    """
    if datemode not in (0, 1):
        raise XLDateBadDatemode(datemode)
    if xldate == 0.00:
        return (0, 0, 0, 0, 0, 0)
    if xldate < 0.00:
        raise XLDateNegative(xldate)
    # Split into whole days and a day-fraction, then turn the fraction
    # into whole seconds (86400 seconds per day, rounded to nearest).
    xldays = int(xldate)
    frac = xldate - xldays
    seconds = int(round(frac * 86400.0))
    assert 0 <= seconds <= 86400
    if seconds == 86400:
        # Rounding pushed the time to exactly midnight of the next day.
        hour = minute = second = 0
        xldays += 1
    else:
        # second = seconds % 60; minutes = seconds // 60
        minutes, second = divmod(seconds, 60)
        # minute = minutes % 60; hour = minutes // 60
        hour, minute = divmod(minutes, 60)
    if xldays >= _XLDAYS_TOO_LARGE[datemode]:
        raise XLDateTooLarge(xldate)
    if xldays == 0:
        # Pure time-of-day: no date component.
        return (0, 0, 0, hour, minute, second)
    if xldays < 61 and datemode == 0:
        # Excel's fictitious 1900-02-29 makes day numbers 1..60 ambiguous
        # in the 1900-based system.
        raise XLDateAmbiguous(xldate)
    # Convert the day count to a Julian Day Number, then to Gregorian
    # (year, month, day) using an integer-only calendar algorithm; ifd is
    # integer floor division (see module header for the JDN anchor values).
    jdn = xldays + _JDN_delta[datemode]
    yreg = (ifd(ifd(jdn * 4 + 274277, 146097) * 3, 4) + jdn + 1363) * 4 + 3
    mp = ifd(yreg % 1461, 4) * 535 + 333
    d = ifd(mp % 16384, 535) + 1
    # mp /= 16384
    mp >>= 14
    # mp is a March-based month index (0 = March); fold back to the
    # conventional January-based calendar.
    if mp >= 10:
        return (ifd(yreg, 1461) - 4715, mp - 9, d, hour, minute, second)
    else:
        return (ifd(yreg, 1461) - 4716, mp + 3, d, hour, minute, second)
# === conversions from date/time to xl numbers
def _leap(y):
if y % 4: return 0
if y % 100: return 1
if y % 400: return 0
return 1
_days_in_month = (None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
##
# Convert a date tuple (year, month, day) to an Excel date.
# @param year Gregorian year.
# @param month 1 <= month <= 12
# @param day 1 <= day <= last day of that (year, month)
# @param datemode 0: 1900-based, 1: 1904-based.
# @throws XLDateAmbiguous The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0)
# @throws XLDateBadDatemode datemode arg is neither 0 nor 1
# @throws XLDateBadTuple (year, month, day) is too early/late or has invalid component(s)
# @throws XLDateError Covers the specific errors
# NOTE: Python 2 only -- the tuple-unpacking parameter syntax below was
# removed in Python 3 (PEP 3113).
def xldate_from_date_tuple((year, month, day), datemode):
    """Convert a Gregorian (year, month, day) tuple to an Excel serial
    day count (float).  datemode is 0 (1900-based) or 1 (1904-based).
    (0, 0, 0) is accepted as the special "no date" value and returns 0.0.
    Raises XLDateBadDatemode / XLDateBadTuple / XLDateAmbiguous as
    documented in the comment block above.
    """
    if datemode not in (0, 1):
        raise XLDateBadDatemode(datemode)
    if year == 0 and month == 0 and day == 0:
        return 0.00
    # Range-check each component before doing any arithmetic.
    if not (1900 <= year <= 9999):
        raise XLDateBadTuple("Invalid year: %r" % ((year, month, day),))
    if not (1 <= month <= 12):
        raise XLDateBadTuple("Invalid month: %r" % ((year, month, day),))
    # Feb 29 is allowed only in leap years; every other month uses the
    # fixed table.
    if day < 1 \
    or (day > _days_in_month[month] and not(day == 29 and month == 2 and _leap(year))):
        raise XLDateBadTuple("Invalid day: %r" % ((year, month, day),))
    # Compute the Julian Day Number with a March-based year (Yp) and
    # month (Mp) so leap days fall at the end of the cycle; ifd is
    # integer floor division.
    Yp = year + 4716
    M = month
    if M <= 2:
        Yp = Yp - 1
        Mp = M + 9
    else:
        Mp = M - 3
    jdn = ifd(1461 * Yp, 4) + ifd(979 * Mp + 16, 32) + \
          day - 1364 - ifd(ifd(Yp + 184, 100) * 3, 4)
    # Shift from JDN to the Excel day-number epoch for this datemode.
    xldays = jdn - _JDN_delta[datemode]
    if xldays <= 0:
        raise XLDateBadTuple("Invalid (year, month, day): %r" % ((year, month, day),))
    if xldays < 61 and datemode == 0:
        # Dates before 1900-03-01 are ambiguous in the 1900-based system
        # because of Excel's fictitious 1900-02-29.
        raise XLDateAmbiguous("Before 1900-03-01: %r" % ((year, month, day),))
    return float(xldays)
##
# Convert a time tuple (hour, minute, second) to an Excel "date" value (fraction of a day).
# @param hour 0 <= hour < 24
# @param minute 0 <= minute < 60
# @param second 0 <= second < 60
# @throws XLDateBadTuple Out-of-range hour, minute, or second
# NOTE: Python 2 only -- tuple-unpacking parameter syntax (PEP 3113).
def xldate_from_time_tuple((hour, minute, second)):
    """Convert (hour, minute, second) to an Excel time value: a fraction
    of a day in [0.0, 1.0).  Raises XLDateBadTuple for out-of-range input.
    """
    if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60:
        return ((second / 60.0 + minute) / 60.0 + hour) / 24.0
    raise XLDateBadTuple("Invalid (hour, minute, second): %r" % ((hour, minute, second),))
##
# Convert a datetime tuple (year, month, day, hour, minute, second) to an Excel date value.
# For more details, refer to other xldate_from_*_tuple functions.
# @param datetime_tuple (year, month, day, hour, minute, second)
# @param datemode 0: 1900-based, 1: 1904-based.
def xldate_from_datetime_tuple(datetime_tuple, datemode):
    """Convert (year, month, day, hour, minute, second) to an Excel date
    value: the serial day count from xldate_from_date_tuple plus the
    day-fraction from xldate_from_time_tuple.  Propagates any XLDate*
    exception raised by either helper.
    """
    return (
        xldate_from_date_tuple(datetime_tuple[:3], datemode)
        +
        xldate_from_time_tuple(datetime_tuple[3:])
        )
|
Laimiux/mydeatree | refs/heads/master | django/core/management/commands/cleanup.py | 350 | import datetime
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
    """Management command that purges expired rows from the session table."""

    help = "Can be run as a cronjob or directly to clean out old data from the database (only expired sessions at the moment)."

    def handle_noargs(self, **options):
        # Imported lazily so the command module can be loaded before a
        # settings module / database connection is configured.
        from django.contrib.sessions.models import Session
        from django.db import transaction

        cutoff = datetime.datetime.now()
        Session.objects.filter(expire_date__lt=cutoff).delete()
        transaction.commit_unless_managed()
|
trishnaguha/ansible | refs/heads/devel | lib/ansible/modules/network/panos/panos_match_rule.py | 26 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_match_rule
short_description: Test for match against a security rule on PAN-OS devices or Panorama management console.
description:
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
author: "Robert Hagen (@rnh556)"
version_added: "2.5"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
notes:
- Checkmode is not supported.
- Panorama NOT is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
rule_type:
description:
- Type of rule. Valid types are I(security) or I(nat).
default: "security"
source_zone:
description:
- The source zone.
source_ip:
description:
- The source IP address.
required: true
source_port:
description:
- The source port.
source_user:
description:
- The source user or group.
to_interface:
description:
- The inbound interface in a NAT rule.
destination_zone:
description:
- The destination zone.
destination_ip:
description:
- The destination IP address.
destination_port:
description:
- The destination port.
application:
description:
- The application.
protocol:
description:
- The IP protocol number from 1 to 255.
category:
description:
- URL category
vsys_id:
description:
- ID of the VSYS object.
default: "vsys1"
required: true
'''
EXAMPLES = '''
- name: check security rules for Google DNS
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'security'
source_ip: '10.0.0.0'
destination_ip: '8.8.8.8'
application: 'dns'
destination_port: '53'
protocol: '17'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check security rules inbound SSH with user match
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'security'
source_ip: '0.0.0.0'
source_user: 'mydomain\\jsmith'
destination_ip: '192.168.100.115'
destination_port: '22'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check NAT rules for source NAT
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'nat'
source_zone: 'Prod-DMZ'
source_ip: '10.10.118.50'
to_interface: 'ethernet1/2'
destination_zone: 'Internet'
destination_ip: '0.0.0.0'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check NAT rules for inbound web
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'nat'
source_zone: 'Internet'
source_ip: '0.0.0.0'
to_interface: 'ethernet1/1'
destination_zone: 'Prod DMZ'
destination_ip: '192.168.118.50'
destination_port: '80'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check security rules for outbound POP3 in vsys4
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
vsys_id: 'vsys4'
rule_type: 'security'
source_ip: '10.0.0.0'
destination_ip: '4.3.2.1'
application: 'pop3'
destination_port: '110'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
# Optional third-party dependencies.  HAS_LIB records whether they are all
# importable so main() can fail with a clean message instead of a traceback.
# Fix: the original imported PanXapiError twice; the duplicate line is removed.
try:
    from pan.xapi import PanXapiError
    from pandevice import base
    from pandevice import policies
    from pandevice import panorama
    import xmltodict
    import json

    HAS_LIB = True
except ImportError:
    HAS_LIB = False
def create_security_test(**kwargs):
    """Build a PAN-OS 'test security-policy-match' operational command.

    Recognized keyword arguments (all optional at this level; main()
    supplies the full set): source_ip, source_user, destination_ip,
    application, destination_port, protocol, category.

    Any argument that is absent or falsy is omitted from the command.
    Fix: uses kwargs.get() so a missing key no longer raises KeyError,
    which also lets other callers pass only the fields they need.

    :return: the complete CLI test string.
    """
    security_test = 'test security-policy-match'

    # Add the source IP (required by the firewall for a match test)
    if kwargs.get('source_ip'):
        security_test += ' source "%s"' % kwargs['source_ip']

    # Add the source user (optional)
    if kwargs.get('source_user'):
        security_test += ' source-user "%s"' % kwargs['source_user']

    # Add the destination IP (required by the firewall for a match test)
    if kwargs.get('destination_ip'):
        security_test += ' destination "%s"' % kwargs['destination_ip']

    # Add the application (optional)
    if kwargs.get('application'):
        security_test += ' application "%s"' % kwargs['application']

    # Add the destination port (required by the firewall for a match test)
    if kwargs.get('destination_port'):
        security_test += ' destination-port "%s"' % kwargs['destination_port']

    # Add the IP protocol number (required by the firewall for a match test)
    if kwargs.get('protocol'):
        security_test += ' protocol "%s"' % kwargs['protocol']

    # Add the URL category (optional)
    if kwargs.get('category'):
        security_test += ' category "%s"' % kwargs['category']

    # Return the resulting string
    return security_test
def create_nat_test(**kwargs):
    """Build a PAN-OS 'test nat-policy-match' operational command.

    Recognized keyword arguments (all optional at this level; main()
    supplies the full set): source_zone, source_ip, source_port,
    to_interface, destination_zone, destination_ip, destination_port,
    protocol.

    Any argument that is absent or falsy is omitted from the command.
    Fixes: uses kwargs.get() so a missing key no longer raises KeyError,
    and corrects the mislabelled source-port comment.

    :return: the complete CLI test string.
    """
    nat_test = 'test nat-policy-match'

    # Add the source zone (optional)
    if kwargs.get('source_zone'):
        nat_test += ' from "%s"' % kwargs['source_zone']

    # Add the source IP (required by the firewall for a match test)
    if kwargs.get('source_ip'):
        nat_test += ' source "%s"' % kwargs['source_ip']

    # Add the source port (optional)
    if kwargs.get('source_port'):
        nat_test += ' source-port "%s"' % kwargs['source_port']

    # Add inbound interface (optional)
    if kwargs.get('to_interface'):
        nat_test += ' to-interface "%s"' % kwargs['to_interface']

    # Add the destination zone (optional)
    if kwargs.get('destination_zone'):
        nat_test += ' to "%s"' % kwargs['destination_zone']

    # Add the destination IP (required by the firewall for a match test)
    if kwargs.get('destination_ip'):
        nat_test += ' destination "%s"' % kwargs['destination_ip']

    # Add the destination port (optional)
    if kwargs.get('destination_port'):
        nat_test += ' destination-port "%s"' % kwargs['destination_port']

    # Add the IP protocol number (required by the firewall for a match test)
    if kwargs.get('protocol'):
        nat_test += ' protocol "%s"' % kwargs['protocol']

    # Return the resulting string
    return nat_test
def main():
    """Module entry point.

    Builds a 'test security-policy-match' / 'test nat-policy-match' op
    command from the module parameters, runs it on the target firewall,
    and exits with the matching rule rendered as JSON (or a failure
    message when nothing matches).
    """
    # Ansible parameter schema; either api_key or password must be given.
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(no_log=True),
        username=dict(default='admin'),
        api_key=dict(no_log=True),
        vsys_id=dict(default='vsys1'),
        rule_type=dict(required=True, choices=['security', 'nat']),
        source_zone=dict(default=None),
        source_ip=dict(default=None),
        source_user=dict(default=None),
        source_port=dict(default=None, type=int),
        to_interface=dict(default=None),
        destination_zone=dict(default=None),
        category=dict(default=None),
        application=dict(default=None),
        protocol=dict(default=None, type=int),
        destination_ip=dict(default=None),
        destination_port=dict(default=None, type=int)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           required_one_of=[['api_key', 'password']])
    # Bail out early when pan-python / pandevice / xmltodict are missing.
    if not HAS_LIB:
        module.fail_json(msg='Missing required libraries.')
    # Unpack every parameter into a local for readability below.
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    api_key = module.params['api_key']
    vsys_id = module.params['vsys_id']
    rule_type = module.params['rule_type']
    source_zone = module.params['source_zone']
    source_ip = module.params['source_ip']
    source_user = module.params['source_user']
    source_port = module.params['source_port']
    to_interface = module.params['to_interface']
    destination_zone = module.params['destination_zone']
    destination_ip = module.params['destination_ip']
    destination_port = module.params['destination_port']
    category = module.params['category']
    application = module.params['application']
    protocol = module.params['protocol']
    # Create the device with the appropriate pandevice type
    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
    # Fail the module if this is a Panorama instance
    if isinstance(device, panorama.Panorama):
        module.fail_json(
            failed=1,
            msg='Panorama is not supported.'
        )
    # Create and attach security and NAT rulebases. Then populate them.
    # NOTE(review): both names are bound to the SAME Rulebase object, which
    # is then added to the device twice -- presumably intentional so one
    # rulebase holds both rule types, but confirm against pandevice docs.
    sec_rule_base = nat_rule_base = policies.Rulebase()
    device.add(sec_rule_base)
    device.add(nat_rule_base)
    policies.SecurityRule.refreshall(sec_rule_base)
    policies.NatRule.refreshall(nat_rule_base)
    # Which action shall we take on the object?
    if rule_type == 'security':
        # Search for the object
        test_string = create_security_test(
            source_ip=source_ip,
            source_user=source_user,
            destination_ip=destination_ip,
            destination_port=destination_port,
            application=application,
            protocol=protocol,
            category=category
        )
    elif rule_type == 'nat':
        test_string = create_nat_test(
            source_zone=source_zone,
            source_ip=source_ip,
            source_port=source_port,
            to_interface=to_interface,
            destination_zone=destination_zone,
            destination_ip=destination_ip,
            destination_port=destination_port,
            protocol=protocol
        )
    # Submit the op command with the appropriate test string
    try:
        response = device.op(cmd=test_string, vsys=vsys_id)
    except PanXapiError as exc:
        module.fail_json(msg=exc.message)
    # Exactly one <entry> under result/rules means a match; the rule name is
    # the text before the first ';'.
    # NOTE(review): any count other than 1 (including multiple matches) falls
    # through to the no-match branches -- confirm this is the intended
    # behaviour for multi-match responses.
    if response.find('result/rules').__len__() == 1:
        rule_name = response.find('result/rules/entry').text.split(';')[0]
    elif rule_type == 'nat':
        module.exit_json(msg='No matching NAT rule.')
    else:
        module.fail_json(msg='Rule match failed. Please check playbook syntax.')
    # Look the named rule up in the locally refreshed rulebase.
    if rule_type == 'security':
        rule_match = sec_rule_base.find(rule_name, policies.SecurityRule)
    elif rule_type == 'nat':
        rule_match = nat_rule_base.find(rule_name, policies.NatRule)
    # Print out the rule
    module.exit_json(
        stdout_lines=json.dumps(xmltodict.parse(rule_match.element_str()), indent=2),
        msg='Rule matched'
    )
if __name__ == '__main__':
main()
|
nconlisk/python | refs/heads/master | VTK/abq_rpt.py | 1 | import os
import io
import csv
from abaqus import *
from abaqusConstants import *
########################################################
# #
# Author: Noel Conlisk #
# Email: noecon@gmail.com #
# Script function: Stress data extraction script #
# #
# Prerequisites: Must be called through Abaqus/CAE #
# #
########################################################
# TO CALL FROM CMD TYPE abq6101 cae noGUI=abq_rpt.py.
# Find all .odb files in the directory and put them in a list
# NOTE(review): this script runs inside Abaqus/CAE; 'session', NumberFormat,
# AUTOMATIC, ON/OFF etc. come from the star-imports above, and 'sys' is
# presumably also injected by them (it is never imported here) -- verify.
# The 'io' and 'csv' imports at the top appear unused.
path = 'E:\\SoFIA3\\test_conversion\\'
# path = raw_input('input path to .odb file: ')
target_files = [f for f in os.listdir(path) if f.endswith('.odb')]
a = int(len(target_files))  # number of .odb files to process
# Main loop
for i in range(0, a):
    jobfile = target_files[i]
    # Patient ID is the .odb file name without its extension.
    patient_ID = jobfile.split('.odb')[0]
    filename = path+'\\'+jobfile
    # create odb object from odb file.
    o1 = session.openOdb(name=filename)
    session.viewports['Viewport: 1'].setValues(displayedObject=o1)
    # frame = o1.steps[ 'Step-1' ].frames[-1]
    frame = len(o1.steps['Step-1'].frames) - 1
    # the - 1 is becaue odb file starts from 0 and results from 1, so if 4
    # increments len would return 5 (where increments are 0 - 4),
    # as we want the last increment or 4 in the example we simply subtract 1
    # from the total size.
    # Section to generate .rpt file of nodal stresses.
    report_name = path + '\\' + patient_ID + '_unique_nodal.rpt'
    odb = session.odbs[filename]
    # Six significant digits, automatic formatting; suppress totals/min-max.
    nf = NumberFormat(numDigits=6, precision=0, format=AUTOMATIC)
    session.fieldReportOptions.setValues(numberFormat=nf, printTotal=OFF, printMinMax=OFF)
    # Write nodal von Mises stress for the last frame of Step-1, sorted by
    # node label, appending to the report file.
    session.writeFieldReport(fileName=report_name, append=ON, sortItem='Node Label', odb=odb, step=0, frame=frame, outputPosition=NODAL, variable=(('S', INTEGRATION_POINT, ((INVARIANT, 'Mises'), )), ))
    o1.close()
sys.exit()
|
saintdragon2/python-3-lecture-2015 | refs/heads/master | civil-final/1st_presentation/11조/pygame/tests/run_tests__tests/print_stdout/__init__.py | 6353 | # empty
|
rsteca/python-social-auth | refs/heads/master | social/backends/qiita.py | 70 | """
Qiita OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/qiita.html
http://qiita.com/api/v2/docs#get-apiv2oauthauthorize
"""
import json
from social.backends.oauth import BaseOAuth2
class QiitaOAuth2(BaseOAuth2):
    """OAuth2 backend for Qiita (https://qiita.com)."""
    name = 'qiita'
    AUTHORIZATION_URL = 'https://qiita.com/api/v2/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://qiita.com/api/v2/access_tokens'
    ACCESS_TOKEN_METHOD = 'POST'
    SCOPE_SEPARATOR = ' '
    REDIRECT_STATE = True
    # Profile fields copied verbatim from the API response into extra_data.
    EXTRA_DATA = [
        ('description', 'description'),
        ('facebook_id', 'facebook_id'),
        ('followees_count', 'followees_count'),
        ('followers_count', 'followers_count'),
        ('github_login_name', 'github_login_name'),
        ('id', 'id'),
        ('items_count', 'items_count'),
        ('linkedin_id', 'linkedin_id'),
        ('location', 'location'),
        ('name', 'name'),
        ('organization', 'organization'),
        ('profile_image_url', 'profile_image_url'),
        ('twitter_screen_name', 'twitter_screen_name'),
        ('website_url', 'website_url'),
    ]

    def auth_complete_params(self, state=None):
        """Serialize the token request as JSON, dropping the standard
        grant_type / redirect_uri fields that Qiita does not accept."""
        params = super(QiitaOAuth2, self).auth_complete_params(state)
        params.pop('grant_type', None)
        params.pop('redirect_uri', None)
        return json.dumps(params)

    def auth_headers(self):
        # The token request body built above is JSON, not form-encoded.
        return {'Content-Type': 'application/json'}

    def request_access_token(self, *args, **kwargs):
        """Mirror Qiita's 'token' field under the 'access_token' key the
        rest of the pipeline expects."""
        payload = super(QiitaOAuth2, self).request_access_token(*args, **kwargs)
        payload['access_token'] = payload['token']
        return payload

    def get_user_details(self, response):
        """Return user details from Qiita account"""
        return {
            'username': response['id'],
            'fullname': response['name'],
        }

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        endpoint = 'https://qiita.com/api/v2/authenticated_user'
        auth_header = {'Authorization': ' Bearer {0}'.format(access_token)}
        return self.get_json(endpoint, headers=auth_header)
|
IndraVikas/scikit-neuralnetwork | refs/heads/master | examples/bench_cifar10.py | 5 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, print_function)
import sys
import pickle
import numpy as np
PRETRAIN = False
def load(name):
    """Unpickle *name*; if that file is absent, fall back to *name*.gz.

    Announces the file being read on stdout before loading it.
    """
    # Pickles written by Python 2 need latin1 decoding under Python 3.
    kwargs = {}
    if sys.version_info[0] == 3:
        kwargs = {'encoding': 'latin1'}

    print("\t" + name)
    try:
        with open(name, 'rb') as handle:
            return pickle.load(handle, **kwargs)
    except IOError:
        import gzip
        with gzip.open(name + '.gz', 'rb') as handle:
            return pickle.load(handle, **kwargs)
# Download and extract Python data for CIFAR10 manually from here:
# http://www.cs.toronto.edu/~kriz/cifar.html
# Load one training batch (batches 2-5 are commented out below) plus the
# test batch; each batch is a dict with 'data' and 'labels' entries.
print("Loading...")
dataset1 = load('data_batch_1')
dataset2 = load('data_batch_2')
dataset3 = load('data_batch_3')
dataset4 = load('data_batch_4')
dataset5 = load('data_batch_5')
dataset0 = load('test_batch')
print("")
# Only batch 1 is actually stacked into the training arrays; the remaining
# batches are loaded but unused (see the trailing commented-out code).
data_train = np.vstack([dataset1['data']]) #, dataset2['data'], dataset3['data'], dataset4['data'], dataset5['data']])
labels_train = np.hstack([dataset1['labels']]) #, dataset2['labels'], dataset3['labels'], dataset4['labels'], dataset5['labels']])
# Scale pixel bytes to [0, 1].
data_train = data_train.astype('float') / 255.
labels_train = labels_train
data_test = dataset0['data'].astype('float') / 255.
labels_test = np.array(dataset0['labels'])
n_feat = data_train.shape[1]     # flattened pixels per image (3072)
n_targets = labels_train.max() + 1  # number of classes
# Build the MLP classifier (sknn wraps a Theano/pylearn2 backend).
# NOTE(review): n_feat/8 is float division under Python 3 -- this script
# appears written for Python 2, where / on ints truncates; confirm before
# running under Python 3.
from sknn import mlp
nn = mlp.Classifier(
    layers=[
        mlp.Layer("Tanh", units=n_feat/8),
        mlp.Layer("Sigmoid", units=n_feat/16),
        mlp.Layer("Softmax", units=n_targets)],
    n_iter=50,
    n_stable=10,
    learning_rate=0.002,
    learning_rule="momentum",
    valid_size=0.1,
    verbose=1)
# Optionally pre-train the hidden layers with an auto-encoder and copy the
# learned weights into the classifier before supervised training.
if PRETRAIN:
    from sknn import ae
    ae = ae.AutoEncoder(
        layers=[
            ae.Layer("Tanh", units=n_feat/8),
            ae.Layer("Sigmoid", units=n_feat/16)],
        learning_rate=0.002,
        n_iter=10,
        verbose=1)
    ae.fit(data_train)
    ae.transfer(nn)
nn.fit(data_train, labels_train)
# Evaluate on the held-out test batch and print standard sklearn metrics.
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
expected = labels_test
predicted = nn.predict(data_test)
print("Classification report for classifier %s:\n%s\n" % (
    nn, classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % confusion_matrix(expected, predicted))
|
movmov/cc | refs/heads/master | vendor/Twisted-10.0.0/twisted/pair/test/test_rawudp.py | 55 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
from twisted.trial import unittest
from twisted.internet import protocol, reactor, error
from twisted.python import failure
from twisted.pair import rawudp
class MyProtocol(protocol.DatagramProtocol):
    """Test helper: asserts that each datagram received matches the next
    (data, host, port) triple in the *expecting* list, in order."""
    def __init__(self, expecting):
        # Remaining (data, host, port) triples, consumed front-to-back.
        self.expecting = list(expecting)
    # NOTE: Python 2 only -- tuple-unpacking parameter syntax (PEP 3113).
    def datagramReceived(self, data, (host, port)):
        assert self.expecting, 'Got a packet when not expecting anymore.'
        expectData, expectHost, expectPort = self.expecting.pop(0)
        assert expectData == data, "Expected data %r, got %r" % (expectData, data)
        assert expectHost == host, "Expected host %r, got %r" % (expectHost, host)
        assert expectPort == port, "Expected port %d=0x%04x, got %d=0x%04x" % (expectPort, expectPort, port, port)
class RawUDPTestCase(unittest.TestCase):
    """Tests for rawudp.RawUDPProtocol: parsing of raw UDP headers
    (source port, dest port, length, checksum) and demultiplexing of
    payloads to registered DatagramProtocols by destination port.

    NOTE: Python 2 only -- uses `except E, e` and `raise E, msg` syntax.
    """
    def testPacketParsing(self):
        """A packet for a registered port is decoded and delivered."""
        proto = rawudp.RawUDPProtocol()
        p1 = MyProtocol([
            ('foobar', 'testHost', 0x43A2),
            ])
        proto.addProto(0xF00F, p1)
        proto.datagramReceived("\x43\xA2" #source
                               + "\xf0\x0f" #dest
                               + "\x00\x06" #len
                               + "\xDE\xAD" #check
                               + "foobar",
                               partial=0,
                               dest='dummy',
                               source='testHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        assert not p1.expecting, \
               'Should not expect any more packets, but still want %r' % p1.expecting
    def testMultiplePackets(self):
        """Successive packets to the same port arrive in order."""
        proto = rawudp.RawUDPProtocol()
        p1 = MyProtocol([
            ('foobar', 'testHost', 0x43A2),
            ('quux', 'otherHost', 0x33FE),
            ])
        proto.addProto(0xF00F, p1)
        proto.datagramReceived("\x43\xA2" #source
                               + "\xf0\x0f" #dest
                               + "\x00\x06" #len
                               + "\xDE\xAD" #check
                               + "foobar",
                               partial=0,
                               dest='dummy',
                               source='testHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        proto.datagramReceived("\x33\xFE" #source
                               + "\xf0\x0f" #dest
                               + "\x00\x05" #len
                               + "\xDE\xAD" #check
                               + "quux",
                               partial=0,
                               dest='dummy',
                               source='otherHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        assert not p1.expecting, \
               'Should not expect any more packets, but still want %r' % p1.expecting
    def testMultipleSameProtos(self):
        """Two protocols registered for the same port both get the packet."""
        proto = rawudp.RawUDPProtocol()
        p1 = MyProtocol([
            ('foobar', 'testHost', 0x43A2),
            ])
        p2 = MyProtocol([
            ('foobar', 'testHost', 0x43A2),
            ])
        proto.addProto(0xF00F, p1)
        proto.addProto(0xF00F, p2)
        proto.datagramReceived("\x43\xA2" #source
                               + "\xf0\x0f" #dest
                               + "\x00\x06" #len
                               + "\xDE\xAD" #check
                               + "foobar",
                               partial=0,
                               dest='dummy',
                               source='testHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        assert not p1.expecting, \
               'Should not expect any more packets, but still want %r' % p1.expecting
        assert not p2.expecting, \
               'Should not expect any more packets, but still want %r' % p2.expecting
    def testWrongProtoNotSeen(self):
        """A packet for an unregistered port is silently dropped."""
        proto = rawudp.RawUDPProtocol()
        p1 = MyProtocol([])
        proto.addProto(1, p1)
        proto.datagramReceived("\x43\xA2" #source
                               + "\xf0\x0f" #dest
                               + "\x00\x06" #len
                               + "\xDE\xAD" #check
                               + "foobar",
                               partial=0,
                               dest='dummy',
                               source='testHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
    def testDemuxing(self):
        """Packets are routed to the protocol matching their dest port."""
        proto = rawudp.RawUDPProtocol()
        p1 = MyProtocol([
            ('foobar', 'testHost', 0x43A2),
            ('quux', 'otherHost', 0x33FE),
            ])
        proto.addProto(0xF00F, p1)
        p2 = MyProtocol([
            ('quux', 'otherHost', 0xA401),
            ('foobar', 'testHost', 0xA302),
            ])
        proto.addProto(0xB050, p2)
        proto.datagramReceived("\xA4\x01" #source
                               + "\xB0\x50" #dest
                               + "\x00\x05" #len
                               + "\xDE\xAD" #check
                               + "quux",
                               partial=0,
                               dest='dummy',
                               source='otherHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        proto.datagramReceived("\x43\xA2" #source
                               + "\xf0\x0f" #dest
                               + "\x00\x06" #len
                               + "\xDE\xAD" #check
                               + "foobar",
                               partial=0,
                               dest='dummy',
                               source='testHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        proto.datagramReceived("\x33\xFE" #source
                               + "\xf0\x0f" #dest
                               + "\x00\x05" #len
                               + "\xDE\xAD" #check
                               + "quux",
                               partial=0,
                               dest='dummy',
                               source='otherHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        proto.datagramReceived("\xA3\x02" #source
                               + "\xB0\x50" #dest
                               + "\x00\x06" #len
                               + "\xDE\xAD" #check
                               + "foobar",
                               partial=0,
                               dest='dummy',
                               source='testHost',
                               protocol='dummy',
                               version='dummy',
                               ihl='dummy',
                               tos='dummy',
                               tot_len='dummy',
                               fragment_id='dummy',
                               fragment_offset='dummy',
                               dont_fragment='dummy',
                               more_fragments='dummy',
                               ttl='dummy',
                               )
        assert not p1.expecting, \
               'Should not expect any more packets, but still want %r' % p1.expecting
        assert not p2.expecting, \
               'Should not expect any more packets, but still want %r' % p2.expecting
    def testAddingBadProtos_WrongLevel(self):
        """Adding a wrong level protocol raises an exception."""
        e = rawudp.RawUDPProtocol()
        try:
            e.addProto(42, "silliness")
        except TypeError, e:
            if e.args == ('Added protocol must be an instance of DatagramProtocol',):
                pass
            else:
                raise
        else:
            raise AssertionError, 'addProto must raise an exception for bad protocols'
    def testAddingBadProtos_TooSmall(self):
        """Adding a protocol with a negative number raises an exception."""
        e = rawudp.RawUDPProtocol()
        try:
            e.addProto(-1, protocol.DatagramProtocol())
        except TypeError, e:
            if e.args == ('Added protocol must be positive or zero',):
                pass
            else:
                raise
        else:
            raise AssertionError, 'addProto must raise an exception for bad protocols'
    def testAddingBadProtos_TooBig(self):
        """Adding a protocol with a number >=2**16 raises an exception."""
        e = rawudp.RawUDPProtocol()
        try:
            e.addProto(2**16, protocol.DatagramProtocol())
        except TypeError, e:
            if e.args == ('Added protocol must fit in 16 bits',):
                pass
            else:
                raise
        else:
            raise AssertionError, 'addProto must raise an exception for bad protocols'
    def testAddingBadProtos_TooBig2(self):
        """Adding a protocol with a number >=2**16 raises an exception."""
        e = rawudp.RawUDPProtocol()
        try:
            e.addProto(2**16+1, protocol.DatagramProtocol())
        except TypeError, e:
            if e.args == ('Added protocol must fit in 16 bits',):
                pass
            else:
                raise
        else:
            raise AssertionError, 'addProto must raise an exception for bad protocols'
|
arizvisa/syringe | refs/heads/master | lib/pecoff/portable/datadirectory.py | 1 | import sys, ptypes
from ptypes import pstruct, parray, ptype, pbinary, pstr, dyn
from ..headers import *
from . import exports, imports, resources, exceptions, relocations, debug, loader, clr, tls, headers
## directory entry base types
# Base types: a data-directory entry whose pointer is interpreted as a
# virtual address (AddressEntry) or as a raw file offset (OffsetEntry).
class AddressEntry(headers.IMAGE_DATA_DIRECTORY): addressing = staticmethod(virtualaddress)
class OffsetEntry(headers.IMAGE_DATA_DIRECTORY): addressing = staticmethod(fileoffset)
# One subclass per slot of the PE optional header's data directory; each
# _object_ names the structure the entry points at.
class IMAGE_DIRECTORY_ENTRY_EXPORT(AddressEntry):
    _object_ = exports.IMAGE_EXPORT_DIRECTORY
class IMAGE_DIRECTORY_ENTRY_IMPORT(AddressEntry):
    _object_ = imports.IMAGE_IMPORT_DIRECTORY
class IMAGE_DIRECTORY_ENTRY_RESOURCE(AddressEntry):
    #_object_ = resources.IMAGE_RESOURCE_DIRECTORY
    # Extends the resource directory with a trailing blob covering the rest
    # of the entry's size so the raw resource data is captured too.
    class _object_(resources.IMAGE_RESOURCE_DIRECTORY):
        _fields_ = resources.IMAGE_RESOURCE_DIRECTORY._fields_[:]
        _fields_.append((lambda s: dyn.block(s.blocksize() - (s.value[-1].getoffset()+s.value[-1].blocksize()-s.value[0].getoffset())), 'ResourceData'))
class IMAGE_DIRECTORY_ENTRY_EXCEPTION(AddressEntry):
    _object_ = exceptions.IMAGE_EXCEPTION_DIRECTORY
# The security (certificate) directory is the one slot addressed by file
# offset rather than RVA.
class IMAGE_DIRECTORY_ENTRY_SECURITY(OffsetEntry):
    class _object_(parray.block):
        _object_ = headers.Certificate
class IMAGE_DIRECTORY_ENTRY_BASERELOC(AddressEntry):
    _object_ = relocations.IMAGE_BASERELOC_DIRECTORY
class IMAGE_DIRECTORY_ENTRY_DEBUG(AddressEntry):
    _object_ = debug.IMAGE_DEBUG_DIRECTORY
class IMAGE_DIRECTORY_ENTRY_ARCHITECTURE(AddressEntry):
    '''IMAGE_DIRECTORY_ENTRY_COPYRIGHT'''
class IMAGE_DIRECTORY_ENTRY_GLOBALPTR(AddressEntry):
    pass
# TLS and load-config layouts differ between PE32 and PE32+, so _object_ is
# resolved lazily from the optional header's bitness.
class IMAGE_DIRECTORY_ENTRY_TLS(AddressEntry):
    def _object_(self):
        res = self.getparent(Header)['OptionalHeader'].li
        return tls.IMAGE_TLS_DIRECTORY64 if res.is64() else tls.IMAGE_TLS_DIRECTORY
class IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG(AddressEntry):
    def _object_(self):
        res = self.getparent(Header)['OptionalHeader'].li
        res = loader.IMAGE_LOAD_CONFIG_DIRECTORY64 if res.is64() else loader.IMAGE_LOAD_CONFIG_DIRECTORY32
        #return dyn.clone(res, blocksize=lambda s, cb=self['Size'].li.int(): cb)
        return res
class IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT(OffsetEntry):
    _object_ = imports.IMAGE_BOUND_IMPORT_DIRECTORY
class IMAGE_DIRECTORY_ENTRY_IAT(AddressEntry):
    def _object_(self):
        res = self.getparent(Header)['OptionalHeader'].li
        return imports.IMAGE_IMPORT_ADDRESS_TABLE64 if res.is64() else imports.IMAGE_IMPORT_ADDRESS_TABLE
class IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT(AddressEntry):
    _object_ = imports.IMAGE_DELAYLOAD_DIRECTORY
class IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR(AddressEntry):
    _object_ = clr.IMAGE_COR20_HEADER
class IMAGE_DIRECTORY_ENTRY_RESERVED(AddressEntry): pass
class DataDirectoryEntry(pint.enum):
    """Names for the 16 data-directory slots, plus alias spellings that
    map to the same index (used for name-based lookup in DataDirectory)."""
    _values_ = [
        ('Export', 0),
        ('Import', 1),
        ('Resource', 2),
        ('Exception', 3),
        ('Security', 4),
        ('BaseReloc', 5),
        ('Debug', 6),
        ('Architecture', 7),
        ('GlobalPtr', 8),
        ('Tls', 9),
        ('LoadConfig', 10),
        ('BoundImport', 11),
        ('IAT', 12),
        ('DelayLoad', 13),
        ('ClrHeader', 14),
        ('Reserved', 15),

        # aliases
        ('Exports', 0),
        ('Imports', 1),
        ('Resources', 2),
        ('Exceptions', 3),
        ('Certificate', 4),
        ('Reloc', 5),
        ('Relocations', 5),
        ('Relocation', 5),
        ('BaseRelocation', 5),
        ('BaseRelocations', 5),
        ('Global', 8),
        ('Thread', 9),
        ('ThreadLocalStorage', 9),
        ('LoaderConfig', 10),
        ('Loader', 10),
        ('Bound', 11),
        ('BoundImports', 11),
        ('ImportAddress', 12),
        ('DelayImportDescriptor', 13),
        ('DelayImport', 13),
        ('Clr', 14),
        ('COM', 14),
        ('COR20', 14),
    ]
class DataDirectory(parray.type):
    """The optional header's fixed array of 16 data-directory entries.

    Supports name-based indexing (``dd['Import']``, ``dd['ExportTable']``)
    via DataDirectoryEntry's alias mapping.
    """
    length = 16

    def __getindex__(self, key):
        # Translate a string key (case-insensitive, optional 'Table'
        # suffix) into its slot index; non-string keys pass through.
        string_types = (str, unicode) if sys.version_info.major < 3 else (str,)
        if isinstance(key, string_types):
            # try and be smart in case user tries to be dumb
            mapping = DataDirectoryEntry.mapping()
            key, res = key.lower(), { k.lower() : v for k, v in mapping.items() }
            key = key[:key.rfind('table')] if key.endswith('table') else key[:]
            return res[key]
        return key

    def _object_(self):
        # Entry type is chosen positionally: slot N of the array is
        # decoded with the Nth directory class.
        entries = (
            IMAGE_DIRECTORY_ENTRY_EXPORT,
            IMAGE_DIRECTORY_ENTRY_IMPORT,
            IMAGE_DIRECTORY_ENTRY_RESOURCE,
            IMAGE_DIRECTORY_ENTRY_EXCEPTION,
            IMAGE_DIRECTORY_ENTRY_SECURITY,
            IMAGE_DIRECTORY_ENTRY_BASERELOC,
            IMAGE_DIRECTORY_ENTRY_DEBUG,
            IMAGE_DIRECTORY_ENTRY_ARCHITECTURE,
            IMAGE_DIRECTORY_ENTRY_GLOBALPTR,
            IMAGE_DIRECTORY_ENTRY_TLS,
            IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG,
            IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT,
            IMAGE_DIRECTORY_ENTRY_IAT,
            IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT,
            IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR,
            IMAGE_DIRECTORY_ENTRY_RESERVED,
        )
        return entries[len(self.value)]

    def details(self, **options):
        # One line per entry: offset, classname (right-aligned to the
        # widest), slot index, address summary, and size.
        if self.initializedQ():
            width = max(len(n.classname()) for n in self.value) if self.value else 0
            return '\n'.join('[{:x}] {:>{}}{:4s} {:s}:+{:#x}'.format(n.getoffset(), n.classname(), width, '{%d}'%i, n['Address'].summary(), n['Size'].int()) for i, n in enumerate(self.value))
        return super(DataDirectory,self).details(**options)

    def repr(self, **options):
        return self.details(**options)
|
vorwerkc/pymatgen | refs/heads/master | pymatgen/io/tests/test_lmto.py | 5 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ry_to_eV
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.lmto import LMTOCopl, LMTOCtrl
from pymatgen.util.num import round_to_sigfigs
from pymatgen.util.testing import PymatgenTest
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__email__ = "esters@uoregon.edu"
__date__ = "Nov 30, 2017"
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp")
this_dir = os.path.dirname(os.path.abspath(__file__))
class CtrlTest(unittest.TestCase):
    """Read/write and round-trip tests for LMTO CTRL files."""

    def setUp(self):
        # chdir into the fixture directory: LMTOCtrl.from_file() with no
        # argument reads the default "CTRL" file (the Fe fixture) from cwd
        os.chdir(test_dir)
        self.ctrl_bise = LMTOCtrl.from_file(filename="CTRL.BiSe")
        self.ctrl_fe = LMTOCtrl.from_file()

    def tearDown(self):
        os.chdir(this_dir)

    def test_dict(self):
        # as_dict()/from_dict() must round-trip to an equal object
        self.assertEqual(self.ctrl_bise, LMTOCtrl.from_dict(self.ctrl_bise.as_dict()))

    def test_structure(self):
        # the CTRL-derived structure must match the reference POSCAR
        bise_poscar = Structure.from_file("POSCAR.BiSe")
        self.assertTrue(bise_poscar.matches(self.ctrl_bise.structure))
        self.assertEqual(
            self.ctrl_bise,
            LMTOCtrl(self.ctrl_bise.structure, header="Bi6Se6, hexagonal"),
        )

    def test_read_write(self):
        # writing a CTRL file and re-reading it must preserve the structure
        self.ctrl_bise.write_file(filename="CTRL.tmp")
        ctrl_tmp = LMTOCtrl.from_file(filename="CTRL.tmp")
        self.assertTrue(self.ctrl_bise.structure.matches(ctrl_tmp.structure))
        os.remove("CTRL.tmp")
class CoplTest(PymatgenTest):
    """Parsing tests for LMTO COPL (COHP) output files."""

    def setUp(self):
        # chdir into the fixture directory: LMTOCopl() with no filename
        # reads the default "COPL" file (the spin-polarized Fe fixture)
        os.chdir(test_dir)
        self.copl_bise = LMTOCopl("COPL.BiSe")
        self.copl_bise_eV = LMTOCopl(filename="COPL.BiSe", to_eV=True)
        self.copl_fe = LMTOCopl()

    def tearDown(self):
        os.chdir(this_dir)

    def test_attributes(self):
        self.assertFalse(self.copl_bise.is_spin_polarized)
        self.assertTrue(self.copl_fe.is_spin_polarized)
        self.assertEqual(len(self.copl_bise.energies), 801)
        self.assertEqual(len(self.copl_fe.energies), 801)
        self.assertEqual(len(self.copl_bise.cohp_data), 7)
        self.assertEqual(len(self.copl_fe.cohp_data), 8)

    def test_cohp_data(self):
        # expected (bond length, (site index pair)) for each labeled bond
        lengths_sites_bise = {
            "Bi1-Se7": (2.882, (0, 6)),
            "Bi1-Se9": (3.102, (0, 8)),
            "Bi3-Se11": (2.917, (2, 10)),
            "Bi3-Se9": (3.050, (2, 8)),
            "Bi5-Bi6": (3.073, (4, 5)),
            "Bi5-Se11": (3.375, (4, 10)),
            "Se7-Se8": (3.364, (6, 7)),
        }
        for bond in self.copl_bise.cohp_data:
            self.assertEqual(self.copl_bise.cohp_data[bond]["length"], lengths_sites_bise[bond][0])
            self.assertEqual(self.copl_bise.cohp_data[bond]["sites"], lengths_sites_bise[bond][1])
        labels_fe = ["Fe1-Fe1"] + ["Fe1-Fe1-%d" % i for i in range(1, 8)]
        self.assertEqual(sorted(self.copl_fe.cohp_data.keys()), labels_fe)
        for bond in labels_fe:
            self.assertEqual(self.copl_fe.cohp_data[bond]["length"], 2.482)
            self.assertEqual(self.copl_fe.cohp_data[bond]["sites"], (0, 0))

    def test_energies(self):
        # the Ry->eV converted parse must agree with manual conversion
        # to 5 significant figures
        self.assertEqual(self.copl_bise.efermi, -0.17223)
        self.assertEqual(self.copl_bise_eV.efermi, -2.3433)
        self.assertEqual(self.copl_fe.efermi, -0.085683)
        ener_eV = np.array(
            [round_to_sigfigs(energy, 5) for energy in self.copl_bise.energies * Ry_to_eV],
            dtype=float,
        )
        self.assertArrayEqual(ener_eV, self.copl_bise_eV.energies)
        copl_icohp = self.copl_bise.cohp_data["Bi1-Se7"]["ICOHP"][Spin.up]
        icohp = np.array([round_to_sigfigs(i, 5) for i in copl_icohp * Ry_to_eV], dtype=float)
        icohp_eV = self.copl_bise_eV.cohp_data["Bi1-Se7"]["ICOHP"][Spin.up]
        self.assertArrayEqual(icohp, icohp_eV)
if __name__ == "__main__":
unittest.main()
|
tschmorleiz/amcat | refs/heads/master | api/rest/viewsets/coding/coded_article.py | 2 | ###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from rest_framework import serializers
from rest_framework.viewsets import ReadOnlyModelViewSet
from amcat.models import Sentence, CodedArticle, Article, Medium
from amcat.tools import amcattest
from amcat.tools.caching import cached
from api.rest.resources.amcatresource import DatatablesMixin
from api.rest.serializer import AmCATModelSerializer
from api.rest.viewset import AmCATViewSetMixin
from api.rest.viewsets.coding.codingjob import CodingJobViewSetMixin
from api.rest.viewsets.project import ProjectViewSetMixin
from api.rest.viewsets.sentence import SentenceSerializer, SentenceViewSetMixin
__all__ = (
"CodedArticleSerializer", "CodedArticleViewSetMixin", "CodedArticleViewSet",
"CodedArticleSentenceViewSet")
def article_property(property_name):
    """Build an accessor that reads *property_name* from the Article
    behind a coded article, resolved through the owner's ``get_article``.
    """
    def accessor(self, coded_article):
        article = self.get_article(coded_article)
        return getattr(article, property_name)
    return accessor
# Hack class to allow queryset to be set, which in turn allows the metadata (OPTIONS) generator
# to include this field in its models-property.
class PseudoSerializerMethodField(serializers.SerializerMethodField):
    # Declare a dummy queryset so the OPTIONS metadata generator treats
    # this method field as model-backed; it is never actually evaluated.
    queryset = Medium.objects.none()
class CodedArticleSerializer(AmCATModelSerializer):
    """Serializer for CodedArticle which flattens properties of the
    underlying Article (headline, date, pagenr, length, medium) into the
    output.

    All referenced articles are fetched in a single batched query per
    serializer instance (see _get_articles) to avoid N+1 queries when
    serializing a list of coded articles.
    """
    model = CodedArticle

    headline = serializers.SerializerMethodField("get_headline")
    date = serializers.SerializerMethodField("get_date")
    pagenr = serializers.SerializerMethodField("get_pagenr")
    length = serializers.SerializerMethodField("get_length")
    article_id = serializers.SerializerMethodField("get_article_id")
    medium = PseudoSerializerMethodField("get_medium")

    get_headline = article_property("headline")
    get_date = article_property("date")
    get_pagenr = article_property("pagenr")
    get_length = article_property("length")
    get_article_id = article_property("id")

    @classmethod
    def get_metadata_field_name(cls, field):
        # Report the pseudo 'medium' field as model-backed so that the
        # OPTIONS metadata renders it as a choice field.
        # Compare method names with ==: the previous `is` comparison only
        # worked while both strings happened to be the same object.
        if hasattr(field, "method_name") and field.method_name == cls.base_fields["medium"].method_name:
            return "ModelChoiceField"

    def _get_coded_articles(self):
        """Return the queryset of coded articles being serialized: the
        view's object_list (list view) or a one-element queryset (detail)."""
        view = self.context["view"]
        if hasattr(view, "object_list"): return view.object_list
        return CodedArticle.objects.filter(id=view.object.id)

    @cached
    def _get_articles(self):
        """Fetch all referenced articles in one query, keyed by id.

        'medium' is included in only() so that get_medium() does not
        trigger a deferred-field query for every article."""
        aids = self._get_coded_articles().values_list("article__id", flat=True)
        articles = Article.objects.filter(id__in=aids).only("headline", "date", "pagenr", "length", "medium")
        return { a.id : a for a in articles }

    def get_article(self, coded_article):
        return self._get_articles().get(coded_article.article_id)

    def get_medium(self, coded_article):
        return self.get_article(coded_article).medium_id

    class Meta:
        model = CodedArticle
class CodedArticleViewSetMixin(AmCATViewSetMixin):
    """Mixin wiring the 'coded_article' URL component to its model and
    serializer."""
    model_serializer_class = CodedArticleSerializer
    model_key = "coded_article"
    model = CodedArticle
class CodedArticleViewSet(ProjectViewSetMixin, CodingJobViewSetMixin,
                          CodedArticleViewSetMixin, DatatablesMixin, ReadOnlyModelViewSet):
    """Read-only endpoint listing the coded articles of one codingjob."""
    model = CodedArticle
    model_serializer_class = CodedArticleSerializer
    extra_filters = ("article__pagenr",)

    def filter_queryset(self, queryset):
        # restrict results to coded articles of the codingjob in the URL
        qs = super(CodedArticleViewSet, self).filter_queryset(queryset)
        return qs.filter(id__in=self.codingjob.coded_articles.all())
class CodedArticleSentenceViewSet(ProjectViewSetMixin, CodingJobViewSetMixin,
                                  CodedArticleViewSetMixin, SentenceViewSetMixin,
                                  DatatablesMixin, ReadOnlyModelViewSet):
    """Read-only endpoint listing the sentences of one coded article."""
    model = Sentence
    model_serializer_class = SentenceSerializer

    def filter_queryset(self, queryset):
        # restrict results to sentences of the coded article in the URL
        qs = super(CodedArticleSentenceViewSet, self).filter_queryset(queryset)
        return qs.filter(article__id=self.coded_article.article_id)
class TestCodedArticleSerializer(amcattest.AmCATTestCase):
    # Simulating request
    class View(object):
        # Stand-in for a DRF view: exposes either `object` (detail view)
        # or `object_list` (list view), as the serializer expects.
        def __init__(self, objs):
            if isinstance(objs, CodedArticle):
                self.object = objs
            else:
                self.object_list = objs

    def _get_serializer(self, coded_article):
        return CodedArticleSerializer(context={"view" : self.View(coded_article)})

    def test_fields(self):
        # article properties must be passed through unchanged
        c = amcattest.create_test_job()
        a = c.articleset.articles.all()[0]
        ca = c.coded_articles.all()[0]
        s = self._get_serializer(c.coded_articles.all())
        self.assertEqual(a.headline, s.get_headline(ca))
        self.assertEqual(a.date, s.get_date(ca))
        self.assertEqual(a.pagenr, s.get_pagenr(ca))
        self.assertEqual(a.length, s.get_length(ca))

    def test_n_queries(self):
        # all article lookups on one serializer must share a single query
        c = amcattest.create_test_job(10)
        s = self._get_serializer(c.coded_articles.all())
        ca1, ca2, ca3 = c.coded_articles.all()[0:3]

        with self.checkMaxQueries(1):
            s.get_headline(ca1)
            s.get_headline(ca2)
            s.get_headline(ca3)
            s.get_date(ca3)
            s.get_pagenr(ca3)
            s.get_length(ca3)
|
tiredtyrant/CloudBot | refs/heads/gonzobot | tests/plugin_tests/test_yelling.py | 3 | from mock import MagicMock
def test_yell_check():
    """yell_check must kick non-caps messages in #yelling, ignore short
    messages and other channels, and strip URLs before checking when the
    link_announcer plugin is available."""
    conn = MagicMock()
    bot = conn.bot
    plugin_manager = bot.plugin_manager
    from plugins.yelling import yell_check

    # no link_announcer plugin: lowercase message in #yelling -> kick
    plugin_manager.find_plugin.return_value = None
    yell_check(conn, '#yelling', 'aaaaaaaaaaaaaa', bot, 'testuser')
    conn.cmd.assert_called_with('KICK', '#yelling', 'testuser', "USE MOAR CAPS YOU TROGLODYTE!")
    conn.cmd.reset_mock()

    # message too short to trigger the check
    yell_check(conn, '#yelling', '11', bot, 'testuser')
    conn.cmd.assert_not_called()
    conn.cmd.reset_mock()

    # wrong channel: never kicked
    yell_check(conn, '#yelling1', '11', bot, 'testuser')
    conn.cmd.assert_not_called()
    conn.cmd.reset_mock()

    # with link_announcer available its url_re is used to strip URLs;
    # the remaining lowercase text still draws a kick
    plugin_manager.find_plugin.return_value = fake_plugin = MagicMock()
    from plugins.link_announcer import url_re
    fake_plugin.code.url_re = url_re
    yell_check(conn, '#yelling', 'http://a aaaaaaaaaaaaaaaaaaaaaa', bot, 'testuser')
    conn.cmd.assert_called_with('KICK', '#yelling', 'testuser', "USE MOAR CAPS YOU TROGLODYTE!")
    conn.cmd.reset_mock()
|
kelvindk/Video-Stabilization | refs/heads/master | boost_1_42_0/tools/build/v2/tools/make.py | 4 | # Status: being ported by Vladimir Prus
# Copyright 2003 Dave Abrahams
# Copyright 2003 Douglas Gregor
# Copyright 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This module defines the 'make' main target rule.
from b2.build.targets import BasicTarget
from b2.build.virtual_target import Action, FileTarget
from b2.build import type
from b2.manager import get_manager
import b2.build.property_set
class MakeTarget(BasicTarget):
    """Main target whose build action is the user-supplied bjam rule
    recorded in the <action> feature of its property set."""
    def construct(self, name, source_targets, property_set):
        action_name = property_set.get("<action>")[0]
        action = Action(get_manager(), source_targets, action_name, property_set)
        # FIXME: type.type uses global data.
        target = FileTarget(self.name(), 1, type.type(self.name()),
                            self.project(), action)
        # no usage requirements; just the registered virtual target
        return [ b2.build.property_set.empty(),
                 [self.project().manager().virtual_targets().register(target)]]
def make (target_name, sources, generating_rule,
          requirements=None, usage_requirements=None):
    """Declare a 'make' main target: a file produced by invoking the
    user-supplied bjam rule *generating_rule* on *sources*.

    target_name and generating_rule arrive as one-element lists (bjam
    argument convention). The generating rule is recorded as an <action>
    requirement and registered as a bjam action.
    """
    target_name = target_name[0]
    generating_rule = generating_rule[0]

    # Copy instead of appending in place: the caller's requirements list
    # must not be mutated by adding the <action> feature.
    requirements = list(requirements) if requirements else []
    requirements.append("<action>%s" % generating_rule)

    m = get_manager()
    targets = m.targets()
    project = m.projects().current()
    engine = m.engine()

    engine.register_bjam_action(generating_rule)

    targets.main_target_alternative(MakeTarget(
        target_name, project,
        targets.main_target_sources(sources, target_name),
        targets.main_target_requirements(requirements, project),
        targets.main_target_default_build([], project),
        targets.main_target_usage_requirements(usage_requirements or [], project)))
get_manager().projects().add_rule("make", make)
|
jamesgk/robofab | refs/heads/master | Docs/Examples/talks/session1_06.py | 7 | # robothon06
# get a particular glyph
from robofab.world import CurrentFont
font = CurrentFont()
# set the family/style names, then let RoboFab derive the composite names
font.info.familyName = "myFamilyName"
font.info.styleName = "myStyleName"
font.info.autoNaming()
# Python 2 print statements (this example predates Python 3)
print font.info.fullName
print font.info.fontName
print font.info.fondName
rnestler/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/recwarn.py | 178 | """ recording warnings during test function execution. """
import inspect
import _pytest._code
import py
import sys
import warnings
import pytest
@pytest.yield_fixture
def recwarn(request):
    """Return a WarningsRecorder instance that provides these methods:

    * ``pop(category=None)``: return last warning matching the category.
    * ``clear()``: clear list of warnings

    See http://docs.python.org/library/warnings.html for information
    on warning categories.
    """
    wrec = WarningsRecorder()
    with wrec:
        # reset the filter state for the duration of the test
        warnings.simplefilter('default')
        yield wrec
def pytest_namespace():
    """Expose the public helpers on the ``pytest`` namespace."""
    return dict(deprecated_call=deprecated_call, warns=warns)
def deprecated_call(func=None, *args, **kwargs):
    """ assert that calling ``func(*args, **kwargs)`` triggers a
    ``DeprecationWarning`` or ``PendingDeprecationWarning``.

    This function can be used as a context manager::

        >>> with deprecated_call():
        ...    myobject.deprecated_method()

    Note: we cannot use WarningsRecorder here because it is still subject
    to the mechanism that prevents warnings of the same type from being
    triggered twice for the same module. See #1190.
    """
    if not func:
        # no callable given: act as a context manager instead
        return WarningsChecker(expected_warning=DeprecationWarning)

    categories = []

    def warn_explicit(message, category, *args, **kwargs):
        # record the category, then defer to the real implementation
        categories.append(category)
        old_warn_explicit(message, category, *args, **kwargs)

    def warn(message, category=None, *args, **kwargs):
        # warnings.warn accepts either a Warning instance or a category
        if isinstance(message, Warning):
            categories.append(message.__class__)
        else:
            categories.append(category)
        old_warn(message, category, *args, **kwargs)

    # monkey-patch the warnings module for the duration of the call,
    # restoring the originals even if func raises
    old_warn = warnings.warn
    old_warn_explicit = warnings.warn_explicit
    warnings.warn_explicit = warn_explicit
    warnings.warn = warn
    try:
        ret = func(*args, **kwargs)
    finally:
        warnings.warn_explicit = old_warn_explicit
        warnings.warn = old_warn

    deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
    if not any(issubclass(c, deprecation_categories) for c in categories):
        __tracebackhide__ = True
        raise AssertionError("%r did not produce DeprecationWarning" % (func,))

    return ret
def warns(expected_warning, *args, **kwargs):
    """Assert that code raises a particular class of warning.

    Specifically, the input @expected_warning can be a warning class or
    tuple of warning classes, and the code must return that warning
    (if a single class) or one of those warnings (if a tuple).

    This helper produces a list of ``warnings.WarningMessage`` objects,
    one for each warning raised.

    This function can be used as a context manager, or any of the other ways
    ``pytest.raises`` can be used::

        >>> with warns(RuntimeWarning):
        ...    warnings.warn("my warning", RuntimeWarning)
    """
    wcheck = WarningsChecker(expected_warning)
    if not args:
        # context-manager usage
        return wcheck
    elif isinstance(args[0], str):
        # string usage: execute the code in the caller's namespace
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)

        with wcheck:
            code = _pytest._code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
    else:
        # callable usage: invoke it inside the checker
        func = args[0]
        with wcheck:
            return func(*args[1:], **kwargs)
class RecordedWarning(object):
    """Value object holding the details of one captured warning."""

    def __init__(self, message, category, filename, lineno, file, line):
        (self.message, self.category, self.filename,
         self.lineno, self.file, self.line) = (
            message, category, filename, lineno, file, line)
class WarningsRecorder(object):
    """A context manager to record raised warnings.

    Adapted from `warnings.catch_warnings`.
    """

    def __init__(self, module=None):
        # the (possibly stubbed) warnings module to patch on __enter__
        self._module = sys.modules['warnings'] if module is None else module
        self._entered = False
        self._list = []

    @property
    def list(self):
        """The list of recorded warnings."""
        return self._list

    def __getitem__(self, i):
        """Get a recorded warning by index."""
        return self._list[i]

    def __iter__(self):
        """Iterate through the recorded warnings."""
        return iter(self._list)

    def __len__(self):
        """The number of recorded warnings."""
        return len(self._list)

    def pop(self, cls=Warning):
        """Pop the first recorded warning, raise exception if not exists."""
        for i, w in enumerate(self._list):
            if issubclass(w.category, cls):
                return self._list.pop(i)
        __tracebackhide__ = True
        raise AssertionError("%r not found in warning list" % cls)

    def clear(self):
        """Clear the list of recorded warnings."""
        self._list[:] = []

    def __enter__(self):
        # re-entering would clobber the saved originals, so forbid it
        if self._entered:
            __tracebackhide__ = True
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # save the module's filter list and showwarning hook so __exit__
        # can restore them
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        def showwarning(message, category, filename, lineno,
                        file=None, line=None):
            self._list.append(RecordedWarning(
                message, category, filename, lineno, file, line))

            # still perform old showwarning functionality
            self._showwarning(
                message, category, filename, lineno, file=file, line=line)

        self._module.showwarning = showwarning

        # allow the same warning to be raised more than once
        self._module.simplefilter('always')
        return self

    def __exit__(self, *exc_info):
        if not self._entered:
            __tracebackhide__ = True
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # restore the original filters and showwarning hook
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
class WarningsChecker(WarningsRecorder):
    """WarningsRecorder that additionally asserts, on clean exit, that at
    least one warning matching ``expected_warning`` was recorded.

    ``expected_warning`` may be a warning class, a tuple of warning
    classes, or None (record only, assert nothing).
    """

    def __init__(self, expected_warning=None, module=None):
        super(WarningsChecker, self).__init__(module=module)

        msg = ("exceptions must be old-style classes or "
               "derived from Warning, not %s")
        if isinstance(expected_warning, tuple):
            for exc in expected_warning:
                if not inspect.isclass(exc):
                    raise TypeError(msg % type(exc))
        elif inspect.isclass(expected_warning):
            # normalize a single class to a one-element tuple
            expected_warning = (expected_warning,)
        elif expected_warning is not None:
            raise TypeError(msg % type(expected_warning))

        self.expected_warning = expected_warning

    def __exit__(self, *exc_info):
        super(WarningsChecker, self).__exit__(*exc_info)

        # only check if we're not currently handling an exception
        if all(a is None for a in exc_info):
            if self.expected_warning is not None:
                # use issubclass rather than exact-class membership so
                # that subclasses of the expected category also satisfy
                # the check (matching the documented contract)
                if not any(issubclass(r.category, self.expected_warning)
                           for r in self):
                    __tracebackhide__ = True
                    pytest.fail("DID NOT WARN")
|
EduPepperPD/pepper2013 | refs/heads/master | static/js/vendor/mathjax-MathJax-c9db6ac/docs/source/conf.py | 86 | # -*- coding: utf-8 -*-
#
# MathJax documentation build configuration file, created by
# sphinx-quickstart on Sun May 16 23:18:19 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MathJax'
copyright = u'2012 Design Science'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
#highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'mjtheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'MathJaxdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MathJax.tex', u'MathJax Documentation',
u'Davide Cervone, Casey Stark, Robert Miner, Paul Topping', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
dongjoon-hyun/tensorflow | refs/heads/master | tensorflow/compiler/tests/lrn_ops_test.py | 9 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Local Response Normalization ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
# Local response normalization tests. The forward tests are copied from
# tensorflow/python/kernel_tests/lrn_op_test.py
class LRNTest(xla_test.XLATestCase):
  """Tests the XLA LRN kernel against a NumPy reference implementation,
  and LRNGrad against the CPU kernel."""

  def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
           beta=0.5):
    """Compute expected result."""
    # Naive per-element reference: for every position, normalize by the
    # sum of squares over a window of `lrn_depth_radius` on each side
    # along the depth axis.
    output = copy.deepcopy(input_image)
    batch_size = input_image.shape[0]
    rows = input_image.shape[1]
    cols = input_image.shape[2]
    depth = input_image.shape[3]
    for b in range(batch_size):
      for r in range(rows):
        for c in range(cols):
          for d in range(depth):
            begin = max(0, d - lrn_depth_radius)
            end = min(depth, d + lrn_depth_radius + 1)
            patch = input_image[b, r, c, begin:end]
            output[b, r, c, d] /= (
                np.power(bias + alpha * np.sum(patch * patch), beta))
    return output

  def _RunAndVerify(self, dtype):
    """Run LRN on a random input with random parameters and compare the
    result against the reference implementation."""
    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 16, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      p = array_ops.placeholder(dtype, shape=shape)
      # random depth_radius, bias, alpha, beta
      lrn_depth_radius = np.random.randint(1, shape[3])
      bias = 1.0 + np.random.rand()
      alpha = 2.0 * np.random.rand()
      beta = 2.0 * np.random.rand()
      with self.test_scope():
        lrn_t = nn.local_response_normalization(
            p,
            name="lrn",
            depth_radius=lrn_depth_radius,
            bias=bias,
            alpha=alpha,
            beta=beta)
      params = {p: np.random.rand(*shape).astype("f")}
      result = lrn_t.eval(feed_dict=params)
    expected = self._LRN(
        params[p],
        lrn_depth_radius=lrn_depth_radius,
        bias=bias,
        alpha=alpha,
        beta=beta)
    err = np.amax(np.abs(result - expected))
    print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
          err)
    # looser tolerance for non-float32 dtypes
    if dtype == dtypes.float32:
      self.assertTrue(err < 1e-4)
    else:
      self.assertTrue(err < 1e-2)
    self.assertShapeEqual(expected, lrn_t)

  def testCompute(self):
    for _ in range(2):
      self._RunAndVerify(dtypes.float32)

  def testLrnGrad(self):
    # Test for LRNGrad that compares against the CPU implementation.
    shape = [1, 2, 3, 4]
    total_size = np.prod(shape)
    in_image_vals = np.arange(1, total_size + 1, dtype=np.float32)
    out_image_vals = np.arange(1, total_size + 1, dtype=np.float32)
    out_grads_vals = np.arange(1, total_size + 1, dtype=np.float32)
    depth_radius = np.random.randint(1, shape[3])
    bias = 1.0 + np.random.rand()
    alpha = 1.0 * np.random.rand()
    beta = 1.0 * np.random.rand()

    with self.cached_session():
      in_image = constant_op.constant(in_image_vals, shape=shape)
      out_image = constant_op.constant(out_image_vals, shape=shape)
      out_grads = constant_op.constant(out_grads_vals, shape=shape)
      # same op on the CPU device and under the XLA test scope
      with ops.device(CPU_DEVICE):
        expected = gen_nn_ops.lrn_grad(out_grads, in_image, out_image,
                                       depth_radius, bias, alpha, beta)
      with self.test_scope():
        actual = gen_nn_ops.lrn_grad(out_grads, in_image, out_image,
                                     depth_radius, bias, alpha, beta)
      expected_val = expected.eval()
      actual_val = actual.eval()
    self.assertAllClose(actual_val, expected_val, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
|
klahnakoski/ActiveData | refs/heads/dev | vendor/jx_base/expressions/is_boolean_op.py | 1 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions.expression import Expression
from jx_base.expressions.false_op import FALSE
from mo_json import BOOLEAN
class IsBooleanOp(Expression):
    """Expression testing whether ``term`` is a boolean."""
    data_type = BOOLEAN

    def __init__(self, term):
        Expression.__init__(self, [term])
        self.term = term

    def __data__(self):
        # JSON-serializable form of this expression
        return {"is_boolean": self.term.__data__()}

    def vars(self):
        return self.term.vars()

    def map(self, map_):
        # rebuild over the mapped term
        return (IsBooleanOp(self.term.map(map_)))

    def missing(self, lang):
        # the boolean test always produces a value, so never "missing"
        return FALSE
|
geekroot/erpnext | refs/heads/develop | erpnext/setup/setup_wizard/default_website.py | 38 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import nowdate
class website_maker(object):
    """Bootstrap the default website for a newly set-up company.

    Constructing an instance immediately creates/updates the homepage,
    the website settings (navbar), and a starter blog for the setup user.
    """

    def __init__(self, args):
        self.args = args
        self.company = args.company_name
        self.tagline = args.company_tagline
        self.user = args.name
        # All side effects happen at construction time.
        self.make_web_page()
        self.make_website_settings()
        self.make_blog()

    def make_web_page(self):
        """Point the standard Homepage document at this company."""
        homepage = frappe.get_doc('Homepage', 'Homepage')
        homepage.company = self.company
        homepage.tag_line = self.tagline
        homepage.setup_items()
        homepage.save()

    def make_website_settings(self):
        """Set the home page and the standard top-bar navigation entries."""
        website_settings = frappe.get_doc("Website Settings", "Website Settings")
        website_settings.home_page = 'home'
        website_settings.brand_html = self.company
        website_settings.copyright = self.company
        website_settings.top_bar_items = []
        # Rebuild the navbar from scratch with the three default entries.
        for label, url in (
                ("Contact", "/contact"),
                ("Blog", "/blog"),
                (_("Products"), "/products")):
            website_settings.append("top_bar_items", {
                "doctype": "Top Bar Item",
                "label": label,
                "url": url,
            })
        website_settings.save()

    def make_blog(self):
        """Create a Blogger profile for the setup user plus a first post."""
        user = frappe.get_doc("User", self.user)
        blogger = frappe.new_doc("Blogger")
        blogger.user = self.user
        full_name = user.first_name
        if user.last_name:
            full_name += " " + user.last_name
        blogger.full_name = full_name
        blogger.short_name = user.first_name.lower()
        blogger.avatar = user.user_image
        blogger.insert()

        blog_category = frappe.get_doc({
            "doctype": "Blog Category",
            "category_name": "general",
            "published": 1,
            "title": _("General")
        }).insert()

        frappe.get_doc({
            "doctype": "Blog Post",
            "title": "Welcome",
            "published": 1,
            "published_on": nowdate(),
            "blogger": blogger.name,
            "blog_category": blog_category.name,
            "blog_intro": "My First Blog",
            "content": frappe.get_template("setup/setup_wizard/data/sample_blog_post.html").render(),
        }).insert()
def test():
    # Manual smoke-test helper: wipe any documents left by a previous run,
    # then rebuild the default website for a throw-away company.
    for doctype, name in (
            ("Web Page", "test-company"),
            ("Blog Post", "welcome"),
            ("Blogger", "administrator"),
            ("Blog Category", "general")):
        frappe.delete_doc(doctype, name)
    # NOTE(review): website_maker.__init__ reads attribute-style fields
    # (args.company_name), so passing a plain dict looks wrong — likely
    # needs frappe._dict; confirm before relying on this helper.
    website_maker({'company': "Test Company", 'company_tagline': "Better Tools for Everyone", 'name': "Administrator"})
    frappe.db.commit()
|
mxcube/lucid | refs/heads/master | setup.py | 1 | from distutils.core import setup
import sys
# Minimal distutils build script for the "lucid" (Loop and uCrystals
# Identification) package: a single pure-Python package, no extensions.
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrating
# to setuptools is advisable.  ``import sys`` above appears unused.
setup(name = "lucid", version = "0.1",
      description = "Loop and uCrystals Identification",
      author="E. Francois, J. Kieffer, M. Guijarro (ESRF)",
      package_dir={"lucid": "lucid"},
      packages = ["lucid"])
|
bzhpwr/MediExports | refs/heads/master | project_env/lib/python2.6/site-packages/pip/_vendor/packaging/_compat.py | 901 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
# Interpreter major-version flags; the vendored ``packaging`` modules branch
# on these rather than depending on ``six``.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# flake8: noqa

# ``basestring`` exists only on Python 2; expose a uniform ``string_types``
# tuple usable with isinstance() on either interpreter.
if PY3:
    string_types = str,
else:
    string_types = basestring,
def with_metaclass(meta, *bases):
    """Create a base class equipped with metaclass ``meta`` and ``bases``.

    Returns a throw-away class whose (temporary) metaclass rebuilds the
    first subclass derived from it using the real ``meta`` and ``bases``,
    so only one level of dummy class instantiation ever happens and the
    temporary layer never appears in the final class's MRO.
    """
    class _OneShotMeta(meta):
        def __new__(mcs, name, _this_bases, attrs):
            # Discard the dummy base and re-create the class for real.
            return meta(name, bases, attrs)
    return type.__new__(_OneShotMeta, 'temporary_class', (), {})
|
henaras/horizon | refs/heads/master | openstack_dashboard/dashboards/project/volumes/utils.py | 72 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard import api
def availability_zone_list(request):
    """Utility method to retrieve a list of availability zones."""
    try:
        # Guard clause: without the AvailabilityZones extension there is
        # nothing to list.
        if not api.cinder.extension_supported(request, 'AvailabilityZones'):
            return []
        return api.cinder.availability_zone_list(request)
    except Exception:
        # Surface the failure through Horizon's handler, then fall back to
        # an empty list (the handler may re-raise depending on settings).
        exceptions.handle(request,
                          _('Unable to retrieve volumes availability zones.'))
        return []
|
tchlux/fmodpy | refs/heads/master | fmodpy/__init__.py | 1 | # Set up some package-related things.
import os

# Absolute path of the installed package directory; used to locate data files.
DIRECTORY = os.path.dirname(os.path.abspath(__file__))
ABOUT_DIR = os.path.join(DIRECTORY, "about")

# Read the package version from about/version.txt so it is defined in one place.
with open(os.path.join(ABOUT_DIR,"version.txt")) as f:
    __version__ = f.read().strip()
del f  # keep the module namespace clean

# Steal the documentation from .fmodpy, instead of duplicating here.
from .fmodpy import __doc__

# Import the main configuration (needed to initialize the configuration).
import fmodpy.config

# Import the main features of this package.
from .fmodpy import fimport, configure

# Set "__all__" so that "from fmodpy import *" returns expected stuff.
__all__ = ["fimport", "configure"]
|
runfalk/psycospans | refs/heads/master | psycospans/__init__.py | 1 | from functools import partial, wraps
from psycopg2 import connect as _connect
from psycopg2.extensions import register_adapter
from spans import *
from ._utils import adapt_range, register_range_caster, query_range_oids, floatrange_preprocess
__all__ = [
"connect",
"register_range_type"
]
@wraps(_connect)
def connect(*keys, **kwargs):
    """Open a psycopg2 connection with the spans range casters registered.

    Accepts the same arguments as :func:`psycopg2.connect`.

    Raises:
        RuntimeError: when the server is older than PostgreSQL 9.2
            (server_version < 90200), which introduced range types.
    """
    conn = _connect(*keys, **kwargs)
    if conn.server_version < 90200:
        # Fix: the placeholder {version} is named, so the value must be
        # passed as a keyword. The original positional .format() raised
        # KeyError here instead of the intended RuntimeError.
        raise RuntimeError(
            "Range types not available in version {version}".format(
                version=conn.server_version))

    # Register casters for the built-in range types on this connection,
    # using PostgreSQL's well-known OIDs for each type.
    register_range_caster(
        "int4range", intrange, oid=3904, subtype_oid=23, array_oid=3905, scope=conn)
    register_range_caster(
        "int8range", intrange, oid=3926, subtype_oid=20, array_oid=3927, scope=conn)
    register_range_caster(
        "numrange", floatrange_preprocess, oid=3906, subtype_oid=1700, array_oid=3907, scope=conn)
    register_range_caster(
        "daterange", daterange, oid=3912, subtype_oid=1082, array_oid=3913, scope=conn)
    register_range_caster(
        "tsrange", datetimerange, oid=3908, subtype_oid=1114, array_oid=3909, scope=conn)

    return conn
def register_range_type(pgrange, pyrange, conn):
    """Register a custom PostgreSQL range type with its Python counterpart.

    >>> register_range_type("int4range", intrange, conn)

    After this call, ``pyrange`` values are adapted to ``pgrange`` in
    queries, and ``pgrange`` values fetched in result rows are cast back
    into ``pyrange``.

    ``pgrange`` should be the full name, including schema, of the custom
    range type.

    Note that adaption is global: any regular psycopg2 connection will
    adapt the Python range to its proper PostgreSQL type. Parsing of rows
    coming back from the database, however, is registered only on the
    given connection.
    """
    register_adapter(pyrange, partial(adapt_range, pgrange))
    # Look up the type's OIDs on this server, then install the result caster.
    oids = query_range_oids(pgrange, conn)
    register_range_caster(pgrange, pyrange, *oids, scope=conn)
# Globally adapt the spans types to PostgreSQL's built-in range types, so any
# psycopg2 connection (not only those from connect()) can send them as query
# parameters.
register_adapter(intrange, partial(adapt_range, "int4range"))
register_adapter(floatrange, partial(adapt_range, "numrange"))
register_adapter(daterange, partial(adapt_range, "daterange"))
register_adapter(datetimerange, partial(adapt_range, "tsrange"))
|
kennethlove/django | refs/heads/master | django/conf/locale/te/formats.py | 433 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Formats explicitly defined for the Telugu (te) locale; the commented-out
# names below fall back to Django's global defaults.
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
ianmabie/uberpy | refs/heads/master | venv/lib/python2.7/site-packages/wheel/test/test_tool.py | 237 | from .. import tool
def test_keygen():
    """Exercise ``tool.keygen`` against in-memory keyring/WheelKeys doubles.

    Builds a ``get_keyring`` factory that returns stub classes so the test
    neither persists the wheel key config (``save`` is a no-op) nor touches
    the real system keyring (passwords are held in a plain attribute).
    """
    def get_keyring():
        # Start from the real classes so the stubs keep their interfaces.
        WheelKeys, keyring = tool.get_keyring()

        class WheelKeysTest(WheelKeys):
            def save(self):
                # Skip writing the key configuration to disk.
                pass

        class keyringTest:
            # Mirror the attributes tool.keygen reads from the keyring module.
            backend = keyring.backend

            class backends:
                file = keyring.backends.file

            @classmethod
            def get_keyring(cls):
                # In-memory password store standing in for the OS keyring.
                class keyringTest2:
                    pw = None

                    def set_password(self, a, b, c):
                        self.pw = c

                    def get_password(self, a, b):
                        return self.pw
                return keyringTest2()

        return WheelKeysTest, keyringTest

    tool.keygen(get_keyring=get_keyring)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.