| column | type | range |
|---|---|---|
| repo_name | string | lengths 5–100 |
| path | string | lengths 4–231 |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | lengths 0–8.16k |
| middle | string | lengths 3–512 |
| suffix | string | lengths 0–8.17k |
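Each row splits one Python source file into `prefix`, `middle`, and `suffix` strings; concatenating the three fields in order reconstructs the original excerpt, while `repo_name`, `path`, `language`, `license`, `size`, and `score` carry the per-file metadata. Below, each record is listed as its metadata row followed by its code. The following is a minimal sketch of that reconstruction, assuming the rows have been loaded as plain Python dicts keyed by the column names above (the loading step and the helper name are assumptions, not part of this dump); the example values only illustrate how the first record's opening definition line splits across the three fields.

```python
# Minimal sketch (assumed dict-per-row layout): rebuild a source excerpt
# by concatenating the prefix, middle and suffix cells of one row.
def reconstruct_snippet(row):
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "repo_name": "nitely/Spirit",
    "path": "spirit/comment/like/tags.py",
    "language": "Python",
    "license": "mit",
    "size": 306,
    "score": 0.0,
    "prefix": "def render_like_form(comme",
    "middle": "nt, l",
    "suffix": "ike, next=None):",
}

print(reconstruct_snippet(example_row))
# -> def render_like_form(comment, like, next=None):
```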
| nitely/Spirit | spirit/comment/like/tags.py | Python | mit | 306 | 0 |
# -*- coding: utf-8 -*-
from ...core.tags.registry import register
from .forms import LikeForm
@register.inclusion_tag('spirit/comment/like/_form.html')
def render_like_form(comment, like, next=None):
form = LikeForm()
return {'form': form, 'comment_id': comment.pk, 'like': like, 'next': next}
| lyarwood/virt-deploy | virtdeploy/drivers/libvirt.py | Python | gpl-2.0 | 12,400 | 0 |
#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import json
import libvirt
import netaddr
import os
import os.path
import subprocess
from lxml import etree
from ..driverbase import VirtDeployDriverBase
from ..errors import InstanceNotFound
from ..errors import VirtDeployException
from ..utils import execute
from ..utils import random_password
DEFAULT_NET = 'default'
DEFAULT_POOL = 'default'
BASE_FORMAT = 'qcow2'
BASE_SIZE = '20G'
INSTANCE_DEFAULTS = {
'cpus': 2,
'memory': 1024,
'arch': 'x86_64',
'network': DEFAULT_NET,
'pool': DEFAULT_POOL,
'password': None,
}
_NET_ADD_LAST = libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST
_NET_MODIFY = libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY
_NET_DELETE = libvirt.VIR_NETWORK_UPDATE_COMMAND_DELETE
_NET_DNS_HOST = libvirt.VIR_NETWORK_SECTION_DNS_HOST
_NET_DHCP_HOST = libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST
_NET_UPDATE_FLAGS = (
libvirt.VIR_NETWORK_UPDATE_AFFECT_CONFIG |
libvirt.VIR_NETWORK_UPDATE_AFFECT_LIVE
)
_IMAGE_OS_TABLE = {
'centos-6': 'centos6.5', # TODO: fix versions
'centos-7.1': 'centos7.0', # TODO: fix versions
'centos-7.2': 'centos7.0', # TODO: fix versions
'rhel-6.7': 'rhel6', # TODO: fix versions
'rhel-7.2': 'rhel7', # TODO: fix versions
}
class VirtDeployLibvirtDriver(VirtDeployDriverBase):
def __init__(self, uri='qemu:///system'):
self._uri = uri
def _libvirt_open(self):
def libvirt_callback(ctx, err):
pass # add logging only when required
libvirt.registerErrorHandler(libvirt_callback, ctx=None)
return libvirt.open(self._uri)
def template_list(self):
templates = _get_virt_templates()
if templates['version'] != 1:
raise VirtDeployException('Unsupported template list version')
return [{'id': x['os-version'], 'name': x['full-name']}
for x in templates['templates']]
def instance_create(self, vmid, template, **kwargs):
kwargs = dict(INSTANCE_DEFAULTS.items() + kwargs.items())
name = '{0}-{1}-{2}'.format(vmid, template, kwargs['arch'])
image = '{0}.qcow2'.format(name)
conn = self._libvirt_open()
pool = conn.storagePoolLookupByName(kwargs['pool'])
net = conn.networkLookupByName(kwargs['network'])
repository = _get_pool_path(pool)
path = os.path.join(repository, image)
if os.path.exists(path):
raise OSError(os.errno.EEXIST, "Image already exists")
base = _create_base(template, kwargs['arch'], repository)
execute(('qemu-img', 'create', '-f', 'qcow2', '-b', base, image),
cwd=repository)
hostname = 'vm-{0}'.format(vmid)
domainname = _get_network_domainname(net)
if domainname is None:
fqdn = hostname
else:
fqdn = '{0}.{1}'.format(hostname, domainname)
if kwargs['password'] is None:
kwargs['password'] = random_password()
password_string = 'password:{0}'.format(kwargs['password'])
execute(('virt-customize',
'-a', path,
'--hostname', fqdn,
'--root-password', password_string))
network = 'network={0}'.format(kwargs['network'])
try:
conn.nwfilterLookupByName('clean-traffic')
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_NO_NWFILTER:
raise
else:
network += ',filterref=clean-traffic'
disk = 'path={0},format=qcow2,bus=scsi,discard=unmap'.format(path)
channel = 'unix,name=org.qemu.guest_agent.0'
execute(('virt-install',
'--quiet',
'--connect={0}'.format(self._uri),
'--name', name,
'--cpu', 'host-model-only,+vmx',
'--vcpus', str(kwargs['cpus']),
'--memory', str(kwargs['memory']),
'--controller', 'scsi,model=virtio-scsi',
'--disk', disk,
'--network', network,
'--graphics', 'spice',
'--channel', channel,
'--os-variant', _get_image_os(template),
'--import',
'--noautoconsole',
'--noreboot'))
netmac = _get_domain_mac_addresses(_get_domain(conn, name)).next()
ipaddress = _new_network_ipaddress(net)
# TODO: fix race between _new_network_ipaddress and ip reservation
_add_network_host(net, hostname, ipaddress)
_add_network_dhcp_host(net, hostname, netmac['mac'], ipaddress)
return {
'name': name,
'password': kwargs['password'],
'mac': netmac['mac'],
'hostname': fqdn,
'ipaddress': ipaddress,
}
def instance_address(self, vmid, network=None):
conn = self._libvirt_open()
dom = _get_domain(conn, vmid)
netmacs = _get_domain_macs_by_network(dom)
if network:
netmacs = {k: v for k, v in netmacs.iteritems()}
addresses = set()
for name, macs in netmacs.iteritems():
net = conn.networkLookupByName(name)
for lease in _get_network_dhcp_leases(net):
if lease['mac'] in macs:
addresses.add(lease['ip'])
return list(addresses)
def instance_start(self, vmid):
dom = _get_domain(self._libvirt_open(), vmid)
try:
dom.create()
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
def instance_stop(self, vmid):
dom = _get_domain(self._libvirt_open(), vmid)
try:
dom.shutdownFlags(
libvirt.VIR_DOMAIN_SHUTDOWN_GUEST_AGENT |
libvirt.VIR_DOMAIN_SHUTDOWN_ACPI_POWER_BTN
)
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
def instance_delete(self, vmid):
conn = self._libvirt_open()
dom = _get_domain(conn, vmid)
try:
dom.destroy()
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
xmldesc = etree.fromstring(dom.XMLDesc())
for disk in xmldesc.iterfind('./devices/disk/source'):
try:
os.remove(disk.get('file'))
except OSError as e:
if e.errno != os.errno.ENOENT:
raise
netmacs = _get_domain_macs_by_network(dom)
for network, macs in netmacs.iteritems():
net = conn.networkLookupByName(network)
for x in _get_network_dhcp_hosts(net):
if x['mac'] in macs:
_del_network_host(net, x['name'])
_del_network_dhcp_host(net, x['name'])
dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
def _get_image_os(image):
try:
return _IMAGE_OS_TABLE[image]
except KeyError:
return image.replace('-', '')
def _create_base(template, arch, repository):
name = '_{0}-{1}.{2}'.format(template, arch, BASE_FORMAT)
path = os.path.join(repository, name)
| iulian787/spack | var/spack/repos/builtin/packages/perl-devel-globaldestruction/package.py | Python | lgpl-2.1 | 641 | 0.00624 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlDevelGlobaldestruction(PerlPackage):
"""Makes Perl's global destruction less tricky to deal with"""
homepage = "http://search.cpan.org/~haarg/Devel-GlobalDestruction-0.14/lib/Devel/GlobalDestruction.pm"
url = "http://search.cpan.org/CPAN/authors/id/H/HA/HAARG/Devel-GlobalDestruction-0.14.tar.gz"
version('0.14', sha256='34b8a5f29991311468fe6913cadaba75fd5d2b0b3ee3bb41fe5b53efab9154ab')
| SouthForkResearch/CHaMP_Metrics | tools/auxmetrics/metriclib/coverMetrics.py | Python | gpl-3.0 | 9,707 | 0.003709 |
from lib.loghelper import Logger
import numpy as np
def visitCoverMetrics(visitMetrics, visitobj):
visit = visitobj['visit']
riparianStructures = visitobj['riparianStructures']
percentBigTreeCover(visitMetrics, riparianStructures)
percentCanopyNoCover(visitMetrics, riparianStructures)
percentGroundCover(visitMetrics, riparianStructures)
percentGroundCoverNoCover(visitMetrics, riparianStructures)
percentUnderstoryCover(visitMetrics, riparianStructures)
percentWoodyCover(visitMetrics, riparianStructures)
percentNonWoodyGroundCover(visitMetrics, visit, riparianStructures)
percentConiferousCover(visitMetrics, visit, riparianStructures)
def percentConiferousCover(visitMetrics, visit, riparianStructures):
if visit["iterationID"] == 1:
visitMetrics["PercentConiferousCover"] = getConiferousScore2011(riparianStructures)
else:
visitMetrics["PercentConiferousCover"] = getConiferousScore2012(riparianStructures)
def getConiferousScore2012(riparianStructures):
if riparianStructures is None:
return None
inScope = []
inScope.extend([s["value"]["LBCanopyWoodyConiferous"] + s["value"]["LBUnderstoryWoodyConiferous"] for s in riparianStructures["values"] if s["value"]["LBCanopyWoodyConiferous"] is not None and s["value"]["LBUnderstoryWoodyConiferous"] is not None])
inScope.extend([s["value"]["RBCanopyWoodyConiferous"] + s["value"]["RBUnderstoryWoodyConiferous"] for s in riparianStructures["values"] if s["value"]["RBCanopyWoodyConiferous"] is not None and s["value"]["RBUnderstoryWoodyConiferous"] is not None])
if inScope.__len__() > 0:
return np.mean(inScope)
else:
return None
def getConiferousScore2011(riparianStructures):
if riparianStructures is None:
return None
count = 0
result = 0
multiplicationFactors = {"Coniferous": 1, "Mixed": 0.5 }
for rec in [r for r in riparianStructures["values"]]:
if rec["value"]["LBCanopyBigTrees"] is not None and rec["value"]["LBCanopySmallTrees"] is not None and rec["value"]["LBCanopyVegetationType"] is not None:
lbfactor = 0
if rec["value"]["LBCanopyVegetationType"] in multiplicationFactors:
lbfactor = multiplicationFactors[rec["value"]["LBCanopyVegetationType"]]
lbunderstoryfactor = 0
if rec["value"]["LBUnderstoryVegetationType"] is not None and rec["value"]["LBUnderstoryVegetationType"] in multiplicationFactors:
lbunderstoryfactor = multiplicationFactors[rec["value"]["LBUnderstoryVegetationType"]]
result = result + (rec["value"]["LBCanopyBigTrees"] + rec["value"]["LBCanopySmallTrees"]) * lbfactor
lbunderstoryshrubs = 0
if rec["value"]["LBUnderstoryWoodyShrubs"] is not None:
lbunderstoryshrubs = rec["value"]["LBUnderstoryWoodyShrubs"]
result = result + (lbunderstoryshrubs * lbunderstoryfactor)
count = count + 1
if rec["value"]["RBCanopyBigTrees"] is not None and rec["value"]["RBCanopySmallTrees"] is not None and rec["value"]["RBCanopyVegetationType"] is not None:
rbfactor = 0
if rec["value"]["RBCanopyVegetationType"] in multiplicationFactors:
rbfactor = multiplicationFactors[rec["value"]["RBCanopyVegetationType"]]
rbunderstoryfactor = 0
if rec["value"]["RBUnderstoryVegetationType"] is not None and rec["value"]["RBUnderstoryVegetationType"] in multiplicationFactors:
rbunderstoryfactor = multiplicationFactors[rec["value"]["RBUnderstoryVegetationType"]]
result = result + (rec["value"]["RBCanopyBigTrees"] + rec["value"]["RBCanopySmallTrees"]) * rbfactor
rbunderstoryshrubs = 0
if rec["value"]["RBUnderstoryWoodyShrubs"] is not None:
rbunderstoryshrubs = rec["value"]["RBUnderstoryWoodyShrubs"]
result = result + (rbunderstoryshrubs * rbunderstoryfactor)
count = count + 1
if count == 0:
return None
return result / count
def percentBigTreeCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentBigTreeCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBCanopyBigTrees"] for s in riparianStructures["values"] if s["value"]["LBCanopyBigTrees"] is not None])
inScope.extend([s["value"]["RBCanopyBigTrees"] for s in riparianStructures["values"] if s["value"]["RBCanopyBigTrees"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentBigTreeCover"] = np.mean(inScope)
else:
visitMetrics["PercentBigTreeCover"] = None
def percentUnderstoryCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentUnderstoryNoCover"] = None
visitMetrics["PercentUnderstoryCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBUnderstoryCover"] for s in riparianStructures["values"] if s["value"]["LBUnderstoryCover"] is not None])
inScope.extend([s["value"]["RBUnderstoryCover"] for s in riparianStructures["values"] if s["value"]["RBUnderstoryCover"] is not None])
if inScope.__len__() > 0:
understoryCover = np.mean(inScope)
visitMetrics["PercentUnderstoryCover"] = understoryCover
visitMetrics["PercentUnderstoryNoCover"] = 100 - understoryCover
else:
visitMetrics["PercentUnderstoryCover"] = None
visitMetrics["PercentUnderstoryNoCover"] = None
def percentNonWoodyGroundCover(visitMetrics, visit, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentNonWoodyGroundCover"] = None
return
inScope = []
if visit["iterationID"] == 1:
inScope.extend([s["value"]["LBGroundcoverNonWoodyShrubs"] + s["value"]["LBUnderstoryNonWoodyShrubs"] for s in riparianStructures["values"] if s["value"]["LBGroundcoverNonWoodyShrubs"] is not None and s["value"]["LBUnderstoryNonWoodyShrubs"] is not None])
inScope.extend([s["value"]["RBGroundcoverNonWoodyShurbs"] + s["value"]["RBUnderstoryNonWoodyShrubs"] for s in riparianStructures["values"] if s["value"]["RBGroundcoverNonWoodyShurbs"] is not None and s["value"]["RBUnderstoryNonWoodyShrubs"] is not None])
else:
inScope.extend([s["value"]["LBUnderstoryNonWoodyForbesGrasses"] + s["value"]["LBGroundcoverNonWoodyForbesGrasses"] for s in riparianStructures["values"] if s["value"]["LBUnderstoryNonWoodyForbesGrasses"] is not None and s["value"]["LBGroundcoverNonWoodyForbesGrasses"] is not None])
inScope.extend([s["value"]["RBUnderstoryNonWoodyForbesGrasses"] + s["value"]["RBGroundcoverNonWoodyForbesGrasses"] for s in riparianStructures["values"] if s["value"]["RBUnderstoryNonWoodyForbesGrasses"] is not None and s["value"]["RBGroundcoverNonWoodyForbesGrasses"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentNonWoodyGroundCover"] = np.mean(inScope)
else:
visitMetrics["PercentNonWoodyGroundCover"] = None
def percentWoodyCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentWoodyCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBWoodyCover"] for s in riparianStructures["values"] if s["value"]["LBWoodyCover"] is not None])
inScope.extend([s["value"]["RBWoodyCover"] for s in riparianStructures["values"] if s["value"]["RBWoodyCover"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentWoodyCover"] = np.mean(inScope)
else:
visitMetrics["PercentWoodyCover"] = None
def percentGroundCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentGroundCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBGroundCover"] for s in riparianStructures["values"] if s["value"]["LBGroundCover"] is not None])
inScope.extend([s["value"]["RBGroundCover"] for s in riparianStructures["values"] if s["value"]["RBGroundCover"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentGroun
| jimi-c/ansible | contrib/inventory/ec2.py | Python | gpl-3.0 | 72,916 | 0.002373 |
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
Optional region environment variable if region is 'auto'
This script also assumes that there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
You can filter for specific EC2 instances by creating an environment variable
named EC2_INSTANCE_FILTERS, which has the same format as the instance_filters
entry documented in ec2.ini. For example, to find all hosts whose name begins
with 'webserver', one might use:
export EC2_INSTANCE_FILTERS='tag:Name=webserver*'
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
When destination_format and destination_format_tags are specified
the destination_format can be built from the instance tags and attributes.
The behavior will first check the user defined tags, then proceed to
check instance attributes, and finally if neither are found 'nil' will
be used instead.
'my_instance': {
'region': 'us-east-1', # attribute
'availability_zone': 'us-east-1a', # attribute
'private_dns_name': '172.31.0.1', # attribute
'ec2_tag_deployment': 'blue', # tag
'ec2_tag_clusterid': 'ansible', # tag
'ec2_tag_Name': 'webserver', # tag
...
}
Inside of the ec2.ini file the following settings are specified:
...
destination_format: {0}-{1}-{2}-{3}
destination_format_tags: Name,clusterid,deployment,private_dns_name
...
These settings would produce a destination_format as the following:
'webserver-ansible-blue-172.31.0.1'
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
from boto import sts
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3 # noqa
HAS_BOTO3 = True
except ImportError:
pass
from six.moves import configparser
from collections import defaultdict
import json
DEFAULTS = {
'all_elasticache_clusters': 'False',
'all_elasticache_nodes': 'False',
'all_elasticache_replication_groups': 'False',
'all_instances': 'False',
'all_rds_instances': 'False',
'aws_access_key_id': None,
'aws_secret_access_key': None,
'aws_security_token': None,
'boto_profile': None,
'cache_max_age': '300',
'cache_path': '~/.ansible/tmp',
'destination_variable': 'public_dns_name',
'elasticache': 'True',
'eucalyptus': 'False',
'eucalyptus_host': None,
'expand_csv_tags': 'False',
'group_by_ami_id': 'True',
'group_by_availability_zone': 'True',
'group_by_aws_account': 'False',
'group_by_elasticache_cluster': 'True',
'group_by_elasticache_engine': 'True',
'group_by_elasticache_parameter_group': 'True',
'group_by_elasticache_replication_group': 'True',
'group_by_instance_id': 'True',
'group_by_instance_state': 'False',
'group_by_instance_type': 'True',
'group_by_key_pair': 'True',
'group_by_platform': 'True',
'group_by_rds_engine': 'True',
'group_by_rds_parameter_group': 'True',
'group_by_region': 'True',
'group_by_route53_names': 'True',
'group_by_security_group': 'True',
'group_by_tag_keys': 'True',
'group_by_tag_none': 'True',
'group_by_vpc_id': 'True',
'hostname_variable': None,
'iam_role': None,
'include_rds_clusters': 'False',
'nested_groups': 'False',
'pattern_exclude': None,
'pattern_include': None,
'rds': 'False',
'regions': 'all',
'regions_exclude': 'us-gov-west-1, cn-north-1',
'replace_dash_in_groups': 'True',
'route53': 'False',
'route53_excluded_zones': '',
'route53_hostnames': None,
'stack_filters': 'False',
'vpc_destination_variable': 'ip_address'
}
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
self.aws_account_id = None
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# AWS credentials.
self.credentials = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
| meteoswiss-mdr/precipattractor | pymodules/time_tools_attractor.py | Python | gpl-3.0 | 12,112 | 0.011311 |
#!/usr/bin/env python
'''
Module to perform various time operations.
Documentation convention from https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
07.07.2016
Loris Foresti
'''
from __future__ import division
from __future__ import print_function
import datetime
import numpy as np
import time
import sys
import math
fmt1 = "%.1f"
fmt2 = "%.2f"
def timestring2datetime(timestring):
'''
Function to convert a time stamp string YYYYmmDDHHMMSS to a datetime object.
Parameters
----------
timestring : str
Time string YYYYmmDDHHMMSS
Returns
-------
timeDate: datetime
Datetime object
'''
#timeDate = datetime.datetime.strptime(timestring,'%Y%m%d%H%M%S')
timeDate = datetime.datetime(int(timestring[0:4]), int(timestring[4:6]), int(timestring[6:8]), int(timestring[8:10]),int(timestring[10:12]))
return(timeDate)
def datetime2timestring(timeDate):
'''
Function to convert datetime object to a time stamp string YYYYmmDDHHMMSS.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
timeString: str
Time string YYYYmmDDHHMMSS
'''
timeString = timeDate.strftime("%Y%m%d%H%M%S")
return(timeString)
def datetime2juliantimestring(timeDate, format='YYJJJHHMM'):
'''
Function to convert datetime object to a Julian time stamp string YYYYJJJHHMM.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
timeString: str
Time string YYYYJJJHHMM
'''
year, yearStr, julianDay, julianDayStr = parse_datetime(timeDate)
hour = timeDate.hour
minute = timeDate.minute
hourminStr = ('%02i' % hour) + ('%02i' % minute)
if format == 'YYYYJJJHHMM':
timeString = str(year) + julianDayStr + hourminStr
if format == 'YYJJJHHMM':
timeString = yearStr + julianDayStr + hourminStr
return(timeString)
def juliantimestring2datetime(timeString, format='YYJJJHHMM'):
'''
Function to convert Julian time stamp string to a datetime object.
Parameters
----------
timeString: str
Time string YYYYJJJHHMMSS
Returns
-------
timeDate : datetime
Datetime object
Note: julian day starts at 001 (i.e. January 1st)
'''
if format=='YYYYJJJHHMMSS':
if not len(timeString) == 13:
print("Not the right string length.")
sys.exit(1)
year = int(timeString[0:4])
day = int(timeString[4:7]) - 1
hour = int(timeString[7:9])
min = int(timeString[9:11])
sec = int(timeString[11:13])
totaldeltaDays = day + hour/24 + min/60/24 + sec/60/60/24
timeDate = datetime.datetime(year, 1, 1) + datetime.timedelta(days=totaldeltaDays)
elif format=='YYJJJHHMM':
if not len(timeString) == 9:
print("Not the right string length.")
sys.exit(1)
year = int(timeString[0:2])
if year > 80:
year = 1900 + year
else:
year = 2000 + year
day = int(timeString[2:5]) - 1
hour = int(timeString[5:7])
min = int(timeString[7:9])
totaldeltaDays = day + hour/24 + min/60/24
timeDate = datetime.datetime(year, 1, 1) + datetime.timedelta(days=totaldeltaDays)
else:
print("Julian time stamp string format not supported.")
sys.exit(1)
return(timeDate)
def juliantimestring2datetime_array(timeStampJulianArray, format='YYJJJHHMM', timeString=True):
'''
Same as above but for a list or array of time stamps.
'''
nrSamples = len(timeStampJulianArray)
# If not many samples...
if nrSamples < 1000000:
timeStampJulianArrayStr = np.array(map(lambda n: "%0.9i"%n, timeStampJulianArray))
timeStampJulianArrayDt = map(juliantimestring2datetime, timeStampJulianArrayStr)
if timeString == True:
timeStampArrayStr = map(datetime2timestring, timeStampJulianArrayDt)
else:
timeStampArrayStr = []
return(timeStampJulianArrayDt, timeStampArrayStr)
else:
# If a lot of samples
timeStampJulianSet = np.unique(timeStampJulianArray)
nrUniqueSamples = len(timeStampJulianSet)
print(nrSamples, nrUniqueSamples)
timeStampDt = np.empty((nrSamples,), dtype='datetime64[m]')
timeStampStr = np.empty((nrSamples,), dtype='S12')
# Do the operations over the unique time stamps
for i in range(0,nrUniqueSamples):
timeStampJulianStr = "%0.9i"% timeStampJulianSet[i]
dt = juliantimestring2datetime(timeStampJulianStr, format=format)
bool = (timeStampJulianArray == timeStampJulianSet[i])
# Set values in array
timeStampDt[bool] = dt
if timeString == True:
dtStr = datetime2timestring(dt)
timeStampStr[bool] = dtStr
# Print out advancement (for large arrays)
if ((i % 100) == 0):
print(fmt1 % (i/nrUniqueSamples*100),"%")
return(timeStampDt, timeStampStr)
def get_julianday(timeDate):
'''
Get Julian day from datetime object.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
julianDay: int
Julian day
'''
julianDay = timeDate.timetuple().tm_yday
return(julianDay)
def parse_datetime(timeDate):
'''
Function to parse a datetime object and return the year and Julian day in integer and string formats.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
year: int
Year
yearStr: str
Year string in YY
julianDay: int
Julian day
julianDayStr: str
Julian day string JJJ
'''
year = timeDate.year
yearStr = str(year)[2:4]
julianDay = get_julianday(timeDate)
julianDayStr = '%03i' % julianDay
yearJulianStr = yearStr + julianDayStr
return(year, yearStr, julianDay, julianDayStr)
def timestring_array2datetime_array(arrayTimeStampsStr):
'''
Function to convert a list array of time strings YYYYmmDDHHMMSS
into a list of datetime objects
Parameters
----------
arrayTimeStampsStr : list(str)
List of time strings YYYYmmDDHHMMSS
Returns
-------
arrayTimeStampsDt: list(datetime)
List of datetime objects
'''
timeStamps = np.array(arrayTimeStampsStr, dtype=int)
timeStampsStr = np.array(list(map(str,timeStamps)))
arrayTimeStampsDt = []
for t in range(0,len(arrayTimeStampsStr)):
timeDate = timestring2datetime(str(timeStampsStr[t]))
arrayTimeStampsDt.append(timeDate)
return(arrayTimeStampsDt)
def juliantimeInt2juliantimeStr(juliantimeInt):
'''
50010000 -> '050010000'
'''
timeStampJulianStr = map(lambda x: '%09i' % x, juliantimeInt)
return(timeStampJulianStr)
def year2digit_to_year4digit(arrayYear2digit):
'''
Function to convert an array of year strings YY into an array of year strings YYYY
'''
arrayYear2digit = np.array(arrayYear2digit, dtype=int)
arrayYear4digit = np.array(arrayYear2digit)
bool80 = (arrayYear2digit > 80)
arrayYear4digit[bool80] = arrayYear2digit[bool80] + 1900
arrayYear4digit[~bool80] = arrayYear2digit[~bool80] + 2000
return(arrayYear4digit)
def get_HHmm_str(hour, minute):
'''
Function to concatenate hours and minutes into a 4-digit string.
Parameters
----------
hour : int
minute: int
Returns
-------
hourminStr: str
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tests/test_rplot.py | Python | artistic-2.0 | 11,560 | 0.001298 |
# -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def between(a, b, x):
"""Check if x is in the somewhere between a and b.
Parameters:
-----------
a: float, interval start
b: float, interval end
x: float, value to test for
Returns:
--------
True if x is between a and b, False otherwise
"""
if a < b:
return x >= a and x <= b
else:
return x <= a and x >= b
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
"""
Tests for RPlot utility functions.
"""
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
def test_make_aes1(self):
aes = rplot.make_aes()
self.assertTrue(aes['x'] is None)
self.assertTrue(aes['y'] is None)
self.assertTrue(aes['size'] is None)
self.assertTrue(aes['colour'] is None)
self.assertTrue(aes['shape'] is None)
self.assertTrue(aes['alpha'] is None)
self.assertTrue(isinstance(aes, dict))
def test_make_aes2(self):
self.assertRaises(ValueError, rplot.make_aes,
size=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
colour=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
shape=rplot.ScaleSize('test'))
self.assertRaises(ValueError, rplot.make_aes,
alpha=rplot.ScaleShape('test'))
def test_dictionary_union(self):
dict1 = {1 : 1, 2 : 2, 3 : 3}
dict2 = {1 : 1, 2 : 2, 4 : 4}
union = rplot.dictionary_union(dict1, dict2)
self.assertEqual(len(union), 4)
keys = list(union.keys())
self.assertTrue(1 in keys)
self.assertTrue(2 in keys)
self.assertTrue(3 in keys)
self.assertTrue(4 in keys)
self.assertEqual(rplot.dictionary_union(dict1, {}), dict1)
self.assertEqual(rplot.dictionary_union({}, dict1), dict1)
self.assertEqual(rplot.dictionary_union({}, {}), {})
def test_merge_aes(self):
layer1 = rplot.Layer(size=rplot.ScaleSize('test'))
layer2 = rplot.Layer(shape=rplot.ScaleShape('test'))
rplot.merge_aes(layer1, layer2)
self.assertTrue(isinstance(layer2.aes['size'], rplot.ScaleSize))
self.assertTrue(isinstance(layer2.aes['shape'], rplot.ScaleShape))
self.assertEqual(layer2.aes['size'], layer1.aes['size'])
for key in layer2.aes.keys():
if key != 'size' and key != 'shape':
self.assertTrue(layer2.aes[key] is None)
def test_sequence_layers(self):
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='SepalLength', y='SepalWidth',
size=rplot.ScaleSize('PetalLength'))
layer3 = rplot.GeomPolyFit(2)
result = rplot.sequence_layers([layer1, layer2, layer3])
self.assertEqual(len(result), 3)
last = result[-1]
self.assertEqual(last.aes['x'], 'SepalLength')
self.assertEqual(last.aes['y'], 'SepalWidth')
self.assertTrue(isinstance(last.aes['size'], rplot.ScaleSize))
self.assertTrue(self.data is last.data)
self.assertTrue(rplot.sequence_layers([layer1])[0] is layer1)
@tm.mplskip
class TestTrellis(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/tips.csv')
self.data = read_csv(path, sep=',')
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='total_bill', y='tip')
layer3 = rplot.GeomPolyFit(2)
self.layers = rplot.sequence_layers([layer1, layer2, layer3])
self.trellis1 = rplot.TrellisGrid(['sex', 'smoker'])
self.trellis2 = rplot.TrellisGrid(['sex', '.'])
self.trellis3 = rplot.TrellisGrid(['.', 'smoker'])
self.trellised1 = self.trellis1.trellis(self.layers)
self.trellised2 = self.trellis2.trellis(self.layers)
self.trellised3 = self.trellis3.trellis(self.layers)
def test_grid_sizes(self):
self.assertEqual(len(self.trellised1), 3)
self.assertEqual(len(self.trellised2), 3)
self.assertEqual(len(self.trellised3), 3)
self.assertEqual(len(self.trellised1[0]), 2)
self.assertEqual(len(self.trellised1[0][0]), 2)
self.assertEqual(len(self.trellised2[0]), 2)
self.assertEqual(len(self.trellised2[0][0]), 1)
self.assertEqual(len(self.trellised3[0]), 1)
self.assertEqual(len(self.trellised3[0][0]), 2)
self.assertEqual(len(self.trellised1[1]), 2)
self.assertEqual(len(self.trellised1[1][0]), 2)
self.assertEqual(len(self.trellised2[1]), 2)
self.assertEqual(len(self.trellised2[1][0]), 1)
self.assertEqual(len(self.trellised3[1]), 1)
self.assertEqual(len(self.trellised3[1][0]), 2)
self.assertEqual(len(self.trellised1[2]), 2)
self.assertEqual(len(self.trellised1[2][0]), 2)
self.assertEqual(len(self.trellised2[2]), 2)
self.assertEqual(len(self.trellised2[2][0]), 1)
self.assertEqual(len(self.trellised3[2]), 1)
self.assertEqual(len(self.trellised3[2][0]), 2)
def test_trellis_cols_rows(self):
self.assertEqual(self.trellis1.cols, 2)
self.assertEqual(self.trellis1.rows, 2)
self.assertEqual(self.trellis2.cols, 1)
self.assertEqual(self.trellis2.rows, 2)
self.assertEqual(self.trellis3.cols, 2)
self.assertEqual(self.trellis3.rows, 1)
@tm.mplskip
class TestScaleGradient(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient("SepalLength", colour1=(0.2, 0.3,
0.4),
colour2=(0.8, 0.7, 0.6))
def test_gradient(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
@tm.mplskip
class TestScaleGradient2(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient2("SepalLength", colour1=(0.2, 0.3, 0.4), colour2=(0.8, 0.7, 0.6), colour3=(0.5, 0.5, 0.5))
def test_gradient2(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
r3, g3, b3 = self.gradient.colour3
value = row[self.gradient.column]
a_ = min(self.data[self.gradient.column])
b_ = max(self.data[self.gradient.column])
scaled = (value - a_) / (b_ - a_)
if scaled < 0.5:
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
else:
self.assertTrue(between(r2, r3, r))
self.assertTrue(between(g2, g3, g))
self.assertTrue(between(b2, b3, b))
@tm.mplskip
class TestScaleRandomColour(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.colour = rplot.ScaleRandomColour('SepalLength')
def test_random_colour(self):
for index in range(len(self.data)):
colour = self.colour(self.data, index)
| mvs-live/metaverse | test/test-rpc-v3/TestCase/Account/batch_account.py | Python | agpl-3.0 | 2,352 | 0.006378 |
from utils import common, database
from TestCase.MVSTestCase import *
class TestAccount(MVSTestCaseBase):
roles = ()
need_mine = False
def test_0_new_account(self):
'''create new account * 5000'''
account_table_file = '/home/%s/.metaverse/mainnet/account_table' % common.get_username()
origin_payload_size = database.get_payload_size(account_table_file)
batch_amount = 5000
lastwords = []
for i in range(batch_amount):
ec, message = mvs_rpc.new_account("Account_%s" % i, "123456")
self.assertEqual(ec, 0, message)
lastwords.append( message[-1] )
try:
current_payload_size = database.get_payload_size(account_table_file)
# each simple account record size < 300, but when getnew address, the account record will be create twice, so 600 is the reasonable record size.
self.assertGreater(600 * batch_amount, current_payload_size - origin_payload_size, "each account record size shall be less than 600.")
finally:
for i in range(batch_amount):
ec, message = mvs_rpc.delete_account("Account_%s" % i, "123456", lastwords[i])
self.assertEqual(ec, 0, message)
def test_1_new_address(self):
'''new address for Zac'''
max_duration = 0.01
avg_duration = 0.002
round = 5000
Zac.create()
account_table_file = '/home/%s/.metaverse/mainnet/account_table' % common.get_username()
try:
origin_payload_size = database.get_payload_size(account_table_file)
durations = []
for i in range(round):
duration, ret = common.duration_call(mvs_rpc.new_address, Zac.name, Zac.password)
self.assertEqual(ret[0], 0, "mvs_rpc.new_address failed!")
self.assertLess(duration, max_duration)
durations.append(duration)
self.assertLess(sum(durations), avg_duration*round)
current_payload_size = database.get_payload_size(account_table_file)
# each simple account record size < 300
self.assertGreater(300 * round, current_payload_size - origin_payload_size,
"each account record size shall be less than 300.")
finally:
Zac.delete()
| ubiquitypress/rua | src/submission/logic.py | Python | gpl-2.0 | 2,061 | 0 |
from django.core.exceptions import PermissionDenied
from core.models import Author, Editor
def copy_author_to_submission(user, book):
author = Author(
first_name=user.first_name,
middle_name=user.profile.middle_name,
last_name=user.last_name,
salutation=user.profile.salutation,
institution=user.profile.institution,
department=user.profile.department,
country=user.profile.country,
author_email=user.email,
biography=user.profile.biography,
orcid=user.profile.orcid,
twitter=user.profile.twitter,
linkedin=user.profile.linkedin,
facebook=user.profile.facebook,
)
author.save()
book.author.add(author)
return author
def copy_editor_to_submission(user, book):
editor = Editor(
first_name=user.first_name,
middle_name=user.profile.middle_name,
last_name=user.last_name,
salutation=user.profile.salutation,
institution=user.profile.institution,
department=user.profile.department,
country=user.profile.country,
author_email=user.email,
biography=user.profile.biography,
orcid=user.profile.orcid,
twitter=user.profile.twitter,
linkedin=user.profile.linkedin,
facebook=user.profile.facebook,
)
editor.save()
book.editor.add(editor)
return editor
def check_stage(book, check):
if book.submission_stage >= check:
pass
elif book.submission_date:
raise PermissionDenied()
else:
raise PermissionDenied()
def handle_book_labels(post, book, kind):
for _file in book.files.all():
if _file.kind == kind and post.get("%s" % _file.id, None):
_file.label = post.get("%s" % _file.id)
_file.save()
def handle_copyedit_author_labels(post, copyedit, kind):
for _file in copyedit.author_files.all():
if _file.kind == kind and post.get("%s" % _file.id, None):
_file.label = post.get("%s" % _file.id)
_file.save()
| hubert667/AIR | build/scripts-2.7/learning-experiment.py | Python | gpl-3.0 | 860 | 0.002326 |
#!/usr/bin/python
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
try:
from include import *
except:
pass
from experiment import GenericExperiment
if __name__ == "__main__":
experiment = GenericExperiment()
experiment.run()
| geopython/pywps | pywps/processing/job.py | Python | mit | 4,609 | 0.001519 |
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import os
import tempfile
import pywps.configuration as config
from pywps import Process, WPSRequest
from pywps.response.execute import ExecuteResponse
import json
import logging
LOGGER = logging.getLogger("PYWPS")
class Job(object):
"""
:class:`Job` represents a processing job.
"""
def __init__(self, process, wps_request, wps_response):
self.process = process
self.method = '_run_process'
self.wps_request = wps_request
self.wps_response = wps_response
@property
def name(self):
return self.process.identifier
@property
def workdir(self):
return self.process.workdir
@property
def uuid(self):
return self.process.uuid
@property
def json(self):
"""Return JSON encoded representation of the request
"""
obj = {
'process': self.process.json,
'wps_request': self.wps_request.json,
}
return json.dumps(obj, allow_nan=False)
@classmethod
def from_json(cls, value):
"""init this request from json back again
:param value: the json (not string) representation
"""
process = Process.from_json(value['process'])
wps_request = WPSRequest()
wps_request.json = json.loads(value['wps_request'])
wps_response = ExecuteResponse(
wps_request=wps_request,
uuid=process.uuid,
process=process)
wps_response.store_status_file = True
new_job = Job(
process=Process.from_json(value['process']),
wps_request=wps_request,
wps_response=wps_response)
return new_job
def dump(self):
LOGGER.debug('dump job ...')
filename = tempfile.mkstemp(prefix='job_', suffix='.dump', dir=self.workdir)[1]
with open(filename, 'w') as fp:
fp.write(self.json)
LOGGER.debug("dumped job status to {}".format(filename))
return filename
return None
@classmethod
def load(cls, filename):
LOGGER.debug('load job ...')
with open(filename, 'r') as fp:
job = Job.from_json(json.load(fp))
return job
return None
def run(self):
getattr(self.process, self.method)(self.wps_request, self.wps_response)
class JobLauncher(object):
"""
:class:`JobLauncher` is a command line tool to launch a job from a file
with a dumped job state.
Example call: ``joblauncher -c /etc/pywps.cfg job-1001.dump``
"""
def create_parser(self):
import argparse
parser = argparse.ArgumentParser(prog="joblauncher")
parser.add_argument("-c", "--config", help="Path to pywps configuration.")
parser.add_argument("filename", help="File with dumped pywps job object.")
return parser
def run(self, args):
if args.config:
LOGGER.debug("using pywps_cfg={}".format(args.config))
os.environ['PYWPS_CFG'] = args.config
self._run_job(args.filename)
def _run_job(self, filename):
job = Job.load(filename)
# init config
if 'PYWPS_CFG' in os.environ:
config.load_configuration(os.environ['PYWPS_CFG'])
# update PATH
os.environ['PATH'] = "{0}:{1}".format(
config.get_config_value('processing', 'path'),
os.environ.get('PATH'))
# cd into workdir
os.chdir(job.workdir)
# init logger ... code copied from app.Service
if config.get_config_value('logging', 'file') and config.get_config_value('logging', 'level'):
LOGGER.setLevel(getattr(logging, config.get_config_value('logging', 'level')))
if not LOGGER.handlers: # hasHandlers in Python 3.x
fh = logging.FileHandler(config.get_config_value('logging', 'file'))
fh.setFormatter(logging.Formatter(config.get_config_value('logging', 'format')))
LOGGER.addHandler(fh)
else: # NullHandler
if not LOGGER.handlers:
LOGGER.addHandler(logging.NullHandler())
job.run()
def launcher():
"""
Run job launcher command line.
"""
job_launcher = JobLauncher()
parser = job_launcher.create_parser()
args = parser.parse_args()
job_launcher.run(args)
| bsmithyman/galoshes | setup.py | Python | mit | 1,497 | 0.032732 |
'''Galoshes
'''
from distutils.core import setup
from setuptools import find_packages
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Natural Language :: English',
]
with open('README.md') as fp:
LONG_DESCRIPTION = ''.join(fp.readlines())
setup(
name = 'galoshes',
version = '0.2.3',
packages = find_packages(),
install_requires = ['numpy>=1.7',
'future',
],
author = 'Brendan Smithyman',
author_email = 'brendan@bitsmithy.net',
description = 'galoshes',
long_description = LONG_DESCRIPTION,
license = 'MIT',
keywords = 'dictionary class attribute',
url = 'https://github.com/bsmithyman/galoshes',
download_url = 'https://github.com/bsmithyman/galoshes',
classifiers = CLASSIFIERS,
platforms = ['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
use_2to3 = False,
)
| seagatesoft/dateparser | tests/test_languages.py | Python | bsd-3-clause | 18,881 | 0.0024 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from nose_parameterized import parameterized, param
from dateparser.languages import default_language_loader, Language
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from tests import BaseTestCase
class TestBundledLanguages(BaseTestCase):
def setUp(self):
super(TestBundledLanguages, self).setUp()
self.language = NotImplemented
self.datetime_string = NotImplemented
self.translation = NotImplemented
self.tokens = NotImplemented
self.result = NotImplemented
@parameterized.expand([
param('en', "Sep 03 2014", "september 03 2014"),
param('en', "friday, 03 september 2014", "friday 03 september 2014"),
# Chinese
param('cn', "1年11个月", "1 year 11 month"),
# French
param('fr', "20 Février 2012", "20 february 2012"),
param('fr', "Mercredi 19 Novembre 2013", "wednesday 19 november 2013"),
param('fr', "18 octobre 2012 à 19 h 21 min", "18 october 2012 19:21"),
# German
param('de', "29. Juni 2007", "29. june 2007"),
param('de', "Montag 5 Januar, 2015", "monday 5 january 2015"),
# Spanish
param('es', "Miércoles 31 Diciembre 2014", "wednesday 31 december 2014"),
# Italian
param('it', "Giovedi Maggio 29 2013", "thursday may 29 2013"),
param('it', "19 Luglio 2013", "19 july 2013"),
# Portuguese
param('pt', "22 de dezembro de 2014 às 02:38", "22 december 2014 02:38"),
# Russian
param('ru', "5 августа 2014 г. в 12:00", "5 august 2014 year 12:00"),
# Turkish
param('tr', "2 Ocak 2015 Cuma, 16:49", "2 january 2015 friday 16:49"),
# Czech
param('cz', "22. prosinec 2014 v 2:38", "22. december 2014 2:38"),
# Dutch
param('nl', "maandag 22 december 2014 om 2:38", "monday 22 december 2014 2:38"),
# Romanian
param('ro', "22 Decembrie 2014 la 02:38", "22 december 2014 02:38"),
# Polish
param('pl', "4 stycznia o 13:50", "4 january 13:50"),
param('pl', "29 listopada 2014 o 08:40", "29 november 2014 08:40"),
# Ukrainian
param('uk', "30 листопада 2013 о 04:27", "30 november 2013 04:27"),
# Belarusian
param('by', "5 снежня 2015 г. у 12:00", "5 december 2015 year 12:00"),
param('by', "11 верасня 2015 г. у 12:11", "11 september 2015 year 12:11"),
param('by', "3 стд 2015 г. у 10:33", "3 january 2015 year 10:33"),
# Arabic
param('ar', "6 يناير، 2015، الساعة 05:16 مساءً", "6 january 2015 05:16 pm"),
param('ar', "7 يناير، 2015، الساعة 11:00 صباحاً", "7 january 2015 11:00 am"),
# Vietnamese
param('vi', "Thứ Năm, ngày 8 tháng 1 năm 2015", "thursday 8 january 2015"),
param('vi', "Thứ Tư, 07/01/2015 | 22:34", "wednesday 07/01/2015 22:34"),
param('vi', "9 Tháng 1 2015 lúc 15:08", "9 january 2015 15:08"),
# Thai
param('th', "เมื่อ กุมภาพันธ์ 09, 2015, 09:27:57 AM", "february 09 2015 09:27:57 am"),
param('th', "เมื่อ กรกฎาคม 05, 2012, 01:18:06 AM", "july 05 2012 01:18:06 am"),
# Filipino
param('ph', "Biyernes Hulyo 3, 2015", "friday july 3 2015"),
param('ph', "Pebrero 5, 2015 7:00 pm", "february 5 2015 7:00 pm"),
# Miscellaneous
param('en', "2014-12-12T12:33:39-08:00", "2014-12-12 12:33:39-08:00"),
param('en', "2014-10-15T16:12:20+00:00", "2014-10-15 16:12:20+00:00"),
param('en', "28 Oct 2014 16:39:01 +0000", "28 october 2014 16:39:01 +0000"),
param('es', "13 Febrero 2015 a las 23:00", "13 february 2015 23:00")
])
def test_translation(self, shortname, datetime_string, expected_translation):
self.given_bundled_language(shortname)
self.given_string(datetime_string)
self.when_datetime_string_translated()
self.then_string_translated_to(expected_translation)
@parameterized.expand([
# English
param('en', "yesterday", "1 day"),
param('en', "today", "0 day"),
param('en', "day before yesterday", "2 day"),
param('en', "last month", "1 month"),
param('en', "less than a minute ago", "45 second"),
# German
param('de', "vorgestern", "2 day"),
param('de', "heute", "0 day"),
param('de', "vor 3 Stunden", "ago 3 hour"),
# French
param('fr', "avant-hier", "2 day"),
param('fr', "hier", "1 day"),
param('fr', "aujourd'hui", "0 day"),
# Spanish
param('es', "anteayer", "2 day"),
param('es', "ayer", "1 day"),
param('es', "ayer a las", "1 day "),
param('es', "hoy", "0 day"),
param('es', "hace un horas", "ago 1 hour"),
param('es', "2 semanas", "2 week"),
param('es', "2 año", "2 year"),
# Italian
param('it', "altro ieri", "2 day"),
param('it', "ieri", "1 day"),
param('it', "oggi", "0 day"),
param('it', "2 settimana fa", "2 week ago"),
param('it', "2 anno fa", "2 year ago"),
# Portuguese
param('pt', "anteontem", "2 day"),
param('pt', "ontem", "1 day"),
param('pt', "hoje", "0 day"),
param('pt', "56 minutos", "56 minute"),
param('pt', "12 dias", "12 day"),
param('pt', "há 14 min.", "ago 14 minute."),
# Russian
param('ru', "9 месяцев", "9 month"),
param('ru', "8 недели", "8 week"),
param('ru', "7 года", "7 year"),
param('ru', "вчера", "1 day"),
param('ru', "сегодня", "0 day"),
param('ru', "несколько секунд", "44 second"),
# Turkish
param('tr', "dün", "1 day"),
param('tr', "22 dakika", "22 minute"),
param('tr', "12 hafta", "12 week"),
param('tr', "13 yıl", "13 year"),
# Czech
param('cz', "40 sekunda", "40 second"),
param('cz', "4 týden", "4 week"),
param('cz', "14 roků", "14 year"),
# Chinese
param('cn', "昨天", "1 day"),
param('cn', "前天", "2 day"),
param('cn', "50 秒", "50 second"),
param('cn', "7 周", "7 week"),
param('cn', "12 年", "12 year"),
# Dutch
param('nl', "17 uur geleden", "17 hour ago"),
param('nl', "27 jaar geleden", "27 year ago"),
param('nl', "45 minuten", "45 minute"),
# Romanian
param('ro', "23 săptămâni în urmă", "23 week ago"),
param('ro', "23 săptămâni", "23 week"),
param('ro', "13 oră", "13 hour"),
# Arabic
param('ar', "يومين", "2 day"),
param('ar', "أمس", "1 day"),
param('ar', "4 عام", "4 year"),
param('ar', "منذ 2 ساعات", "ago 2 hour"),
param('ar', "منذ ساعتين", "ago 2 hour"),
# Polish
param('pl', "2 godz.", "2 hour"),
param('pl', "Wczoraj o 07:40", "1 day 07:40"),
param('pl', "Poniedziałek 8:10 pm", "monday 8:10 pm"),
# Vietnamese
param('vi', "2 tuần 3 ngày", "2 week 3 day"),
param('vi', "21 giờ trước", "21 hour ago"),
param('vi', "Hôm qua 08:16", "1 day 08:16"),
param('vi', "Hôm nay 15:39", "0 day 15:39"),
#French
param('fr', u"Il y a moins d'une minute", "ago 1 minute"),
param('fr', u"Il y a moins de 30s", "ago 30 s"),
#Filipino
param('ph', "kahapon", "1 day"),
param('ph', "ngayon", "0 second"),
# Belarusian
param('by', "9 месяцаў", "9 month"),
param('by', "8 тыдняў", "8 week"),
param('by', "1 тыдзень", "1 week"),
param('by', "2 года", "2 year"),
param('by', "3 гады", "3 year"),
param('by', "11 секунд", "11 second"),
param('by', "учора", "1 day"),
param('by', "пазаўчора", "2 day"),
param('by', "сёння", "0 day"),
param('by', "некалькі хвілін", "2 minute"),
])
def test_freshness_translation(self, shortname, datetime_string, expected_translation):
self.given_bundled_language(shortname)
self.given_string(datetime_string)
self.when_datetime_string
| haxwithaxe/ddp | examples/xrproxy_server.py | Python | bsd-3-clause | 11,364 | 0.027455 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Copyright/License Notice (BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
from danlog import DanLog
from ddp import *
import os
import pickle
import sys
import xmlrpclib
from xml.dom import minidom
###########
# Globals #
###########
client_callsign = ""
log = DanLog("XRProxyServer")
#############
# Constants #
#############
ALLOW_UNSIGNED_PACKETS = False
BACKEND_DATAMODE = "PSK500R"
BACKEND_HOSTNAME = "localhost"
BACKEND_PORT = 7362
DEBUG_MODE = False
DISABLE_CRYPTO = False
SPECIFICATION = 0
USE_TCP = 0
XMLRPC_SERVER = "http://127.0.0.1:7397/xmlrpc/"
XML_SETTINGS_FILE = "xrproxyserver-settings.xml"
###############
# Subroutines #
###############
def cBool(value):
if str(value).lower() == "false" or str(value) == "0":
return False
elif str(value).lower() == "true" or str(value) == "1":
return True
else:
return False
def exitProgram():
sys.exit(0)
def main():
global client_callsign
log.info("""
#########################################################################
# Copyright/License Notice (BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
""")
log.info("")
log.info("XMLRPC Proxy - Server")
log.info("=====================")
log.info("Checking settings...")
if os.path.exists(XML_SETTINGS_FILE) == False:
log.warn("The XML settings file doesn't exist, create one...")
xmlXRPSettingsWrite()
log.info("The XML settings file has been created using the default settings. Please edit it and restart the XMLRPC proxy server once you're happy with the settings.")
exitProgram()
else:
log.info("Reading XML settings...")
xmlXRPSettingsRead()
# This will ensure it will have any new settings in
if os.path.exists(XML_SETTINGS_FILE + ".bak"):
os.unlink(XML_SETTINGS_FILE + ".bak")
os.rename(XML_SETTINGS_FILE, XML_SETTINGS_FILE + ".bak")
xmlXRPSettingsWrite()
log.info("Setting up DDP...")
ddp = DDP(hostname = BACKEND_HOSTNAME, port = BACKEND_PORT, data_mode = BACKEND_DATAMODE, timeout = 60., ack_timeout = 30., tx_hangtime = 1.25, data_length = 1024, specification = SPECIFICATION, disable_ec = False, disable_crypto = DISABLE_CRYPTO, allow_unsigned_packets = ALLOW_UNSIGNED_PACKETS, application = "DDP Example: XMLRPC Proxy", ignore_broadcast_packets = True, debug_mode = DEBUG_MODE)
log.info("")
while client_callsign == "":
print "Please enter your callsign: ",
client_callsign = readInput().strip().upper()
log.info("")
ddp.setCallsign(client_callsign)
log.info("Waiting for a packet...")
while True:
try:
data = ddp.receiveDataFromAny("XMLRPC")
if data is not None:
# Check the flags
d = data[0]
packet = data[1]
# Send the query off to the XMLRPC server
log.info("A XMLRPC packet has arrived, forwarding it on...")
call = pickle.loads(d)
s = xmlrpclib.ServerProxy(XMLRPC_SERVER)
t = getattr(s, call[0])
args = call[1]
tosend = pickle.dumps(t(*args), protocol = 2)
s = None
# Send the results back to the client
log.info("Transmitting the results back to the client...")
ddp.transmitData("XMLRPC", "", packet[ddp.SECTION_SOURC
|
qedi-r/home-assistant
|
homeassistant/components/conversation/agent.py
|
Python
|
apache-2.0
| 714
| 0
|
"""Agent foundation for conversation integration."""
from abc import ABC, abstractmethod
from typing import Optional
from homeassistant.helpers import intent
class AbstractConversationAgent(ABC):
"""Abstract conversation agent."""
@property
def attribution(self):
"""Return the attributio
|
n."""
return None
async def async_get_onboarding(self):
"""Get onboard data."""
return None
async def async_set_onboarding(self, shown):
"""Set onboard data."""
return True
@abstractmethod
async def async_process(
self, text: str, conversation_id: Optional[str] = None
) -> intent.IntentRespo
|
nse:
"""Process a sentence."""
|
smmribeiro/intellij-community
|
python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/withOneTarget_after.py
|
Python
|
apache-2.0
| 56
| 0.017857
|
def main():
|
with open('file.txt'):
pri
|
nt(42)
|
rdthomson/set09103
|
src/LPHW/ex5.py
|
Python
|
gpl-3.0
| 596
| 0.008389
|
my_name = 'Zed A. Shaw'
my_age = 35 # not a lie
my_height = 74 # Inches
my_weight = 180 # lbs
my_eyes = 'Blue'
my_tee
|
th = 'White'
my_hair = 'Brown'
print "Let's talk about %s." % my_name
print "He's %d inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "Actually that's not too heavy"
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "His t
|
eeth are usually %s depending on the coffee." % my_teeth
# this line is tricky, try to get it exactly right
print" If I add %d, %d and %d I get %d." % (my_age, my_height, my_weight, my_age + my_height + my_weight)
|
rfhk/awo-custom
|
sale_line_quant_extended/models/stock_move.py
|
Python
|
lgpl-3.0
| 10,584
| 0.005574
|
# -*- coding: utf-8 -*-
# Copyright 2015-2017 Quartile Limited
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
class StockMove(models.Model):
_inherit = "stock.move"
pick_partner_id = fields.Many2one(
related='picking_id.partner_id',
store=True,
readonly=True,
string='Pick Partner'
)
picking_type_code = fields.Selection(
related='picking_type_id.code',
store=True,
readonly=True,
string='Picking Type Code'
)
quant_lot_id = fields.Many2one(
'stock.production.lot',
compute='_get_quant_info',
store=True,
readonly=True,
string='Case No.'
)
quant_owner_id = fields.Many2one(
'res.partner',
compute='_get_quant_info',
store=True,
readonly=True,
string='Owner'
)
so_id = fields.Many2one(
'sale.order',
compute='_get_vals',
store=True,
readonly=True,
string='SO'
)
po_id = fields.Many2one(
'purchase.order',
compute='_get_vals',
store=True,
readonly=True,
string='PO'
)
is_mto = fields.Boolean('Make to Order',
compute='_compute_mto',
store=True,
)
@api.multi
def name_get(self):
res = []
for line in self:
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
pick_rec = self.env['stock.picking'].search(
[('name','=',line.picking_id.origin)])
if pick_rec.picking_type_id.code == 'incoming':
name = line.picking_id.name + '/ ' + name
else:
name = line.picking_id.origin + '/
|
' + name
res.append((line.id, name))
return res
@api.multi
@api.depends('quant_ids', 'reserved_quant_ids', 'lot_id')
def _g
|
et_quant_info(self):
for m in self:
if m.quant_ids:
m.quant_lot_id = m.quant_ids[0].lot_id and \
m.quant_ids[0].lot_id.id
m.quant_owner_id = m.quant_ids[0].owner_id and \
m.quant_ids[0].owner_id.id
elif m.reserved_quant_ids:
m.quant_lot_id = m.reserved_quant_ids[0].lot_id and \
m.reserved_quant_ids[0].lot_id.id
m.quant_owner_id = m.reserved_quant_ids[0].owner_id and \
m.reserved_quant_ids[0].owner_id.id
else:
m.quant_lot_id = m.lot_id.id
# below part does not work since quant is generated after
# this step
# if m.lot_id.quant_ids:
# m.quant_owner_id = m.lot_id.quant_ids[-1].owner_id and \
# m.lot_id.quant_ids[-1].owner_id.owner_id.id
def _get_quant_info_init(self, cr, uid):
# update quant info when installing/upgrading
cr.execute("""
update stock_move m1
set quant_lot_id = lot, quant_owner_id = owner
from (select q.lot_id as lot, q.owner_id as owner, m2.id as id
from stock_quant q
join stock_move m2 on q.reservation_id = m2.id) as subq
where m1.id = subq.id
and quant_lot_id is null
""")
@api.multi
@api.depends('origin')
def _get_vals(self):
SO = self.env['sale.order']
PO = self.env['purchase.order']
for m in self:
m.so_id, m.po_id = 0, 0
if m.purchase_line_id:
m.po_id = m.purchase_line_id.order_id.id
elif m.procurement_id and m.procurement_id.sale_line_id:
m.so_id = m.procurement_id.sale_line_id.order_id.id
@api.one
@api.depends('procurement_id', 'purchase_line_id')
def _compute_mto(self):
if self.code == 'outgoing' and self.procurement_id and \
self.procurement_id.sale_line_id:
self.is_mto = self.procurement_id.sale_line_id.mto
elif self.code == 'incoming' and self.purchase_line_id:
self.is_mto = self.purchase_line_id.mto
# def init(self, cr):
# move_ids = self.search(cr, SUPERUSER_ID, [])
# for m in self.browse(cr, SUPERUSER_ID, move_ids):
# m.pick_partner_id = m.picking_id.partner_id and m.picking_id.partner_id.id
# if m.quant_ids:
# m.quant_lot_id = m.quant_ids[0].lot_id and m.quant_ids[0].lot_id.id
# m.quant_owner_id = m.quant_ids[0].owner_id and m.quant_ids[0].owner_id.id
@api.model
def _prepare_picking_assign(self, move):
res = super(StockMove, self)._prepare_picking_assign(move)
res['is_mto'] = move.is_mto
return res
def action_assign(self, cr, uid, ids, context=None):
# NEED TO OVERRIDE COMPLETE METHOD SINCE LOGIC WAS IN BETWEEN THE
# LINES. SEE #oscg TAG FOR CHANGES DONE ON THIS METHOD.
""" Checks the product type and accordingly writes the state.
"""
context = context or {}
quant_obj = self.pool.get("stock.quant")
to_assign_moves = []
main_domain = {}
todo_moves = []
operations = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('confirmed', 'waiting', 'assigned'):
continue
if move.location_id.usage in ('supplier', 'inventory', 'production'):
to_assign_moves.append(move.id)
#in case the move is returned, we want to try to find quants before forcing the assignment
if not move.origin_returned_move_id:
continue
if move.product_id.type == 'consu':
to_assign_moves.append(move.id)
continue
else:
todo_moves.append(move)
#we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only
main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
# oscg add
# this is to prevent reserving quants that are taken by
# quotations for supplier return outgoing move
if move.location_dest_id.usage == 'supplier':
main_domain[move.id] += [('sale_id', '=', False)]
#if the move is preceeded, restrict the choice of quants in the ones moved previously in original move
ancestors = self.find_move_ancestors(cr, uid, move, context=context)
if move.state == 'waiting' and not ancestors:
#if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant available in stock
main_domain[move.id] += [('id', '=', False)]
elif ancestors:
main_domain[move.id] += [('history_ids', 'in', ancestors)]
#if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
if move.origin_returned_move_id:
main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
# Check all ops and sort them: we want to process first the packages, then operations with lot then the rest
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
#first try to find quants based on specific domains given by linked operations
for record in ops.linked_move_operation_ids:
move = record.move_id
if move.id in main_domain:
domain = main_domain[move.id] + self.poo
|
oblique-labs/pyVM
|
rpython/translator/backendopt/escape.py
|
Python
|
mit
| 12,552
| 0.001514
|
from rpython.flowspace.model import Variable
from rpython.rtyper.lltypesystem import lltype
from rpython.translator.simplify import get_graph
from rpython.tool.uid import uid
class CreationPoint(object):
def __init__(self, creation_method, TYPE, op=None):
self.escapes = False
self.returns = False
self.creation_method = creation_method
if creation_method == "constant":
self.escapes = True
self.TYPE = TYPE
self.op = op
def __repr__(self):
return ("CreationPoint(<0x%x>, %r, %s, esc=%s)" %
(uid(self), self.TYPE, self.creation_method, self.escapes))
class VarState(object):
def __init__(self, *creps):
self.creation_points = set()
for crep in creps:
self.creation_points.add(crep)
def contains(self, other):
return other.creation_points.issubset(self.creation_points)
def merge(self, other):
creation_points = self.creation_points.union(other.creation_points)
return VarState(*creation_points)
def setescapes(self):
changed = []
for crep in self.creation_points:
if not crep.escapes:
changed.append(crep)
crep.escapes = True
return changed
def setreturns(self):
changed = []
for crep in self.creation_points:
if not crep.returns:
changed.append(crep)
crep.returns = True
return changed
def does_escape(self):
for crep in self.creation_points:
if crep.escapes:
return True
return False
def does_return(self):
for crep in self.creation_points:
if crep.returns:
return True
return False
def __repr__(self):
return "<VarState %s>" % (self.creation_points, )
class AbstractDataFlowInterpreter(object):
def __init__(self, translation_context):
self.translation_context = translation_context
self.scheduled = {} # block: graph containing it
self.varstates = {} # var-or-const: state
self.creationpoints = {} # var: creationpoint
self.constant_cps = {} # const: creationpoint
self.dependencies = {} # creationpoint: {block: graph containing it}
self.functionargs = {} # graph: list of state of args
self.flown_blocks = {} # block: True
def seen_graphs(self):
return self.functionargs.keys()
def getstate(self, var_or_const):
if not isonheap(var_or_const):
return None
if var_or_const in self.varstates:
return self.varstates[var_or_const]
if isinstance(var_or_c
|
onst, Variable):
varstate = VarState()
else:
|
if var_or_const not in self.constant_cps:
crep = CreationPoint("constant", var_or_const.concretetype)
self.constant_cps[var_or_const] = crep
else:
crep = self.constant_cps[var_or_const]
varstate = VarState(crep)
self.varstates[var_or_const] = varstate
return varstate
def getstates(self, varorconstlist):
return [self.getstate(var) for var in varorconstlist]
def setstate(self, var, state):
self.varstates[var] = state
def get_creationpoint(self, var, method="?", op=None):
if var in self.creationpoints:
return self.creationpoints[var]
crep = CreationPoint(method, var.concretetype, op)
self.creationpoints[var] = crep
return crep
def schedule_function(self, graph):
startblock = graph.startblock
if graph in self.functionargs:
args = self.functionargs[graph]
else:
args = []
for var in startblock.inputargs:
if not isonheap(var):
varstate = None
else:
crep = self.get_creationpoint(var, "arg")
varstate = VarState(crep)
self.setstate(var, varstate)
args.append(varstate)
self.scheduled[startblock] = graph
self.functionargs[graph] = args
resultstate = self.getstate(graph.returnblock.inputargs[0])
return resultstate, args
def flow_block(self, block, graph):
self.flown_blocks[block] = True
if block is graph.returnblock:
if isonheap(block.inputargs[0]):
self.returns(self.getstate(block.inputargs[0]))
return
if block is graph.exceptblock:
if isonheap(block.inputargs[0]):
self.escapes(self.getstate(block.inputargs[0]))
if isonheap(block.inputargs[1]):
self.escapes(self.getstate(block.inputargs[1]))
return
self.curr_block = block
self.curr_graph = graph
for op in block.operations:
self.flow_operation(op)
for exit in block.exits:
args = self.getstates(exit.args)
targetargs = self.getstates(exit.target.inputargs)
# flow every block at least once
if (multicontains(targetargs, args) and
exit.target in self.flown_blocks):
continue
for prevstate, origstate, var in zip(args, targetargs,
exit.target.inputargs):
if not isonheap(var):
continue
newstate = prevstate.merge(origstate)
self.setstate(var, newstate)
self.scheduled[exit.target] = graph
def flow_operation(self, op):
args = self.getstates(op.args)
opimpl = getattr(self, 'op_' + op.opname, None)
if opimpl is not None:
res = opimpl(op, *args)
if res is not NotImplemented:
self.setstate(op.result, res)
return
if isonheap(op.result) or filter(None, args):
for arg in args:
if arg is not None:
self.escapes(arg)
def complete(self):
while self.scheduled:
block, graph = self.scheduled.popitem()
self.flow_block(block, graph)
def escapes(self, arg):
changed = arg.setescapes()
self.handle_changed(changed)
def returns(self, arg):
changed = arg.setreturns()
self.handle_changed(changed)
def handle_changed(self, changed):
for crep in changed:
if crep not in self.dependencies:
continue
self.scheduled.update(self.dependencies[crep])
def register_block_dependency(self, state, block=None, graph=None):
if block is None:
block = self.curr_block
graph = self.curr_graph
for crep in state.creation_points:
self.dependencies.setdefault(crep, {})[block] = graph
def register_state_dependency(self, state1, state2):
"state1 depends on state2: if state2 does escape/change, so does state1"
# change state1 according to how state2 is now
if state2.does_escape():
self.escapes(state1)
if state2.does_return():
self.returns(state1)
# register a dependency of the current block on state2:
# that means that if state2 changes the current block will be reflown
# triggering this function again and thus updating state1
self.register_block_dependency(state2)
# _____________________________________________________________________
# operation implementations
def op_malloc(self, op, typestate, flagsstate):
assert flagsstate is None
flags = op.args[1].value
if flags != {'flavor': 'gc'}:
return NotImplemented
return VarState(self.get_creationpoint(op.result, "malloc", op))
def op_malloc_varsize(self, op, typestate, flagsstate, lengthstate):
assert flagsstate is None
flags = op.args[1].value
if flags != {'flavor': 'gc'}:
return NotImplemented
return VarState(self.get_creationpoint(op.result, "malloc_varsize", op))
|
stbenjam/katello-agent
|
src/setup.py
|
Python
|
gpl-2.0
| 1,603
| 0.001871
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
from platform import python_version
from setuptools import setup, find_packages
major, minor, micro = python_version().split('.')
if major != '2' or minor not in ['4', '5', '6', '7']:
raise Exception('unsuppor
|
ted version of python')
requires = [
]
setup(
name='katello-agent',
version='0.1',
description='Katello Agent',
author='Jeff Ortel',
author_email='jortel@redhat.com',
url='',
license='GPLv2+',
packages=find_packages(),
scripts = [
],
include_package_data=False,
data_files=[],
classifiers=[
        'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Operating System :: POSIX',
'T
|
opic :: Content Management and Delivery',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
],
install_requires=requires,
)
|
satish-suradkar/pyresttest
|
pyresttest/testapp/testapp/urls.py
|
Python
|
apache-2.0
| 535
| 0
|
from django.conf.urls import patterns, include, url
from testapp.api import PersonResource
from django.contrib
|
import admin
admin.autodiscover()
person_resource = PersonResource()
urlpatterns = patterns('',
|
# Examples:
# url(r'^$', 'testapp.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
(r'^api/', include(person_resource.urls))
)
|
oseemann/cvsreview
|
app/urls.py
|
Python
|
gpl-3.0
| 1,023
| 0.006843
|
# vim:set tabstop=3 shiftwidth=3 expandtab:
# vim:set autoindent smarttab nowrap:
from django.conf.urls.defaults import *
import settings
urlpatterns = patterns('',
(r'^$',
|
'webreview.views.index'),
(r'^skip/(?P<skip>.*)$', 'webreview.views.changes'),
(r'^diff/(?P<change_id>.*)/html$', 'webreview.views.diffhtml'),
(r'^addmodule$', 'webreview.views.addmodule'),
(r'^login$', 'webreview.views.login'),
(r'^changes/all/skip/(?P<skip>\d+)$', 'webreview.views.index'),
(r'^changes/all$',
|
'webreview.views.index'),
(r'^changes/(?P<filter>.*)/(?P<filter_id>\d+)/skip/(?P<skip>\d*)$', 'webreview.views.changes'),
(r'^changes/(?P<filter>.*)/(?P<filter_id>\d+)$', 'webreview.views.changes'),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
adwiputra/LUMENS-repo
|
processing/r/RAlgorithm.py
|
Python
|
gpl-2.0
| 22,835
| 0.001927
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
RAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os, numbers
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.ProcessingLog import ProcessingLog
from processing.gui.Help2Html import Help2Html
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.ParameterString import ParameterString
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterTableField import ParameterTableField
from processing.parameters.ParameterExtent import ParameterExtent
from processing.parameters.ParameterFile import ParameterFile
from processing.outputs.OutputTable import OutputTable
from processing.outputs.OutputVector import OutputVector
from processing.outputs.OutputRaster import OutputRaster
from processing.outputs.OutputHTML import OutputHTML
from processing.outputs.OutputFile import OutputFile
from processing.outputs.OutputString import OutputString
from processing.outputs.OutputNumber import OutputNumber
from processing.tools.system import isWindows
from processing.script.WrongScriptException import WrongScriptException
from processing.r.RUtils import RUtils
class RAlgorithm(GeoAlgorithm):
R_CONSOLE_OUTPUT = 'R_CONSOLE_OUTPUT'
RPLOTS = 'RPLOTS'
KNITR_REPORT = 'KNITR_REPORT'
def getCopy(self):
newone = RAlgorithm(self.descriptionFile)
newone.provider = self.provider
return newone
def __init__(self, descriptionFile, script=None):
GeoAlgorithm.__init__(self)
self.script = script
self.descriptionFile = descriptionFile
if script is not None:
self.defineCharacteristicsFromScript()
if descriptionFile is not None:
self.defineCharacteristicsFromFile()
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../images/r.png')
def defineCharacteristicsFromScript(self):
lines = self.script.split('\n')
self.name = '[Unnamed algorithm]'
self.group = 'User R scripts'
self.parseDescription(iter(lines))
def defineCharacteristicsFromFile(self):
filename = os.path.basename(self.descriptionFile)
self.name = filename[:filename.rfind('.')].replace('_', ' ')
self.group = 'User R scripts'
with open(self.descriptionFile, 'r') as f:
lines = [line.strip() for line in f]
self.parseDescription(iter(lines))
def parseDescription(self, lines):
self.outputNumberName = ''
self.outputNumberBool = False
self.script = ''
self.commands = []
self.report = False
self.showPlots = False
self.showConsoleOutput = False
self.useRasterPackage = True
self.passFileNames = False
self.useShapefilesPackage = False
self.verboseCommands = []
ender = 0
line = lines.next().strip('\n').strip('\r')
while ender < 10:
if line.startswith('##'):
try:
self.processParameterLine(line)
except Exception:
raise WrongScriptException('Could not load R script:'
+ self.descriptionFile + '.\n Problem with line "'
+ line + '"')
elif line.startswith('>'):
self.commands.append(line[1:])
self.verboseCommands.append(line[1:])
if not self.showConsoleOutput:
self.addOutput(OutputHTML(RAlgorithm.R_CONSOLE_OUTPUT,
'R Console Output'))
self.showConsoleOutput = True
else:
if line == '':
ender += 1
else:
ender = 0
self.commands.append(line)
self.script += line + '\n'
try:
line = lines.next().strip('\n').strip('\r')
except:
break
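    # Illustrative script header handled by parseDescription/processParameterLine
    # (a hypothetical example, not shipped with the plugin):
    #   ##My analysis=group
    #   ##Layer=raster
    #   ##Size=number 10
    #   ##Normalise=boolean True
    #   ##showplots
    #   >print(summary(Layer))
    # '##' lines declare the group, parameters and flags, '>' lines are R
    # commands whose console output is captured, and the remaining lines form
    # the script body.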
def getVerboseCommands(self):
return self.verboseCommands
def createDescriptiveName(self, s):
return s.replace('_', ' ')
def processParameterLine(self, line):
param = None
out = None
line =
|
line.replace('#', '')
if line.lower().strip().startswith('report'):
|
self.report = True
self.addOutput(OutputHTML(RAlgorithm.KNITR_REPORT, 'HTML Report'))
return
if line.lower().strip().startswith('showplots'):
self.showPlots = True
self.addOutput(OutputHTML(RAlgorithm.RPLOTS, 'R Plots'))
return
if line.lower().strip().startswith('dontuserasterpackage'):
self.useRasterPackage = False
return
if line.lower().strip().startswith('passfilenames'):
self.passFileNames = True
return
if line.lower().strip().startswith('shapefilespackage'):
self.useShapefilesPackage = True
return
tokens = line.split('=')
desc = self.createDescriptiveName(tokens[0])
if tokens[1].lower().strip() == 'group':
self.group = tokens[0]
return
if tokens[1].lower().strip().startswith('raster'):
param = ParameterRaster(tokens[0], desc, False)
elif tokens[1].lower().strip() == 'vector':
param = ParameterVector(tokens[0], desc,
[ParameterVector.VECTOR_TYPE_ANY])
elif tokens[1].lower().strip() == 'table':
param = ParameterTable(tokens[0], desc, False)
elif tokens[1].lower().strip().startswith('multiple raster'):
param = ParameterMultipleInput(tokens[0], desc,
ParameterMultipleInput.TYPE_RASTER)
param.optional = False
elif tokens[1].lower().strip() == 'multiple vector':
param = ParameterMultipleInput(tokens[0], desc,
ParameterMultipleInput.TYPE_VECTOR_ANY)
param.optional = False
elif tokens[1].lower().strip().startswith('selection'):
options = tokens[1].strip()[len('selection'):].split(';')
param = ParameterSelection(tokens[0], desc, options)
elif tokens[1].lower().strip().startswith('boolean'):
default = tokens[1].strip()[len('boolean') + 1:]
param = ParameterBoolean(tokens[0], desc, default)
elif tokens[1].lower().strip().startswith('number'):
try:
default = float(tokens[1].strip()[len('number') + 1:])
param = ParameterNumber(tokens[0], desc, default=default)
except:
|
qiyuangong/leetcode
|
python/422_Valid_Word_Square.py
|
Python
|
mit
| 805
| 0.001242
|
class Solution(object):
def validWordSquare(self, words):
"""
:t
|
ype words: List[str]
:rtype: bool
"""
if words is None or len(words) == 0:
return True
ls = len(words)
for i in range(ls):
for j in range(1, len(words[i])):
if j >= ls:
return False
if i >= len(words[j]):
return False
|
if words[i][j] != words[j][i]:
return False
return True
# def validWordSquare(self, words):
# # https://discuss.leetcode.com/topic/63423/1-liner-python/2
# # The map(None, ...) transposes the "matrix", filling missing spots with None
# return map(None, *words) == map(None, *map(None, *words))
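# Minimal usage sketch (inputs assumed, not from the original file): the square
# is valid only when row k reads the same as column k for every k.
if __name__ == '__main__':
    s = Solution()
    print(s.validWordSquare(["abcd", "bnrt", "crmy", "dtye"]))  # True
    print(s.validWordSquare(["ball", "area", "read", "lady"]))  # False: column 0 is "barl"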
|
yeti-platform/yeti
|
plugins/feeds/public/threatfox.py
|
Python
|
apache-2.0
| 2,925
| 0
|
import logging
from datetime import timedelta
from core import Feed
import pandas as pd
from core.observables import Ip, Observable
from core.errors import ObservableValidationError
class ThreatFox(Feed):
default_values = {
"frequency": timedelta(hours=1),
"name": "ThreatFox",
"source": "https://threatfox.abuse.ch/export/json/recent/",
"description": "Feed ThreatFox by Abuse.ch",
}
    def update(self):
        rows = self.update_json()
        # update_json() returns None when the HTTP request fails
        if rows is None:
            return
        for index, line in rows:
            self.analyze(line)
def update_json(self):
r = self._make_request(sort=False)
if r:
res = r.json()
values = [r[0] for r in res.values()]
df = pd.DataFrame(values)
df["first_seen_utc"] = pd.to_datetime(df["first_seen_utc"])
df["last_seen_utc"] = pd.to_datetime(df["last_seen_utc"])
if self.last_run:
df = df[df["first_seen_utc"] > self.last_run]
df.fillna("-", inplace=True)
return df.iterrows()
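    # Example of a row handed to analyze() (values invented for illustration):
    #   {"ioc_value": "198.51.100.7:4443", "ioc_type": "ip:port",
    #    "threat_type": "botnet_cc", "malware_printable": "Cobalt Strike",
    #    "malware_alias": "-", "confidence_level": 75,
    #    "first_seen_utc": Timestamp(...), "last_seen_utc": Timestamp(...),
    #    "reference": "https://example.com/report", "reporter": "abuse_ch",
    #    "tags": "cobaltstrike,c2"}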
def analyze(self, item):
first_seen = item["first_seen_utc"]
ioc_value = item["ioc_value"]
ioc_type = item["ioc_type"]
threat_type = item["threat_type"]
malware_alias = item["malware_alias"]
malware_printable = item["malware_printable"]
last_seen_utc = item["last_seen_utc"]
confidence_level = item["confidence_level"]
reference = item["reference"]
reporter = item["reporter"]
tags = []
context = {"source": self.name}
context["first_seen"] = first_seen
if reference:
context["reference"] = reference
else:
context["reference"] = "Unknown"
if reporter:
context["reporter"] = reporter
else:
context["reporter"] = "Unknown"
if threat_type:
context["threat_type"] = threat_type
if item["tags"]:
tags.extend(item["tags"].split(","))
if malware_printable:
tags.append(malware_printable)
if malware_alias:
context["malware_alias"] = malware_alias
if last_seen_utc:
context["last_seen_utc"] = last_seen_utc
if confidence_level:
context["confidence_level"] = confidence_
|
level
value = None
obs = None
try:
if "ip" in ioc_type:
value, port = ioc_value.split(":")
context["port"] = port
obs = Ip.get_or_create(value=value)
else:
obs = Observable.add_text(ioc_value)
except ObservableValidationError as e:
loggi
|
ng.error(e)
return
if obs:
obs.add_context(context)
obs.add_source(self.name)
if tags:
obs.tag(tags)
if malware_printable:
obs.tags
|
arlewis/arl_galbase
|
single_cutout_test_newmethod.py
|
Python
|
mit
| 18,179
| 0.004951
|
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import os, sys, time
import numpy as np
from pdb import set_trace
import montage_wrapper as montage
import shutil
import gal_data
import config
import glob
from scipy.ndimage.interpolation import zoom
#_TOP_DIR = '/data/tycho/0/leroy.42/allsky/'
#_INDEX_DIR = os.path.join(_TOP_DIR, 'code/')
_HOME_DIR = '/n/home00/lewis.1590/research/galbase_allsky/'
_DATA_DIR = '/n/home00/lewis.1590/research/galbase/gal_data/'
#_MOSAIC_DIR = os.path.join(_HOME_DIR, 'cutouts')
_GAL_DIR = os.path.join(_HOME_DIR, 'ngc2976')
_INPUT_DIR = os.path.join(_GAL_DIR, 'input')
_MOSAIC_DIR = os.path.join(_GAL_DIR, 'mosaics')
def get_args():
import argparse
parser = argparse.ArgumentParser(description='Create cutouts of a given size around each galaxy center.')
parser.add_argument('--size', default=30.,help='cutout size in arcminutes')
parser.add_argument('--cutout', action='store_true')
parser.add_argument('--copy', action='store_true')
parser.add_argument('--convolve', action='store_true')
parser.add_argument('--align', action='store_true')
return parser.parse_args()
def create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale):
hdr = pyfits.Header()
hdr['NAXIS'] = 2
hdr['NAXIS1'] = pix_len
hdr['NAXIS2'] = pix_len
hdr['CTYPE1'] = 'RA---TAN'
hdr['CRVAL1'] = float(ra_ctr)
hdr['CRPIX1'] = (pix_len / 2.) * 1.
hdr['CDELT1'] = -1.0 * pix_scale
hdr['CTYPE2'] = 'DEC--TAN'
hdr['CRVAL2'] = float(dec_ctr)
hdr['CRPIX2'] = (pix_len / 2.) * 1.
hdr['CDELT2'] = pix_scale
hdr['EQUINOX'] = 2000
return hdr
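# Example (approximate values, for illustration only): a 30 arcmin cutout at
# 1.5 arcsec pixels around NGC 2976 (RA ~146.8 deg, Dec ~67.9 deg) is
# 1800"/1.5" = 1200 pixels on a side:
#   hdr = create_hdr(146.8, 67.9, pix_len=1200, pix_scale=1.5 / 3600.)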
def make_axes(hdr, quiet=False, novec=False, vonly=False, simple=False):
# PULL THE IMAGE/CUBE SIZES FROM THE HEADER
naxis = hdr['NAXIS']
naxis1 = hdr['NAXIS1']
naxis2 = hdr['NAXIS2']
if naxis > 2:
naxis3 = hdr['NAXIS3']
## EXTRACT FITS ASTROMETRY STRUCTURE
ww = pywcs.WCS(hdr)
#IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)
if naxis > 3:
#GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER
cd = ww.wcs.cd
crpix = ww.wcs.crpix
        cdelt = ww.wcs.cdelt
crval = ww.wcs.crval
if naxis > 2:
# MAKE THE VELOCITY AXIS (WILL BE M/S)
v = np.arange(naxis3) * 1.0
vdif = v - (hdr['CRPIX3']-1)
vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])
# CUT OUT HERE IF WE ONLY WANT VELOCITY INFO
if vonly:
return vaxis
#IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:
if simple:
        print('Using simple approach to make axes.')
print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')
raxis = np.arange(naxis1) * 1.0
rdif = raxis - (hdr['CRPIX1'] - 1)
raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])
daxis = np.arange(naxis2) * 1.0
ddif = daxis - (hdr['CRPIX1'] - 1)
daxis = (ddif * hdr['CDELT1'] + hdr['CRVAL1'])
rimg = raxis # (fltarr(naxis2) + 1.)
dimg = (np.asarray(naxis1) + 1.) # daxis
return rimg, dimg
# OBNOXIOUS SFL/GLS THING
glspos = ww.wcs.ctype[0].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[0]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[0] = ctstr
print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])
glspos = ww.wcs.ctype[1].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[1]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[1] = ctstr
print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])
# CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE
if novec:
rimg = np.zeros((naxis1, naxis2))
dimg = np.zeros((naxis1, naxis2))
for i in range(naxis1):
            j = np.arange(naxis2)
            pixcrd = np.array(zip([float(i)] * naxis2, j.astype(np.float_)), np.float_)
            world = ww.all_pix2world(pixcrd, 1)
            ra, dec = world[:, 0], world[:, 1]
rimg[i, :] = ra
dimg[i, :] = dec
else:
ximg = np.arange(naxis1) * 1.0
yimg = np.arange(naxis1) * 1.0
X, Y = np.meshgrid(ximg, yimg, indexing='xy')
ss = X.shape
xx, yy = X.flatten(), Y.flatten()
pixcrd = np.array(zip(xx, yy), np.float_)
img_new = ww.all_pix2world(pixcrd, 0)
rimg_new, dimg_new = img_new[:,0], img_new[:,1]
rimg = rimg_new.reshape(ss)
dimg = dimg_new.reshape(ss)
# GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW
raxis = np.squeeze(rimg[:, naxis2/2])
daxis = np.squeeze(dimg[naxis1/2, :])
return rimg, dimg
def convert_files(gal_dir, im_dir, wt_dir, band, fuv_toab, nuv_toab, pix_as):
converted_dir = os.path.join(gal_dir, 'converted')
os.makedirs(converted_dir)
intfiles = sorted(glob.glob(os.path.join(im_dir, '*-int.fits')))
wtfiles = sorted(glob.glob(os.path.join(wt_dir, '*-rrhr.fits')))
int_outfiles = [os.path.join(converted_dir, os.path.basename(f).replace('.fits', '_mjysr.fits')) for f in intfiles]
wt_outfiles = [os.path.join(converted_dir, os.path.basename(f)) for f in wtfiles]
for i in range(len(intfiles)):
if os.path.exists(wtfiles[i]):
im, hdr = pyfits.getdata(intfiles[i], header=True)
wt, whdr = pyfits.getdata(wtfiles[i], header=True)
#wt = wtpersr(wt, pix_as)
#if band.lower() == 'fuv':
# im = counts2jy_galex(im, fuv_toab, pix_as)
if band.lower() == 'nuv'
|
:
im = counts2jy_galex(im, nuv_toab, pix_as)
if not os.path.exists(int_outfiles[i]):
#
|
im -= np.mean(im)
pyfits.writeto(int_outfiles[i], im, hdr)
if not os.path.exists(wt_outfiles[i]):
pyfits.writeto(wt_outfiles[i], wt, whdr)
else:
continue
return converted_dir, converted_dir
def counts2jy_galex(counts, cal, pix_as):
# first convert to abmag
abmag = -2.5 * np.log10(counts) + cal
# then convert to Jy
f_nu = 10**(abmag/-2.5) * 3631.
# then to MJy
f_nu *= 1e-6
# then to MJy/sr
val = f_nu / (np.radians(pix_as/3600))**2
return val
#val = flux / MJYSR2JYARCSEC / pixel_area / 1e-23 / C * FUV_LAMBDA**2
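# Worked example (assumed zero point): with the GALEX NUV calibration
# cal ~ 20.08 and 1.5 arcsec pixels, 1 count/s gives
#   abmag = -2.5*log10(1) + 20.08 = 20.08
#   f_nu  = 10**(abmag/-2.5) * 3631 Jy ~ 3.4e-5 Jy = 3.4e-11 MJy
#   val   = f_nu / (radians(1.5/3600))**2 ~ 0.64 MJy/sr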
def write_headerfile(header_file, header):
f = open(header_file, 'w')
for iii in range(len(header)):
outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\n'
f.write(outline)
f.close()
def mask_images(cnt_dir, exp_dir, flag_dir, gal_dir):
masked_dir = os.path.join(gal_dir, 'masked')
os.makedirs(masked_dir)
    # use consistent names for the per-band masked output directories
    cnt_masked_dir = os.path.join(masked_dir, 'cnt')
    exp_masked_dir = os.path.join(masked_dir, 'rrhr')
    os.makedirs(cnt_masked_dir)
    os.makedirs(exp_masked_dir)
    cnt_suff, exp_suff, flag_suff = '*-cnt.fits', '*-rrhr.fits', '*-flag.fits'
    cnt_images = sorted(glob.glob(os.path.join(cnt_dir, cnt_suff)))
    rrhr_images = sorted(glob.glob(os.path.join(exp_dir, exp_suff)))
    flag_images = sorted(glob.glob(os.path.join(flag_dir, flag_suff)))
    for i in range(len(cnt_images)):
        image_infile = cnt_images[i]
        time_infile = rrhr_images[i]
        flag_infile = flag_images[i]
        image_outfile = os.path.join(cnt_masked_dir, os.path.basename(image_infile))
        time_outfile = os.path.join(exp_masked_dir, os.path.basename(time_infile))
        mask_galex(image_infile, time_infile, flag_infile, out_intfile=image_outfile, out_wtfile=time_outfile)
    return cnt_masked_dir, exp_masked_dir
def mask_galex(cntfile, timefile, flagfile, outfile=None, chip_rad = 1400, chip_x0=1920, chip_y0=1920, out_intfile=None, out_wtfile=None):
    if out_intfile is None:
        out_intfile = cntfile.replace('.fits', '_masked.fits')
    if out_wtfile is None:
        out_wtfile = timefile.replace('.fits', '_masked.fits')
if not os.path.exists(out_intfile):
cnt, hdr = pyfits.getdata(cntfile, header=True)
exp, whdr = pyfits.getdata(timefile, header=True)
flag, fhdr = pyfits.g
|
john-wang-metro/metro-openerp
|
bug_fix/ir_mail_server.py
|
Python
|
agpl-3.0
| 25,985
| 0.005118
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 OpenERP S.A (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Charset import Charset
from email.Header import Header
from email.Utils import formatdate, make_msgid, COMMASPACE
from email import Encoders
import logging
import re
import smtplib
import threading
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools import html2text
import openerp.tools as tools
# ustr was originally from tools.misc.
# it is moved to loglevels until we refactor tools.
from openerp.loglevels import ustr
_logger = logging.getLogger(__name__)
class MailDeliveryException(osv.except_osv):
"""Specific exception subclass for mail delivery errors"""
def __init__(self, name, value):
super(MailDeliveryException, self).__init__(name, value)
class WriteToLogger(object):
"""debugging helper: behave as a fd and pipe to logger at the given level"""
def __init__(self, logger, level=logging.DEBUG):
self.logger = logger
self.level = level
def write(self, s):
self.logger.log(self.level, s)
def try_coerce_ascii(string_utf8):
"""Attempts to decode the given utf8-encoded string
as ASCII after coercing it to UTF-8, then return
the confirmed 7-bit ASCII string.
If the process fails (because the string
contains non-ASCII characters) returns ``None``.
"""
try:
string_utf8.decode('ascii')
except UnicodeDecodeError:
return
return string_utf8
def encode_header(header_text):
"""Returns an appropriate representation of the given header value,
suitable for direct assignment as a header value in an
email.message.Message. RFC2822 assumes that headers contain
only 7-bit characters, so we ensure it is the case, using
RFC2047 encoding when needed.
:param header_text: unicode or utf-8 encoded string with header value
:rtype: string | email.header.Header
:return: if ``header_text`` represents a plain ASCII string,
return the same 7-bit string, otherwise returns an email.header.Header
that will perform the appropriate RFC2047 encoding of
non-ASCII values.
"""
if not header_text: return ""
# convert anything to utf-8, suitable for testing ASCIIness, as 7-bit chars are
# encoded as ASCII in utf-8
header_text_utf8 = tools.ustr(header_text).encode('utf-8')
header_text_ascii = try_coerce_ascii(header_text_utf8)
# if this header contains non-ASCII characters,
# we'll need to wrap it up in a message.header.Header
# that will take care of RFC2047-encoding it as
# 7-bit string.
|
return header_text_ascii if header_text_ascii\
else Header(header_text_utf8, 'utf-8')
def encode_header_param(param_text):
"""Returns
|
an appropriate RFC2047 encoded representation of the given
header parameter value, suitable for direct assignation as the
param value (e.g. via Message.set_param() or Message.add_header())
RFC2822 assumes that headers contain only 7-bit characters,
so we ensure it is the case, using RFC2047 encoding when needed.
:param param_text: unicode or utf-8 encoded string with header value
:rtype: string
:return: if ``param_text`` represents a plain ASCII string,
return the same 7-bit string, otherwise returns an
ASCII string containing the RFC2047 encoded text.
"""
# For details see the encode_header() method that uses the same logic
if not param_text: return ""
param_text_utf8 = tools.ustr(param_text).encode('utf-8')
param_text_ascii = try_coerce_ascii(param_text_utf8)
return param_text_ascii if param_text_ascii\
else Charset('utf8').header_encode(param_text_utf8)
name_with_email_pattern = re.compile(r'("[^<@>]+")\s*<([^ ,<@]+@[^> ,]+)>')
address_pattern = re.compile(r'([^ ,<@]+@[^> ,]+)')
def extract_rfc2822_addresses(text):
"""Returns a list of valid RFC2822 addresses
that can be found in ``source``, ignoring
malformed ones and non-ASCII ones.
"""
if not text: return []
candidates = address_pattern.findall(tools.ustr(text).encode('utf-8'))
return filter(try_coerce_ascii, candidates)
def encode_rfc2822_address_header(header_text):
"""If ``header_text`` contains non-ASCII characters,
attempts to locate patterns of the form
``"Name" <address@domain>`` and replace the
``"Name"`` portion by the RFC2047-encoded
version, preserving the address part untouched.
"""
header_text_utf8 = tools.ustr(header_text).encode('utf-8')
header_text_ascii = try_coerce_ascii(header_text_utf8)
if header_text_ascii:
return header_text_ascii
# non-ASCII characters are present, attempt to
# replace all "Name" patterns with the RFC2047-
# encoded version
def replace(match_obj):
name, email = match_obj.group(1), match_obj.group(2)
name_encoded = str(Header(name, 'utf-8'))
return "%s <%s>" % (name_encoded, email)
header_text_utf8 = name_with_email_pattern.sub(replace,
header_text_utf8)
# try again after encoding
header_text_ascii = try_coerce_ascii(header_text_utf8)
if header_text_ascii:
return header_text_ascii
# fallback to extracting pure addresses only, which could
# still cause a failure downstream if the actual addresses
# contain non-ASCII characters
return COMMASPACE.join(extract_rfc2822_addresses(header_text_utf8))
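# Rough behaviour of the helpers above (illustrative values, Python 2):
#   encode_header('Hello')                     -> 'Hello' (plain 7-bit str)
#   encode_header(u'H\xe9llo')                 -> email.header.Header, RFC2047-encoded
#   extract_rfc2822_addresses('"Bob" <bob@example.com>, junk') -> ['bob@example.com']
#   encode_rfc2822_address_header(u'"J\xf6rg" <j@example.com>')
#       -> '=?utf-8?...?= <j@example.com>' (name part encoded, address untouched)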
class ir_mail_server(osv.osv):
"""Represents an SMTP server, able to send outgoing emails, with SSL and TLS capabilities."""
_name = "ir.mail_server"
_columns = {
'name': fields.char('Description', size=64, required=True, select=True),
'smtp_host': fields.char('SMTP Server', size=128, required=True, help="Hostname or IP of SMTP server"),
'smtp_port': fields.integer('SMTP Port', size=5, required=True, help="SMTP Port. Usually 465 for SSL, and 25 or 587 for other cases."),
'smtp_user': fields.char('Username', size=64, help="Optional username for SMTP authentication"),
'smtp_pass': fields.char('Password', size=64, help="Optional password for SMTP authentication"),
'smtp_encryption': fields.selection([('none','None'),
('starttls','TLS (STARTTLS)'),
('ssl','SSL/TLS')],
string='Connection Security', required=True,
help="Choose the connection encryption scheme:\n"
"- None: SMTP sessions are done in cleartext.\n"
"- TLS (STARTTLS): TLS encryption is requested at start of SMTP session (Recommended)\n"
"- SSL/TLS: SMTP sessions are encrypted with SSL/TLS through a dedicated port (default: 465)"),
'smtp_debug': fields.boolean('Debugging', help=
|
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/stop_iteration_inside_generator.py
|
Python
|
mit
| 3,242
| 0.005552
|
"""
Test that no StopIteration is raised inside a generator
"""
# pylint: disable=missing-docstring,invalid-name,import-error, try-except-raise, wrong-import-position,not-callable,raise-missing-from
import asyncio
class RebornStopIteration(StopIteration):
"""
A class inheriting from StopIteration exception
"""
# This one is ok
def gen_ok():
yield 1
yield 2
yield 3
# pylint should warn about this one
# because of a direct raising of StopIteration inside generator
def gen_stopiter():
yield 1
yield 2
yield 3
raise StopIteration # [stop-iteration-return]
# pylint should warn about this one
# because of a direct raising of an exception inheriting from StopIteration inside generator
def gen_stopiterchild():
yield 1
yield 2
yield 3
raise RebornStopIteration # [stop-iteration-return]
# pylint should warn here
# because of the possibility that next raises a StopIteration exception
def gen_next_raises_stopiter():
g = gen_ok()
while True:
yield next(g) # [stop-iteration-return]
# This one is the same as gen_next_raises_stopiter
# but is ok because the next function is inside
# a try/except block handling StopIteration
def gen_next_inside_try_except():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that don't handle StopIteration
def gen_next_inside_wrong_try_except():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that handle StopIteration but reraise it
def gen_next_inside_wrong_try_except2():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
raise StopIteration # [stop-iteration-return]
# Those two last are ok
def gen_in_for():
for el in gen_ok():
yield el
def gen_yield_from():
yield from gen_ok()
def gen_dont_crash_on_no_exception():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
raise
def gen_dont_crash_on_uninferabl
|
e():
# https://github.com/PyCQA/pylint/issues/1779
yield from iter()
raise asyncio.TimeoutError()
# https://github.com/PyCQA/pylint/issues/1830
def gen_next_with_sentinel():
yield next([], 42) # No bad return
from itertools import count
# https://github.com/PyCQA/pylint/issue
|
s/2158
def generator_using_next():
counter = count()
number = next(counter)
yield number * 2
# pylint: disable=no-self-use,too-few-public-methods
class SomeClassWithNext:
def next(self):
return iter([1, 2, 3])
def some_gen(self):
for value in self.next():
yield value
SomeClassWithNext().some_gen()
def something_invalid():
raise Exception('cannot iterate this')
def invalid_object_passed_to_next():
yield next(something_invalid()) # [stop-iteration-return]
|
SEA000/uw-empathica
|
empathica/gluon/contrib/pysimplesoap/server.py
|
Python
|
mit
| 17,610
| 0.005849
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Simple SOAP Server implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.02c"
from simplexml import SimpleXMLElement, TYPE_MAP, DateTime, Date, Decimal
DEBUG = False
class SoapDispatcher(object):
"Simple Dispatcher for SOAP Server"
def __init__(self, name, documentation='', action='', location='',
namespace=None, prefix=False,
soap_uri="http://schemas.xmlsoap.org/soap/envelope/",
soap_ns='soap',
**kwargs):
self.methods = {}
self.name = name
self.documentation = documentation
self.action = action # base SoapAction
self.location = location
self.namespace = namespace # targetNamespace
self.prefix = prefix
self.soap_ns = soap_ns
self.soap_uri = soap_uri
def register_function(self, name, fn, returns=None, args=None, doc=None):
self.methods[name] = fn, returns, args, doc or getattr(fn,"__doc__","")
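    # Typical wiring (hypothetical names, shown for illustration): register a
    # plain callable with its argument/return type maps, then hand the raw
    # SOAP envelope to dispatch() and return its XML result to the client.
    #
    #   dispatcher = SoapDispatcher('demo', location='http://localhost/soap',
    #                               action='http://localhost/soap', namespace='urn:demo')
    #   dispatcher.register_function('Add', lambda a, b: a + b,
    #                                args={'a': int, 'b': int},
    #                                returns={'AddResult': int})
    #   response_xml = dispatcher.dispatch(request_xml, action=soap_action_header)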
def dispatch(self, xml, action=None):
"Receive and proccess SOAP call"
# default values:
prefix = self.prefix
ret = fault = None
soap_ns, soap_uri = self.soap_ns, self.soap_uri
soap_fault_code = 'VersionMismatch'
try:
request = SimpleXMLElement(xml, namespace=self.namespace)
# detect soap prefix and uri (xmlns attributes of Envelope)
for k, v in request[:]:
if v in ("http://schemas.xmlsoap.org/soap/envelope/",
"http://www.w3.org/2003/05/soap-env",):
soap_ns = request.attributes()[k].localName
soap_uri = request.attributes()[k].value
soap_fault_code = 'Client'
# parse request message and get local method
method = request('Body', ns=soap_uri).children()(0)
if action:
# method name = action
name = action[len(self.action)+1:-1]
prefix = self.prefix
if not action or not name:
# method name = input message name
name = method.get_local_name()
prefix = method.get_pref
|
ix()
if DEBUG: print "dispatch method", name
function, returns_types, args_types, doc = self.methods[name]
# de-serialize parameters (if type definitions given)
if args_types:
args = method.
|
children().unmarshall(args_types)
elif args_types is None:
args = {'request':method} # send raw request
else:
args = {} # no parameters
soap_fault_code = 'Server'
# execute function
ret = function(**args)
if DEBUG: print ret
except Exception, e:
import sys
etype, evalue, etb = sys.exc_info()
if DEBUG:
import traceback
detail = ''.join(traceback.format_exception(etype, evalue, etb))
detail += '\n\nXML REQUEST\n\n' + xml
else:
detail = None
fault = {'faultcode': "%s.%s" % (soap_fault_code, etype.__name__),
'faultstring': unicode(evalue),
'detail': detail}
# build response message
if not prefix:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>"""
else:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"
xmlns:%(prefix)s="%(namespace)s"/>"""
xml = xml % {'namespace': self.namespace, 'prefix': prefix,
'soap_ns': soap_ns, 'soap_uri': soap_uri}
response = SimpleXMLElement(xml, namespace=self.namespace,
prefix=prefix)
response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
body = response.add_child("%s:Body" % soap_ns, ns=False)
if fault:
# generate a Soap Fault (with the python exception)
body.marshall("%s:Fault" % soap_ns, fault, ns=False)
else:
# return normal value
res = body.add_child("%sResponse" % name, ns=prefix)
if not prefix:
res['xmlns'] = self.namespace # add target namespace
# serialize returned values (response) if type definition available
if returns_types:
if not isinstance(ret, dict):
res.marshall(returns_types.keys()[0], ret, )
else:
for k,v in ret.items():
res.marshall(k, v)
elif returns_types is None:
# merge xmlelement returned
res.import_node(ret)
return response.as_xml()
# Introspection functions:
def list_methods(self):
"Return a list of aregistered operations"
return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()]
def help(self, method=None):
"Generate sample request and response messages"
(function, returns, args, doc) = self.methods[method]
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)s xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method':method, 'namespace':self.namespace}
request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if args:
items = args.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k,v in items:
request(method).marshall(k, v, add_comments=True, ns=False)
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)sResponse xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method':method, 'namespace':self.namespace}
response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if returns:
items = returns.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k,v in items:
response('%sResponse'%method).marshall(k, v, add_comments=True, ns=False)
return request.as_xml(pretty=True), response.as_xml(pretty=True), doc
def wsdl(self):
"Generate Web Service Description v1.1"
xml = """<?xml version="1.0"?>
<wsdl:definitions name="%(name)s"
targetNamespace="%(namespace)s"
xmlns:tns="%(namespace)s"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<wsdl:documentation xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">%(documentation)s</wsdl:documentation>
<wsdl:types>
<xsd:schema targetNamespace="%(namespace)s"
elementFormDefault="qualified"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</xsd:schema>
</wsdl:types>
</wsdl:definitions>
""" % {'namespace': self.namespace, 'name': self.name, 'documentation': self.documentation}
wsdl = SimpleXMLElement(xml)
for method, (function, returns, args, doc) in self.methods.items():
# create elements:
def parse_element(name, values, array=False, complex=False):
if not complex:
|
xesscorp/skidl
|
skidl/libs/analog_devices_sklib.py
|
Python
|
mit
| 14,637
| 0.042768
|
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
analog_devices = SchLib(tool=SKIDL).add_parts(*[
        Part(name='AD623AN',dest=TEMPLATE,tool=SKIDL,keywords='ad623 instrumentation amplifier dip-8',description='Single Supply, Rail to Rail, Instrumentation Amplifier, RoHS, DIP-8',ref_prefix='U',num_units=1,fplist=['DIP-8*'],do_erc=True,aliases=['AD623BN', 'AD623ANZ', 'AD623BNZ'],pins=[
Pin(num='1',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='-',do_erc=True),
Pin(num='3',name='+',do_erc=True),
Pin(num='4',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Rg',func=Pin.PASSIVE,do_erc=True)]),
        Part(name='AD623AR',dest=TEMPLATE,tool=SKIDL,keywords='ad623 instrumentation amplifier soic-8',description='Single Supply, Rail to Rail, Instrumentation Amplifier, RoHS, SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC-8*'],do_erc=True,aliases=['AD623ARZ', 'AD623BR', 'AD623BRZ'],pins=[
Pin(num='1',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='-',do_erc=True),
Pin(num='3',name='+',do_erc=True),
Pin(num='4',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Rg',func=Pin.PASSIVE,do_erc=True)]),
        Part(name='AD623ARM',dest=TEMPLATE,tool=SKIDL,keywords='ad623 instrumentation amplifier msop-8',description='Single Supply, Rail to Rail, Instrumentation Amplifier, RoHS, MSOP-8',ref_prefix='U',num_units=1,fplist=['MSOP-8*'],do_erc=True,aliases=['AD623ARMZ'],pins=[
Pin(num='1',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='-',do_erc=True),
Pin(num='3',name='+',do_erc=True),
Pin(num='4',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Rg',func=Pin.PASSIVE,do_erc=True)]),
        Part(name='AD8422ARMZ',dest=TEMPLATE,tool=SKIDL,keywords='ad8422 instrumentation amplifier msop-8',description='Low Power, Rail to Rail, Instrumentation Amplifier, MSOP-8',ref_prefix='U',num_units=1,fplist=['MSOP-8*'],do_erc=True,aliases=['AD8422BRMZ', 'AD8421ARMZ', 'AD8421BRMZ', 'AD8236ARMZ'],pins=[
Pin(num='1',name='-',do_erc=True),
Pin(num='2',name='Rg',func
|
=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='+',do_erc=True),
Pin(num='5',name='Vs-',func=P
|
in.PWRIN,do_erc=True),
Pin(num='6',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='Vs+',func=Pin.PWRIN,do_erc=True)]),
Part(name='AD8422ARZ',dest=TEMPLATE,tool=SKIDL,keywords='ad8429 instumentation amplifier soic-8',description='Low Noise, Instumentation Amplifier, SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC-8*'],do_erc=True,aliases=['AD8422BRZ', 'AD8421ARZ', 'AD8421BRZ', 'AD8429ARZ', 'AD8429BRZ'],pins=[
Pin(num='1',name='-',do_erc=True),
Pin(num='2',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='+',do_erc=True),
Pin(num='5',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='Vs+',func=Pin.PWRIN,do_erc=True)]),
Part(name='ADE7758',dest=TEMPLATE,tool=SKIDL,keywords='Energy Metering',description='Poly Phase Multifunction Energy Metering, SO-24',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x15.4mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='APCF',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='DVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='AVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='IAP',do_erc=True),
Pin(num='6',name='IAN',do_erc=True),
Pin(num='7',name='IBP',do_erc=True),
Pin(num='8',name='IBN',do_erc=True),
Pin(num='9',name='ICP',do_erc=True),
Pin(num='10',name='ICN',do_erc=True),
Pin(num='20',name='CLKOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='11',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='~CS',do_erc=True),
Pin(num='12',name='REFIN/OUT',do_erc=True),
Pin(num='22',name='DIN',do_erc=True),
Pin(num='13',name='VN',func=Pin.PWRIN,do_erc=True),
Pin(num='23',name='SCLK',do_erc=True),
Pin(num='14',name='VCP',do_erc=True),
Pin(num='24',name='DOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='VBP',do_erc=True),
Pin(num='16',name='VAP',do_erc=True),
Pin(num='17',name='VARCF',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='~IRQ',func=Pin.OPENCOLL,do_erc=True),
Pin(num='19',name='CLKIN',do_erc=True)]),
Part(name='ADM2484E',dest=TEMPLATE,tool=SKIDL,keywords='RS485 Transceiver RS422 Transceiver',description='Isolated RS485/RS422 Transceiver, Half-/Full-Duplex, 500kbps,SOIC-16W',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x10.3mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='VDD1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='RxD',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='~RE',do_erc=True),
Pin(num='5',name='DE',do_erc=True),
Pin(num='6',name='TxD',do_erc=True),
Pin(num='7',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='8',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='11',name='Y',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='Z',func=Pin.OUTPUT,do_erc=True),
Pin(num='13',name='B',do_erc=True),
Pin(num='14',name='A',do_erc=True),
Pin(num='15',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='16',name='VDD2',func=Pin.PWRIN,do_erc=True)]),
Part(name='ADM2587E',dest=TEMPLATE,tool=SKIDL,keywords='RS485 Transciever,RS422 Transciever',description='Isolated RS485/RS422 Transciever,Integrated Isolated DC-DC Converter, 500kbps,SO-20',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x12.8mm*Pitch1.27mm*'],do_erc=True,aliases=['ADM2582E'],pins=[
Pin(num='1',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='RxD',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='~RE',do_erc=True),
Pin(num='6',name='DE',do_erc=True),
Pin(num='7',name='TxD',do_erc=True),
Pin(num='8',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='Visoout',func=Pin.PWROUT,do_erc=True),
Pin(num='13',name='Y',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='Z',func=Pin.OUTPUT,do_erc=True),
Pin(num='16',name='GN
|
tchellomello/home-assistant
|
homeassistant/components/homematicip_cloud/climate.py
|
Python
|
apache-2.0
| 11,473
| 0.000697
|
"""Support for HomematicIP Cloud climate devices."""
import logging
from typing import Any, Dict, List, Optional, Union
from homematicip.aio.device import AsyncHeatingThermostat, AsyncHeatingThermostatCompact
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.base.enums import AbsenceType
from homematicip.device import Switch
from homematicip.functionalHomes import IndoorClimateHome
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
HEATING_PROFILES = {"PROFILE_1": 0, "PROFILE_2": 1, "PROFILE_3": 2}
COOLING_PROFILES = {"PROFILE_4": 3, "PROFILE_5": 4, "PROFILE_6": 5}
_LOGGER = logging.getLogger(__name__)
ATTR_PRESET_END_TIME = "preset_end_time"
PERMANENT_END_TIME = "permanent"
HMIP_AUTOMATIC_CM = "AUTOMATIC"
HMIP_MANUAL_CM = "MANUAL"
HMIP_ECO_CM = "ECO"
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP climate from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
entities = []
for device in hap.home.groups:
if isinstance(device, AsyncHeatingGroup):
entities.append(HomematicipHeatingGroup(hap, device))
if entities:
async_add_entities(entities)
class HomematicipHeatingGroup(HomematicipGenericEntity, ClimateEntity):
"""Representation of the HomematicIP heating group.
Heat mode is supported for all heating devices incl. their defined profiles.
Boost is available for radiator thermostats only.
Cool mode is only available for floor heating systems, if basically enabled in the hmip app.
"""
def __init__(self, hap: HomematicipHAP, device: AsyncHeatingGroup) -> None:
"""Initialize heating group."""
device.modelType = "HmIP-Heating-Group"
super().__init__(hap, device)
self._simple_heating = None
if device.actualTemperature is None:
self._simple_heating = self._first_radiator_thermostat
@property
def device_info(self) -> Dict[str, Any]:
"""Return device specific attributes."""
return {
"identifiers": {(HMIPC_DOMAIN, self._device.id)},
"name": self._device.label,
"manufacturer": "eQ-3",
"model": self._device.modelType,
"via_device": (HMIPC_DOMAIN, self._device.homeId),
}
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
@property
def target_temperature(self) -> float:
"""Return the temperature we try to reach."""
return self._device.setPointTemperature
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
if self._simple_heating:
return self._simple_heating.valveActualTemperature
return self._device.actualTemperature
@property
def current_humidity(self) -> int:
"""Return the current humidity."""
return self._device.humidity
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie."""
if self._disabled_by_cooling_mode and not self._has_switch:
return HVAC_MODE_OFF
if self._device.boostMode:
return HVAC_MODE_HEAT
if self._device.controlMode == HMIP_MANUAL_CM:
return HVAC_MODE_HEAT if self._heat_mode_enabled else HVAC_MODE_COOL
return HVAC_MODE_AUTO
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
if self._disabled_by_cooling_mode and not self._has_switch:
return [HVAC_MODE_OFF]
return (
[HVAC_MODE_AUTO, HVAC_MODE_HEAT]
if self._heat_mode_enabled
else [HVAC_MODE_AUTO, HVAC_MODE_COOL]
)
@property
def hvac_action(self) -> Optional[str]:
"""
Return the current hvac_action.
This is only relevant for radiator thermostats.
"""
if (
self._device.floorHeatingMode == "RADIATOR"
and self._has_radiator_thermostat
and self._heat_mode_enabled
):
return (
CURRENT_HVAC_HEAT if self._device.valvePosition else CURRENT_HVAC_IDLE
)
return None
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode."""
if self._device.boostMode:
return PRESET_BOOST
if self.hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF):
return PRESET_NONE
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType == AbsenceType.VACATION:
return PRESET_AWAY
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.PERMANENT,
]:
return PRESET_ECO
return (
self._device.activeProfile.name
if self._device.activeProfile.name in self._device_profile_names
else None
)
@property
def preset_modes(self) -> List[str]:
"""Return a list of available preset modes incl. hmip profiles."""
# Boost is only available if a radiator thermostat is in the room,
# and heat mode is enabled.
profile_names = self._device_profile_names
presets = []
if (
self._heat_mode_enabled and self._has_radiator_thermostat
) or self._has_switch:
if not profile_names:
presets.append(PRESET_NONE)
presets.append(PRESET_BOOST)
presets.extend(profile_names)
return presets
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._device.minTemperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._device.maxTemperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
if self.min_temp <= temperature <= self.max_temp:
await self._device.set_point_temperature(temperature)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
return
if hvac_mode == HVAC_MODE_AUTO:
await self._device.set_control_mode(HMIP_AUTOMATIC_CM)
else:
await self._device.set_control_mode(HMIP_MANUAL_CM)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
        if preset_mode not in self.preset_modes:
            return
if self._device.boostMode and preset_mode != PRESET_BOOST:
await self._device.set_boost(False)
if preset_mode == PRESET_BOOST:
await self._device.set_boost()
        if preset_mode in self._device_profile_names:
profile_idx = self._get_profile_idx_by_name(preset_mode)
if self._device.controlMode != HMIP_AUTOMATIC_CM:
await self.async_set_hvac_mode(HVAC_MODE_AUTO)
await self._device.set_active_profile(profile_idx)
@property
def
|
VanyaDNDZ/django-sybase-backend
|
sqlsybase_server/pyodbc/compiler.py
|
Python
|
unlicense
| 10,019
| 0.003394
|
from django.db.models.sql import compiler
from datetime import datetime
import re
from django.db.models.base import Model
REV_ODIR = {
'ASC': 'DESC',
'DESC': 'ASC'
}
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
index_start = len(self.query.extra_select.keys())
values = [self.query.convert_values(v, None, connection=self.connection) for v in row[:index_start]]
for value, field in map(None, row[index_start:], fields):
values.append(self.query.convert_values(value, field, connection=self.connection))
return tuple(values)
"""
use django as_sql with editing limit
"""
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark != 0)
if not do_offset:
return super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
select_re = re.compile('^SELECT[ ]+(DISTINCT\s)?')
query, params = super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
m = select_re.match(query)
if with_limits and m != None:
num = None
insert = None
if self.query.high_mark is not None:
num = self.query.high_mark - self.query.low_mark
if num <= 0:
return None, None
insert = 'TOP %d' % num
if insert is not None:
if m.groups()[0] != None:
query = select_re.sub('SELECT DISTINCT %s ' % insert, query)
else:
query = select_re.sub('SELECT %s ' % insert, query)
return query, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
def as_sql_legacy(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
returns_id = bool(self.return_id and
self.connection.features.can_return_id_from_insert)
if returns_id:
result = ['SET NOCOUNT ON']
else:
result = []
result.append('INSERT INTO %s' % qn(opts.db_table))
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
if returns_id:
result.append(';\nSELECT SCOPE_IDENTITY()')
params = self.query.params
sql = ' '.join(result)
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
if auto_field_column in self.query.columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
if returns_id:
sql = "SET NOCOUNT ON"
else:
sql = ""
if len(self.query.columns) == 1 and not params:
sql += "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql += "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
if returns_id:
sql += '\n;SELECT SCOPE_IDENTITY()'
return sql, params
def as_sql(self):
from django.db.models.fields import DateTimeField
from django.db.models.fields import DateField
"""
using django as_sql()
with exclude Datetime field with None value
which is nullable
"""
# return super(SQLInsertCompiler, self).as_sql()
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
preset_fields = self.query.fields if has_fields else [opts.pk]
fields = []
if len(self.query.objs) == 1:
# check only one row insert
# multi-row pass
# so multi-row rows will crash
for field in preset_fields:
# if not isinstance(field, (DateField, DateTimeField)):
# fields.append(field)
if field.get_db_prep_save(
getattr(self.query.objs[0], field.attname) if self.query.raw else field.pre_save(self.query.objs[0], True), connection=self.connection) is not None:
fields.append(field)
elif field.blank is not True:
fields.append(field)
else:
fields = preset_fields
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
def as_sql(self):
"""
        Copy of django UpdateCompiler a
|
lorenyu/project-euler
|
problem-036.py
|
Python
|
mit
| 813
| 0.00615
|
problem = """
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
"""
def is_palindromic(s):
return s[:len(s)/2] == s[:(len(s)-1)/2:-1]
def decimal2binary(num):
x = ''
while num > 0:
x = str(num % 2) + x
num /= 2
return x
double_base_palindromes = set()
for num in range(1000):
p1 = int(str(num) + str(num)[-2::-1])
p2 = int(str(num) + str(num)[::-1])
if is_palindromic(decimal2binary(p1)):
double_base_palindromes.add(p1)
if is_palindromic(decimal2binary(p2)):
double_base_palindromes.add(p2)
print sum(double_base_palindromes)
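# Added sanity check (comment only, not in the original solution): the worked
# example from the problem statement holds here, since decimal2binary(585)
# returns '1001001001' and both is_palindromic('585') and
# is_palindromic('1001001001') evaluate to True.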
|
suyashphadtare/vestasi-erp-1
|
erpnext/erpnext/accounts/doctype/sales_invoice/pos.py
|
Python
|
agpl-3.0
| 1,595
| 0.022571
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_items(price_list, sales_or_purchase, item=None, item_group=None):
condition = ""
args = {"price_list": price_list}
if sales_or_purchase == "Sales":
condition = "i.is_sales_item='Yes'"
else:
condition = "i.is_purchase_item='Yes'"
if item_group and item_group != "All Item Groups":
condition += " and i.item_group='%s'" % item_group.replace("'", "\'")
if item:
condition += " and CONCAT(i.name, i.item_name) like %(name)s"
args["name"] = "%%%s%%" % item
return frappe.db.sql("""select i.name, i.item_name, i.image,
item_det.price_list_rate, item_det.currency
from `tabItem` i LEFT JOIN
(select item_code, price_list_rate, currency from
`tabItem Price` where price_list=%s) item_det
ON
item_det.item_code=i.name
where
%s""" % ('%(price_list)s', condition), args, as_dict=1)
@frappe.whitelist()
def get_item_code(barcode_serial_no):
input_via = "serial_no"
	item_code = frappe.db.sql("""select name, item_code from `tabSerial No` where
		name=%s""", (barcode_serial_no), as_dict=1)
	if not item_code:
		input_via = "barcode"
		item_code = frappe.db.sql("""select name from `tabItem` where barcode=%s""",
(barcode_serial_no), as_dict=1)
if item_code:
return item_code, input_via
else:
frappe.throw(frappe._("Invalid Barcode or Serial No"))
@frappe.whitelist()
def get_mode_of_payment():
return frappe.get_list("Mode of Payment")
|
ecino/compassion-switzerland
|
sponsorship_switzerland/models/correspondence.py
|
Python
|
agpl-3.0
| 583
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Samy Bucher <samy.bucher@outlook.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models
from odoo import fields
class Correspondence(models.Model):
_inherit = 'correspondence'
    gift_id = fields.Many2one('sponsorship.gift', 'Gift')
|
undertherain/vsmlib
|
vsmlib/benchmarks/__main__.py
|
Python
|
apache-2.0
| 70
| 0
|
from .evaluate_all import main
if __name__ == "__main__":
main()
|
plotly/plotly.py
|
packages/python/plotly/codegen/__init__.py
|
Python
|
mit
| 11,939
| 0.00067
|
import json
import os
import os.path as opath
import shutil
import subprocess
from codegen.datatypes import build_datatype_py, write_datatype_py
from codegen.compatibility import (
write_deprecated_datatypes,
write_graph_objs_graph_objs,
DEPRECATED_DATATYPES,
)
from codegen.figure import write_figure_classes
from codegen.utils import (
TraceNode,
PlotlyNode,
LayoutNode,
FrameNode,
write_init_py,
ElementDefaultsNode,
build_from_imports_py,
)
from codegen.validators import (
write_validator_py,
write_data_validator_py,
get_data_validator_instance,
)
# Import notes
# ------------
# Nothing from the plotly/ package should be imported during code
# generation. This introduces a lot of complexity regarding when imports
# happen relative to when various stages of code generation occur. Instead,
# helpers that are only needed during code generation should reside in the
# codegen/ package, and helpers used both during code generation and at
# runtime should reside in the _plotly_utils/ package.
# ----------------------------------------------------------------------------
def preprocess_schema(plotly_schema):
"""
Central location to make changes to schema before it's seen by the
PlotlyNode classes
"""
# Update template
# ---------------
layout = plotly_schema["layout"]["layoutAttributes"]
# Create codegen-friendly template scheme
template = {
"data": {
trace + "s": {"items": {trace: {}}, "role": "object"}
for trace in plotly_schema["traces"]
},
"layout": {},
"description": """\
Default attributes to be applied to the plot.
This should be a dict with format: `{'layout': layoutTemplate, 'data':
{trace_type: [traceTemplate, ...], ...}}` where `layoutTemplate` is a dict
matching the structure of `figure.layout` and `traceTemplate` is a dict
matching the structure of the trace with type `trace_type` (e.g. 'scatter').
Alternatively, this may be specified as an instance of
plotly.graph_objs.layout.Template.
Trace templates are applied cyclically to
traces of each type. Container arrays (eg `annotations`) have special
handling: An object ending in `defaults` (eg `annotationdefaults`) is
applied to each array item. But if an item has a `templateitemname`
key we look in the template array for an item with matching `name` and
apply that instead. If no matching `name` is found we mark the item
invisible. Any named template item not referenced is appended to the
end of the array, so this can be used to add a watermark annotation or a
logo image, for example. To omit one of these items on the plot, make
an item with matching `templateitemname` and `visible: false`.""",
}
layout["template"] = template
    # Rename concentrationscales to colorscale to match conventions
items = plotly_schema["traces"]["sankey"]["attributes"]["link"]["colorscales"][
"items"
]
if "concentrationscales" in items:
items["colorscale"] = items.pop("concentrationscales")
def perform_codegen():
# Set root codegen output directory
# ---------------------------------
# (relative to project root)
abs_file_path = opath.realpath(__file__)
packages_py = opath.dirname(opath.dirname(opath.dirname(abs_file_path)))
outdir = opath.join(packages_py, "plotly", "plotly")
# Delete prior codegen output
# ---------------------------
validators_pkgdir = opath.join(outdir, "validators")
if opath.exists(validators_pkgdir):
shutil.rmtree(validators_pkgdir)
graph_objs_pkgdir = opath.join(outdir, "graph_objs")
if opath.exists(graph_objs_pkgdir):
shutil.rmtree(graph_objs_pkgdir)
# plotly/datatypes is not used anymore, but was at one point so we'll
# still delete it if we find it in case a developer is upgrading from an
# older version
datatypes_pkgdir = opath.join(outdir, "datatypes")
if opath.exists(datatypes_pkgdir):
shutil.rmtree(datatypes_pkgdir)
# Load plotly schema
# ------------------
plot_schema_path = opath.join(
packages_py, "plotly", "codegen", "resources", "plot-schema.json"
)
with open(plot_schema_path, "r") as f:
plotly_schema = json.load(f)
# Preprocess Schema
# -----------------
preprocess_schema(plotly_schema)
# Build node lists
# ----------------
# ### TraceNode ###
base_traces_node = TraceNode(plotly_schema)
compound_trace_nodes = PlotlyNode.get_all_compound_datatype_nodes(
plotly_schema, TraceNode
)
all_trace_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, TraceNode)
# ### LayoutNode ###
compound_layout_nodes = PlotlyNode.get_all_compound_datatype_nodes(
plotly_schema, LayoutNode
)
layout_node = compound_layout_nodes[0]
all_layout_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, LayoutNode)
subplot_nodes = [
node
for node in layout_node.child_compound_datatypes
if node.node_data.get("_isSubplotObj", False)
]
layout_array_nodes = [
node
for node in layout_node.child_compound_datatypes
if node.is_array_element and node.has_child("xref") and node.has_child("yref")
]
# ### FrameNode ###
compound_frame_nodes = PlotlyNode.get_all_compound_datatype_nodes(
plotly_schema, FrameNode
)
frame_node = compound_frame_nodes[0]
all_frame_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, FrameNode)
# ### All nodes ###
all_datatype_nodes = all_trace_nodes + all_layout_nodes + all_frame_nodes
all_compound_nodes = [
node
for node in all_datatype_nodes
if node.is_compound and not isinstance(node, ElementDefaultsNode)
]
# Write out validators
# --------------------
# # ### Layout ###
for node in all_layout_nodes:
write_validator_py(outdir, node)
# ### Trace ###
for node in all_trace_nodes:
write_validator_py(outdir, node)
# ### Frames ###
for node in all_frame_nodes:
write_validator_py(outdir, node)
# ### Data (traces) validator ###
write_data_validator_py(outdir, base_traces_node)
# Alls
# ----
alls = {}
# Write out datatypes
# -------------------
for node in all_compound_nodes:
write_datatype_py(outdir, node)
# ### Deprecated ###
# These are deprecated legacy datatypes like graph_objs.Marker
write_deprecated_datatypes(outdir)
# Write figure class to graph_objs
# --------------------------------
data_validator = get_data_validator_instance(base_traces_node)
layout_validator = layout_node.get_validator_instance()
frame_validator = frame_node.get_validator_instance()
write_figure_classes(
outdir,
base_traces_node,
data_validator,
layout_validator,
frame_validator,
subplot_nodes,
layout_array_nodes,
)
# Write validator __init__.py files
# ---------------------------------
# ### Write __init__.py files for each validator package ###
validator_rel_class_imports = {}
for node in all_datatype_nodes:
if node.is_mapped:
continue
key = node.parent_path_parts
validator_rel_class_imports.setdefault(key, []).append(
f"._{node.name_property}.{node.name_validator_class}"
)
# Add Data validator
root_validator_pairs = validator_rel_class_imports[()]
root_validator_pairs.append("._data.DataValidator")
# Output validator __init__.py files
validators_pkg = opath.join(outdir, "validators")
for path_parts, rel_classes in validator_rel_class_imports.items():
write_init_py(validators_pkg, path_parts, [], rel_classes)
# Write datatype __init__.py files
# --------------------------------
datatype_rel_class_imports = {}
datatype_rel_module_imports = {}
for node in all_compound_nodes:
key = node.parent_path_parts
# class import
datatype_rel_class_imports.setdefault(key, []).append(
f"._{node.name_undercase}.{no
|
saltstack/salt
|
salt/states/grains.py
|
Python
|
apache-2.0
| 15,945
| 0.001568
|
"""
Manage grains on the minion
===========================
This state allows for grains to be set.
Grains set or altered with this module are stored in the 'grains'
file on the minions, By default, this file is located at: ``/etc/salt/grains``
.. note::
This does **NOT** override any grains set in the minion config file.
"""
import re
from salt.defaults import DEFAULT_TARGET_DELIM
def exists(name, delimiter=DEFAULT_TARGET_DELIM):
"""
Ensure that a grain is set
name
The grain name
delimiter
A delimiter different from the default can be provided.
Check whether a grain exists. Does not attempt to check or set the value.
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": "Grain exists"}
_non_existent = object()
existing = __salt__["grains.get"](name, _non_existent)
if existing is _non_existent:
ret["result"] = False
ret["comment"] = "Grain does not exist"
return ret
def make_hashable(list_grain, result=None):
"""
Ensure that a list grain is hashable.
list_grain
The list grain that should be hashable
result
This function is recursive, so it must be possible to use a
sublist as parameter to the function. Should not be used by a caller
outside of the function.
Make it possible to compare two list grains to each other if the list
contains complex objects.
"""
result = result or set()
for sublist in list_grain:
if type(sublist) == list:
make_hashable(sublist, result)
else:
result.add(frozenset(sublist))
return result
def present(name, value, delimiter=DEFAULT_TARGET_DELIM, force=False):
"""
Ensure that a grain is set
.. versionchanged:: 2015.8.2
name
The grain name
value
The value to set on the grain
force
If force is True, the existing grain will be overwritten
regardless of its existing or provided value type. Defaults to False
.. versionadded:: 2015.8.2
delimiter
A delimiter different from the default can be provided.
.. versionadded:: 2015.8.2
It is now capable to set a grain to a complex value (ie. lists and dicts)
and supports nested grains as well.
If the grain does not yet exist, a new grain is set to the given value. For
a nested grain, the necessary keys are created if they don't exist. If
a given key is an existing value, it will be converted, but an existing value
different from the given key will fail the state.
If the grain with the given name exists, its value is updated to the new
value unless its existing or provided value is complex (list or dict). Use
`force: True` to overwrite.
.. code-block:: yaml
cheese:
grains.present:
- value: edam
nested_grain_with_complex_value:
grains.present:
- name: icinga:Apache SSL
- value:
- command: check_https
- params: -H localhost -p 443 -S
with,a,custom,delimiter:
grains.present:
- value: yay
- delimiter: ','
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
_non_existent = object()
existing = __salt__["grains.get"](name, _non_existent)
if existing == value:
ret["comment"] = "Grain is already set"
return ret
if __opts__["test"]:
ret["result"] = None
if existing is _non_existent:
ret["comment"] = "Grain {} is set to be added".format(name)
ret["changes"] = {"new": name}
else:
ret["comment"] = "Grain {} is set to be changed".format(name)
ret["changes"] = {"changed": {name: value}}
return ret
ret = __salt__["grains.set"](name, value, force=force)
if ret["result"] is True and ret["changes"] != {}:
ret["comment"] = "Set grain {} to {}".format(name, value)
ret["name"] = name
return ret
def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM):
"""
.. versionadded:: 2014.1.0
Ensure the value is present in the list-type grain. Note: If the grain that is
provided in ``name`` is not present on the system, this new grain will be created
with the corresponding provided value.
name
The grain name.
value
The value is present in the list type grain.
delimiter
A delimiter different from the default ``:`` can be provided.
.. versionadded:: 2015.8.2
The grain should be `list type <http://docs.python.org/2/tutorial/datastructures.html#data-structures>`_
.. code-block:: yaml
roles:
grains.list_present:
- value: web
For multiple grains, the syntax looks like:
.. code-block:: yaml
roles:
grains.list_present:
- value:
- web
- dev
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
grain = __salt__["grains.get"](name)
if grain:
# check whether grain is a list
if not isinstance(grain, list):
ret["result"] = False
ret["comment"] = "Grain {} is not a valid list".format(name)
return ret
if isinstance(value, list):
if make_hashable(value).issubset(
make_hashable(__salt__["grains.get"](name))
):
ret["comment"] = "Value {1} is already in grain {0}".format(name, value)
return ret
elif name in __context__.get("pending_grains", {}):
# elements common to both
intersection = set(value).intersection(
__context__.get("pending_grains", {})[name]
)
if intersection:
value = list(
set(value).difference(__context__["pending_grains"][name])
)
ret[
"comment"
] = 'Removed value {} from update due to context found in "{}".\n'.format(
value, name
)
if "pending_grains" not in __context__:
__context__["pending_grains"] = {}
if name not in __context__["pending_grains"]:
__context__["pending_grains"][name] = set()
__context__["pending_grains"][name].update(value)
else:
if value in grain:
ret["comment"] = "Value {1} is already in grain {0}".format(name, value)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Value {1} is set to be appended to grain {0}".format(
name, value
)
ret["changes"] = {"new": grain}
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Grain {} is set to be added".format(name)
ret["changes"] = {"new": grain}
return ret
new_grains = __salt__["grains.append"](name, value)
if isinstance(value, list):
if not set(value).issubset(set(__salt__["grains.get"](name))):
ret["result"] = False
ret["comment"] = "Failed append value {1} to grain {0}".format(name, value)
return ret
else:
if value not in __salt__["grains.get"](name, delimiter=DEFAULT_TARGET_DELIM):
ret["result"] = False
ret["comment"] = "Failed append value {1} to grain {0}".format(name, value)
return ret
ret["comment"] = "Append value {1} to grain {0}".format(name, value)
ret["changes"] = {"new": new_grains}
return ret
def list_absent(name, value, delimiter=DEFAULT_TARGET_DELIM):
"""
Delete a value from a grain formed as a list.
.. versionadded:: 2014.1.0
name
The grain name.
value
The value to delete from the grain list.
d
|
marrow/monitor.collector
|
marrow/monitor/collector/ext/load.py
|
Python
|
mit
| 1,855
| 0.012412
|
# encoding: utf-8
import os
import subprocess
import mongoengine as db
def generic_backend():
"""Allow Python to handle the details of load average discovery.
This is the fastest method, but may not be portable everywhere.
Testing on a Linux 2.6.35 Rackspace Cloud server: 17µsec.
"""
yield os.getloadavg()
def linux_backend():
"""Process the contents of /proc/loadavg.
This is the second-slowest method and is only viable on Linux hosts.
Testing on a Linux 2.6.35 Rackspace Cloud server: 40µsec.
"""
with open('/proc/loadavg', 'r') as fh:
yield [float(i) for i in fh.read().split(' ', 3)[:3]]
def posix_backend():
"""Process the output of the uptime command.
This is by far the slowest method, only to be used under dire circumstances.
Testing on a Linux 2.6.35 Rackspace Cloud server: 6.9msec.
TODO: Pass the subprocess call back up to the reactor to wait for data.
"""
    yield [float(i) for i in subprocess.check_output(['uptime']).rpartition(': ')[2].strip().split(' ', 3)[:3]]
_map = {'generic': generic_backend, 'linux': linux_backend, 'posix': posix_backend, None: generic_backend}
class LoadMixIn(object):
load = db.ListField(db.FloatField, verbose_name="Load Average", default=list)
class LoadExtension(object):
def __init__(self, config):
super(LoadExtension, self).__init__()
# TODO: Standard trifecta.
self.backend = _map[config.get('backend')]
@property
def mixin(self):
return LoadMixIn
def start(self):
pass
def stop(self):
pass
def __call__(self, rec):
for chunk in self.backend():
if type(chunk) != list:
yield chunk
rec.load = chunk
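# Minimal usage sketch (added for illustration; the config dict and the record
# stand-in below are assumptions, not part of the original module):
if __name__ == '__main__':
    class _Record(object):
        load = None
    ext = LoadExtension({'backend': 'generic'})
    record = _Record()
    list(ext(record))  # drives the generator; record.load now holds the load averages
    print(record.load)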
|
drfreemayn/ml-testing
|
sex-dice/GIFImage.py
|
Python
|
gpl-2.0
| 5,891
| 0.006451
|
"""GIFImage by Matthew Roe"""
import Image
import pygame
from pygame.locals import *
import time
class GIFImage(object):
def __init__(self, filename):
self.filename = filename
self.image = Image.open(filename)
self.frames = []
self.get_frames()
self.cur = 0
self.ptime = time.time()
self.running = True
self.breakpoint = len(self.frames)-1
self.startpoint = 0
self.reversed = False
def get_rect(self):
return pygame.rect.Rect((0,0), self.image.size)
def get_frames(self):
image = self.image
pal = image.getpalette()
base_palette = []
for i in range(0, len(pal), 3):
rgb = pal[i:i+3]
base_palette.append(rgb)
all_tiles = []
try:
while 1:
if not image.tile:
image.seek(0)
if image.tile:
all_tiles.append(image.tile[0][3][0])
image.seek(image.tell()+1)
except EOFError:
image.seek(0)
all_tiles = tuple(set(all_tiles))
try:
while 1:
try:
duration = image.info["duration"]
except:
duration = 100
                duration *= .001  # convert from milliseconds to seconds
cons = False
x0, y0, x1, y1 = (0, 0) + image.size
if image.tile:
tile = image.tile
else:
image.seek(0)
tile = image.tile
if len(tile) > 0:
x0, y0, x1, y1 = tile[0][1]
if all_tiles:
if all_tiles in ((6,), (7,)):
cons = True
pal = image.getpalette()
palette = []
for i in range(0, len(pal), 3):
rgb = pal[i:i+3]
palette.append(rgb)
elif all_tiles in ((7, 8), (8, 7)):
pal = image.getpalette()
palette = []
for i in range(0, len(pal), 3):
rgb = pal[i:i+3]
palette.append(rgb)
else:
palette = base_palette
else:
palette = base_palette
pi = pygame.image.fromstring(image.tostring(), image.size, image.mode)
pi.set_palette(palette)
if "transparency" in image.info:
pi.set_colorkey(image.info["transparency"])
pi2 = pygame.Surface(image.size, SRCALPHA)
if cons:
for i in self.frames:
pi2.blit(i[0], (0,0))
pi2.blit(pi, (x0, y0), (x0, y0, x1-x0, y1-y0))
self.frames.append([pi2, duration])
image.seek(image.tell()+1)
except EOFError:
pass
def render(self, screen, pos):
if self.running:
if time.time() - self.ptime > self.frames[self.cur][1]:
if self.reversed:
self.cur -= 1
if self.cur < self.startpoint:
self.cur = self.breakpoint
else:
self.cur += 1
if self.cur > self.breakpoint:
self.cur = self.startpoint
self.ptime = time.time()
screen.blit(self.frames[self.cur][0], pos)
def seek(self, num):
self.cur = num
if self.cur < 0:
self.cur = 0
if self.cur >= len(self.frames):
self.cur = len(self.frames)-1
def set_bounds(self, start, end):
if start < 0:
start = 0
if start >= len(self.frames):
start = len(self.frames) - 1
if end < 0:
end = 0
if end >= len(self.frames):
end = len(self.frames) - 1
if end < start:
end = start
self.startpoint = start
self.breakpoint = end
def pause(self):
self.running = False
def play(self):
self.running = True
def rewind(self):
self.seek(0)
def fastforward(self):
self.seek(self.length()-1)
def get_height(self):
return self.image.size[1]
def get_width(self):
return self.image.size[0]
def get_size(self):
return self.image.size
def length(self):
return len(self.frames)
def reverse(self):
self.reversed = not self.reversed
def reset(self):
self.cur = 0
self.ptime = time.time()
self.reversed = False
def copy(self):
        new = GIFImage(self.filename)
new.running = self.running
new.breakpoint = self.breakpoint
new.startpoint = self.startpoint
new.cur = self.cur
new.ptime = self.ptime
new.reversed = self.reversed
return new
##def main():
## pygame.init()
##    screen = pygame.display.set_mode((640, 480))
##
## hulk = GIFImage("hulk.gif")
## football = GIFImage("football.gif")
## hulk2 = hulk.copy()
## hulk2.reverse()
## hulk3 = hulk.copy()
## hulk3.set_bounds(0, 2)
## spiderman = GIFImage("spiderman7.gif")
##
## while 1:
## for event in pygame.event.get():
## if event.type == QUIT:
## pygame.quit()
## return
##
## screen.fill((255,255,255))
## hulk.render(screen, (50, 0))
## hulk2.render(screen, (50, 150))
## hulk3.render(screen, (50, 300))
## football.render(screen, (200, 50))
## spiderman.render(screen, (200, 150))
## pygame.display.flip()
##
##if __name__ == "__main__":
## main()
|
TonyJenkins/cfs2160-python
|
04classes/Bank/bank_account.py
|
Python
|
unlicense
| 1,282
| 0.0117
|
#!/usr/bin/env python3
# bank_account.py
#
# Simple Bank Account class example.
#
# AMJ
# 2017-04-01
from random import randint
class BankAccount:
def __init__ (self, account_holder, has_overdraft):
self.account_number = self.generate_account_number ()
self.account_holder = account_holder
self.has_overdraft = has_overdraft
self.__balance = 0.0
self.is_active = True
@property
def balance (self):
return self.__balance
def deposit (self, deposit_amount):
try:
if deposit_amount > 0:
self.__balance += deposit_amount
except TypeError:
pass
    def withdraw (self, withdraw_amount):
        try:
            if withdraw_amount <= self.__balance or self.has_overdraft:
                self.__balance -= withdraw_amount
        except TypeError:
            pass
    def deactivate (self):
        self.is_active = False
def activate (self):
self.is_active = True
def generate_account_number (self):
s = ''
for i in range (9):
s += str (randint (0, 9))
return s
def __str__ (self):
return "Account: {:} Holder: {:} Balance: {:}".format (self.account_number, self.account_holder, self.balance)
|
pobear/django-xadmin
|
xadmin/util.py
|
Python
|
bsd-3-clause
| 19,558
| 0.001534
|
import django
from django.db import models
from django.db.models.sql.query import LOOKUP_SEP
from django.db.models.deletion import Collector
# from django.db.models.related import RelatedObject
from django.db.models.fields.related import ForeignObjectRel as RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.encoding import force_unicode, smart_unicode, smart_str
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse
from django.conf import settings
from django.forms import Media
from django.utils.translation import get_language
import datetime
import decimal
if 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
from django.contrib.staticfiles.templatetags.staticfiles import static
else:
from django.templatetags.static import static
try:
import json
except ImportError:
from django.utils import simplejson as json
try:
from django.utils.timezone import template_localtime as tz_localtime
except ImportError:
from django.utils.timezone import localtime as tz_localtime
try:
from django.contrib.auth import get_user_model
User = get_user_model()
username_field = User.USERNAME_FIELD
except Exception:
from django.contrib.auth.models import User
username_field = 'username'
def xstatic(*tags):
from vendors import vendors
node = vendors
fs = []
lang = get_language()
for tag in tags:
try:
for p in tag.split('.'):
node = node[p]
except Exception, e:
if tag.startswith('xadmin'):
file_type = tag.split('.')[-1]
if file_type in ('css', 'js'):
node = "xadmin/%s/%s" % (file_type, tag)
else:
raise e
else:
raise e
if type(node) in (str, unicode):
files = node
else:
mode = 'dev'
if not settings.DEBUG:
mode = getattr(settings, 'STATIC_USE_CDN',
False) and 'cdn' or 'production'
if mode == 'cdn' and mode not in node:
mode = 'production'
if mode == 'production' and mode not in node:
mode = 'dev'
files = node[mode]
files = type(files) in (list, tuple) and files or [files, ]
fs.extend([f % {'lang': lang.replace('_', '-')} for f in files])
return [f.startswith('http://') and f or static(f) for f in fs]
def vendor(*tags):
media = Media()
for tag in tags:
file_type = tag.split('.')[-1]
files = xstatic(tag)
if file_type == 'js':
media.add_js(files)
elif file_type == 'css':
media.add_css({'screen': files})
return media
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.fields.related.ForeignObjectRel) and
not field.field.unique)):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull') and type(value) == str:
if value.lower() in ('', 'false'):
value = False
else:
value = True
return value
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' characters. Similar to urllib.quote, except that the
quoting is slightly different so that it doesn't get automatically
unquoted by the Web browser.
"""
if not isinstance(s, basestring):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
if not isinstance(s, basestring):
return s
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
    Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogenous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
    def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = '%s.delete_%s' % (opts.app_label, opts.model_name)
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe(u'<span class="label label-info">%s:</span> <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return mark_safe(u'<span class="label label-info">%s:</span> %s' %
(escape(capfirst(opts.verbose_name)),
escape(obj)))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr and hasattr(obj, source_attr):
self.add_edge(getattr(obj, source_attr), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError, e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nest
|
brchiu/tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
Python
|
apache-2.0
| 165,524
| 0.007812
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.test_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
      y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
    y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.test_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gr
|
endlessm/chromium-browser
|
third_party/catapult/dashboard/dashboard/pinpoint/models/evaluators/job_serializer.py
|
Python
|
bsd-3-clause
| 10,578
| 0.005105
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import itertools
from dashboard.pinpoint.models import change as change_module
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.pinpoint.models.tasks import performance_bisection
from dashboard.pinpoint.models.tasks import read_value
from dashboard.pinpoint.models.tasks import run_test
class Serializer(evaluators.DispatchByTaskType):
"""Serializes a task graph asso
|
ciated with a job.
This Serializer follows the same API contract of an Evaluator, which applies
specific transformations based on the type of a task in the graph.
The end state of the context argument is a mapping with the following schema:
{
'comparison_mode': <string>
'metric': <string>
'quests': [<string>]
'state': [
{
'attempts': [
{
'executions': [
{
'completed': <boolean>
'exception': <string>
'details': [
{
'key': <string>
'value': <string>
'url': <string>
}
]
}
]
}
]
'change': { ... }
'comparisons': {
'next': <string|None>
'prev': <string|None>
}
'result_values': [
<float>
]
}
]
}
  NOTE: The 'quests' and 'executions' in the schema are legacy names, which
  refer to the previous quest abstractions from which the tasks and evaluators
  are derived. We keep the names in the schema to ensure that we are
backwards-compatible with what the consumers of the data expect (i.e. the Web
UI).
"""
def __init__(self):
super(Serializer, self).__init__({
'find_isolate':
evaluators.SequenceEvaluator(
[find_isolate.Serializer(), TaskTransformer]),
'run_test':
evaluators.SequenceEvaluator(
[run_test.Serializer(), TaskTransformer]),
'read_value':
evaluators.SequenceEvaluator(
[read_value.Serializer(), TaskTransformer]),
'find_culprit':
evaluators.SequenceEvaluator(
[performance_bisection.Serializer(), AnalysisTransformer]),
})
def __call__(self, task, event, context):
# First we delegate to the task-specific serializers, and have the
# domain-aware transformers canonicalise the data in the context. We
# then do a dictionary merge following a simple protocol for editing a
# single context. This way the transformers can output a canonical set
# of transformations to build up the (global) context.
local_context = {}
super(Serializer, self).__call__(task, event, local_context)
# What we expect to see in the local context is data in the following
# form:
#
# {
# # The 'state' key is required to identify to which change and which
# # state we should be performing the actions.
# 'state': {
# 'change': {...}
# 'quest': <string>
#
# # In the quest-based system, we end up with different "execution"
# # details, which come in "quest" order. In the task-based
    #         # evaluation model, we use the 'index' in the 'add_details'
# # sub-object to identify the index in the details.
# 'add_execution': {
# 'add_details': {
# 'index': <int>
# ...
# }
# ...
# }
#
# # This allows us to accumulate the resulting values we encounter
# # associated with the change.
# 'append_result_values': [<float>]
#
# # This allows us to set the comparison result for this change in
# # context of other changes.
# 'set_comparison': {
# 'next': <string|None>,
# 'prev': <string|None>,
# }
# }
#
# # If we see the 'order_changes' key in the local context, then
# # that means we can sort the states according to the changes as they
# # appear in the embedded 'changes' list.
# 'order_changes': {
# 'changes': [..]
# }
#
# # If we see the 'set_parameters' key in the local context, then
# # we can set the overall parameters we're looking to compare and
# # convey in the results.
# 'set_parameters': {
# 'comparison_mode': <string>
# 'metric': <string>
# }
# }
#
# At this point we process the context to update the global context
# following the protocol defined above.
if 'state' in local_context:
modification = local_context['state']
states = context.setdefault('state', [])
quests = context.setdefault('quests', [])
# We need to find the existing state which matches the quest and the
# change. If we don't find one, we create the first state entry for that.
state_index = None
change = modification.get('change')
for index, state in enumerate(states):
if state.get('change') == change:
state_index = index
break
if state_index is None:
states.append({'attempts': [{'executions': []}], 'change': change})
state_index = len(states) - 1
quest = modification.get('quest')
try:
quest_index = quests.index(quest)
except ValueError:
quests.append(quest)
quest_index = len(quests) - 1
add_execution = modification.get('add_execution')
append_result_values = modification.get('append_result_values')
attempt_index = modification.get('index', 0)
state = states[state_index]
if add_execution:
attempts = state['attempts']
while len(attempts) < attempt_index + 1:
attempts.append({'executions': []})
executions = state['attempts'][attempt_index]['executions']
while len(executions) < quest_index + 1:
executions.append(None)
executions[quest_index] = dict(add_execution)
if append_result_values:
state.setdefault('result_values', []).extend(append_result_values)
if 'order_changes' in local_context:
# Here, we'll sort the states according to their order of appearance in
# the 'order_changes' list.
states = context.get('state', [])
if states:
state_changes = {
change_module.ReconstituteChange(state.get('change'))
for state in states
}
order_changes = local_context.get('order_changes', {})
all_changes = order_changes.get('changes', [])
comparisons = order_changes.get('comparisons', [])
result_values = order_changes.get('result_values', [])
change_index = {
change: index for index, change in enumerate(
known_change for known_change in all_changes
if known_change in state_changes)
}
ordered_states = [None] * len(states)
for state in states:
index = change_index.get(
change_module.ReconstituteChange(state.get('change')))
if index is not None:
ordered_states[index] = state
# Merge in the comparisons as they appear for the ordered_states.
for state, comparison, result in itertools.izip_longest(
ordered_states, comparisons or [], result_values or []):
if state is None:
continue
if comparison is not None:
state['comparisons'] = comparison
state['result_values'] = result or []
context['state'] = ordered_states
context['difference_count'] =
|
yotchang4s/cafebabepy
|
src/main/python/idlelib/outwin.py
|
Python
|
bsd-3-clause
| 4,385
| 0.000456
|
import re
from tkinter import *
import tkinter.messagebox as tkMessageBox
from idlelib.editor import EditorWindow
from idlelib import iomenu
class OutputWindow(EditorWindow):
"""An editor window that can serve as an output file.
Also the future base class for the Python shell window.
This class has no input facilities.
"""
def __init__(self, *args):
EditorWindow.__init__(self, *args)
self.text.bind("<<goto-file-line>>", self.goto_file_line)
# Cus
|
tomize EditorWindow
def ispythonsource(self, filename):
# No colorization needed
return 0
def short_title(self):
return "Output"
def maybesave(self):
# Override base class method -- don't ask any questions
if self.get_saved():
return "yes"
else:
|
return "no"
# Act as output file
def write(self, s, tags=(), mark="insert"):
        if isinstance(s, bytes):
s = s.decode(iomenu.encoding, "replace")
self.text.insert(mark, s, tags)
self.text.see(mark)
self.text.update()
return len(s)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
# Our own right-button menu
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Go to file/line", "<<goto-file-line>>", None),
]
file_line_pats = [
# order of patterns matters
r'file "([^"]*)", line (\d+)',
r'([^\s]+)\((\d+)\)',
r'^(\s*\S.*?):\s*(\d+):', # Win filename, maybe starting with spaces
r'([^\s]+):\s*(\d+):', # filename or path, ltrim
r'^\s*(\S.*?):\s*(\d+):', # Win abs path with embedded spaces, ltrim
]
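    # A few line formats the patterns above are meant to recognize
    # (illustrative examples; matching is case-insensitive, see below):
    #   File "spam.py", line 3
    #   spam.py(3)
    #   spam.py:3: some message
    #   C:\Program Files\spam.py: 3: some message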
file_line_progs = None
def goto_file_line(self, event=None):
if self.file_line_progs is None:
l = []
for pat in self.file_line_pats:
l.append(re.compile(pat, re.IGNORECASE))
self.file_line_progs = l
# x, y = self.event.x, self.event.y
# self.text.mark_set("insert", "@%d,%d" % (x, y))
line = self.text.get("insert linestart", "insert lineend")
result = self._file_line_helper(line)
if not result:
# Try the previous line. This is handy e.g. in tracebacks,
# where you tend to right-click on the displayed source line
line = self.text.get("insert -1line linestart",
"insert -1line lineend")
result = self._file_line_helper(line)
if not result:
tkMessageBox.showerror(
"No special line",
"The line you point at doesn't look like "
"a valid file name followed by a line number.",
parent=self.text)
return
filename, lineno = result
edit = self.flist.open(filename)
edit.gotoline(lineno)
def _file_line_helper(self, line):
for prog in self.file_line_progs:
match = prog.search(line)
if match:
filename, lineno = match.group(1, 2)
try:
f = open(filename, "r")
f.close()
break
except OSError:
continue
else:
return None
try:
return filename, int(lineno)
except TypeError:
return None
# These classes are currently not used but might come in handy
class OnDemandOutputWindow:
tagdefs = {
# XXX Should use IdlePrefs.ColorPrefs
"stdout": {"foreground": "blue"},
"stderr": {"foreground": "#007700"},
}
def __init__(self, flist):
self.flist = flist
self.owin = None
def write(self, s, tags, mark):
if not self.owin:
self.setup()
self.owin.write(s, tags, mark)
def setup(self):
self.owin = owin = OutputWindow(self.flist)
text = owin.text
for tag, cnf in self.tagdefs.items():
if cnf:
text.tag_configure(tag, **cnf)
text.tag_raise('sel')
self.write = self.owin.write
|
sunlightlabs/openstates
|
scrapers/nc/__init__.py
|
Python
|
gpl-3.0
| 12,386
| 0.000242
|
import lxml
from utils import State
from .people import NCPersonScraper
from .bills import NCBillScraper
# from .committees import NCCommitteeScraper
class NorthCarolina(State):
scrapers = {
"people": NCPersonScraper,
# 'committees': NCCommitteeScraper,
"bills": NCBillScraper,
}
legislative_sessions = [
{
"_scraped_name": "1985-1986 Session",
"classification": "primary",
"identifier": "1985",
"name": "1985-1986 Session",
"start_date": "1985-02-05",
"end_date": "1986-07-18",
},
{
"_scraped_name": "1986 Special Session",
"classification": "special",
"identifier": "1985E1",
"name": "1986 Special Session",
"start_date": "1986-02-18",
"end_date": "1986-06-05",
},
{
"_scraped_name": "1987-1988 Session",
"classification": "primary",
"identifier": "1987",
"name": "1987-1988 Session",
"start_date": "1987-02-09",
"end_date": "1988-07-12",
},
{
"_scraped_name": "1989-1990 Session",
"classification": "primary",
"identifier": "1989",
"name": "1989-1990 Session",
"start_date": "1989-01-11",
"end_date": "1990-07-28",
},
{
"_scraped_name": "1989 Special Session",
"classification": "special",
"identifier": "1989E1",
"name": "1989 Extra Session",
"start_date": "1989-12-07",
"end_date": "1989-12-07",
},
{
"_scraped_name": "1990 Special Session",
"classification": "special",
"identifier": "1989E2",
"name": "1990 Extra Session",
"start_date": "1990-03-06",
"end_date": "1990-03-06",
},
{
"_scraped_name": "1991-1992 Session",
"classification": "primary",
"identifier": "1991",
"name": "1991-1992 Session",
"start_date": "1991-01-30",
"end_date": "1992-07-25",
},
{
"_scraped_name": "1991 Special Session",
"classification": "special",
"identifier": "1991E1",
"name": "1991 Special Session",
"start_date": "1991-12-30",
"end_date": "1992-02-03",
},
{
"_scraped_name": "1993-1994 Session",
"classification": "primary",
"identifier": "1993",
"name": "1993-1994 Session",
"start_date": "1993-01-27",
"end_date": "1994-07-17",
},
{
"_scraped_name": "1994 Special Session",
"classification": "special",
"identifier": "1993E1",
"name": "1994 Special Session",
"start_date": "1994-02-08",
"end_date": "1994-03-26",
},
{
"_scraped_name": "1995-1996 Session",
"classification": "primary",
"identifier": "1995",
"name": "1995-1996 Session",
"start_date": "1995-01-25",
"end_date": "1996-06-21",
},
{
"_scraped_name": "1996 1st Special Session",
"classification": "special",
"identifier": "1995E1",
"name": "1996 Special Session 1",
"start_date": "1996-02-21",
"end_date": "1996-02-21",
},
{
"_scraped_name": "1996 2nd Special Session",
"classification": "special",
"identifier": "1995E2",
"name": "1996 Special Session 2",
"start_date": "1996-07-08",
"end_date": "1996-08-03",
},
{
"_scraped_name": "1997-1998 Session",
"classification": "primary",
"identifier": "1997",
"name": "1997-1998 Session",
"start_date": "1997-01-29",
"end_date": "1998-10-29",
},
{
"_scraped_name": "1998 Special Session",
"classification": "special",
"identifier": "1997E1",
"name": "1998 Special Session",
"start_date": "1998-03-24",
"end_date": "1998-05-11",
},
{
"_scraped_name": "1999-2000 Session",
"classification": "primary",
"identifier": "1999",
"name": "1999-2000 Session",
"start_date": "1999-01-27",
"end_date": "2000-07-13",
},
{
"_scraped_name": "1999 Special Session",
"classification": "special",
"identifier": "1999E1",
"name": "1999 Special Session",
"start_date": "1999-12-15",
"end_date": "1999-12-16",
},
{
"_scraped_name": "2000 Special Session",
"classification": "special",
"identifier": "1999E2",
"name": "2000 Special Session",
"start_date": "2000-04-05",
"end_date": "2000-04-05",
},
{
"_scraped_name": "2001-2002 Session",
"classification": "primary",
"identifier": "2001",
"name": "2001-2002 Session",
"start_date": "2001-01-24",
"end_date": "2002-11-13",
},
{
"_scraped_name": "2002 Extra Session",
"classification": "special",
"identifier": "2001E1",
"name": "2002 Extra Session on Redistricting",
"start_date": "2002-05-14",
"end_date": "2002-11-26",
},
{
"_scraped_name": "2003-2004 Session",
"classification": "primary",
"identifier": "2003",
"name": "2003-2004 Session",
"start_date": "2003-01-29",
"end_date": "2004-07-18",
},
{
"_scraped_name": "2003 Extra Session",
"classification": "special",
"identifier": "2003E1",
"name": "2003 Extra Session on Redistricting",
"start_date": "2003-11-24",
"end_date": "2003-11-25",
},
{
"_scraped_name": "2003 Extra Session on Economic Development Issues",
"classification": "special",
"identifier": "2003E2",
"name": "2003 Extra Session on Economic Development Issues",
"start_date": "2003-12-09",
"end_date": "2003-12-10",
},
{
"_scraped_name": "2004 Extra Session",
"classification": "special",
"identifier": "2003E3",
"name": "2004 Extra Session on Economic Development Issues",
"start_date": "2004-11-04",
"end_date": "2004-11-04",
},
{
"_scraped_name": "2005-2006 Session",
"classification": "primary",
"identifier": "2005",
"name": "2005-2006 Session",
"start_date": "2005-01-26",
"end_date": "2006-07-28",
},
{
"_scraped_name": "2007-2008 Session",
"classification": "primary",
"identifier": "2007",
"name": "2007-2008 Session",
"start_date": "2007-01-24",
"end_date": "2008-07-18",
},
{
"_scraped_name": "2007 Extra Session",
"classification": "special",
"identifier": "2007E1",
"name": "2007 Extra Session",
"start_date": "2007-09-10",
"end_date": "2007-09-
|
11",
},
{
"_scraped_name": "2008 Extra Session",
"classification": "special",
"identifier": "2007E2",
"name": "2008
|
Extra Session",
"start_date": "2008-03-20",
"end_date": "2008-03-20",
},
{
"_scraped_name": "2009-2010 Session",
"classification": "primary",
"identifier": "2009",
"name": "2009-2010 Session",
|
RRCKI/panda-server
|
pandaserver/brokerage/broker_util.py
|
Python
|
apache-2.0
| 16,126
| 0.014015
|
import re
import urllib
import time
import sys
import types
import datetime
import commands
import xml.dom.minidom
from config import panda_config
from pandalogger.LogWrapper import LogWrapper
from pandalogger.PandaLogger import PandaLogger
_log = PandaLogger().getLogger('broker_util')
# curl class
class _Curl:
# constructor
def __init__(self,useProxy=False):
# path to curl
self.path = 'curl --user-agent "dqcurl" -m 180'
# verification of the host certificate
self.verifyHost = False
# use proxy
if useProxy and panda_config.httpProxy != '':
self.path = 'env http_proxy=%s %s' % (panda_config.httpProxy,self.path)
# GET method
def get(self,url,data={}):
# make command
com = '%s --silent --get' % self.path
if not self.verifyHost:
com += ' --insecure'
# data
for key,value in data.iteritems():
com += ' --data "%s"' % urllib.urlencode({key:value})
com += ' %s' % url
# execute
_log.debug(com)
ret = commands.getstatusoutput(com)
_log.debug(ret)
return ret
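    # Illustrative command built by get() above (URL and LFN are placeholders,
    # assuming no proxy is configured):
    #   curl --user-agent "dqcurl" -m 180 --silent --get --insecure \
    #     --data "lfns=file1.root" http://lrc.example.org/lrc/PoolFileCatalog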
# get default storage
def _getDefaultStorage(baseURL,sePath=None,seProdPath={}):
_log.debug('_getDefaultStorage (%s %s %s)' % (baseURL,sePath,seProdPath))
# use se+seprodpath when baseURL=''
if baseURL=='':
# get token
match = re.search('^token:([^:]+):',sePath)
if match == None:
_log.error("could not get token from %s" % sePath)
return ""
token = match.group(1)
# get corresponding path
if not seProdPath.has_key(token):
_log.error("could not find path for % in %s" % (token,seProdPath))
return ""
# set se+seprodpath
out = sePath+seProdPath[token]
# append /
if not out.endswith('/'):
out += '/'
_log.debug(out)
else:
# check port to set proxy
useProxy = False
if panda_config.httpProxy != '':
pMatch = re.search('http://[^:/]+:*(\d+)/',baseURL)
if pMatch == None:
# default port
useProxy = True
elif pMatch.group(1) == '80':
# standard port
useProxy = True
# instantiate curl
curl = _Curl(useProxy)
# get default storage
url = baseURL + 'storages/default'
status,out = curl.get(url)
_log.debug(out)
if status != 0:
_log.error("could not get default storage from %s:%s" % (baseURL,status))
return ""
# parse
match = re.search('^[^/]+://[^/]+(/.+)$',out)
if match == None:
_log.error("could not parse string : %s" % out)
return ""
return match.group(1)
# get PoolFileCatalog
def _getPoolFileCatalog(lfns,dq2url):
_log.debug('_getPoolFileCatalog')
# check port to set proxy
useProxy = False
if panda_config.httpProxy != '':
pMatch = re.search('http://[^:/]+:*(\d+)/',dq2url)
if pMatch == None:
# default port
useProxy = True
elif pMatch.group(1) == '80':
# standard port
useProxy = True
# instantiate curl
curl = _Curl(useProxy)
# get PoolFileCatalog
iLFN = 0
outXML =''
strLFNs = ''
if not dq2url.endswith('_'):
url = dq2url + '/lrc/PoolFileCatalog'
else:
# NDGF LRC
url = dq2url + 'lrc/PoolFileCatalog'
for lfn in lfns:
iLFN += 1
# make argument
strLFNs += '%s ' % lfn
if iLFN % 40 == 0 or iLFN == len(lfns):
# get PoolFileCatalog
strLFNs = strLFNs.rstrip()
data = {'lfns':strLFNs}
# avoid too long argument
strLFNs = ''
# execute
status,out = curl.get(url,data)
_log.debug(status)
# sleep
time.sleep(2)
if status != 0:
_log.error("_getPoolFileCatalog : %s %s %s" % (dq2url,status,out))
return status
if status != 0 or out.startswith('Error'):
continue
if not out.startswith('<?xml'):
continue
# append
outXML += out
# remove redundant trailer and header
th = \
"""
</POOLFILECATALOG><\?xml version="1.0" encoding="UTF-8" standalone="no" \?>
<!-- Edited By POOL -->
<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">
<POOLFILECATALOG>
"""
outXML = re.sub(th,'',outXML)
outXML = re.sub("""\s*<META name="fsize" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="md5sum" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="lastmodified" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="archival" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="permanent" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="adler32" type="string"/>""",'',outXML)
# return XML
return outXML
# get files from MySQL
def _getPFNFromMySQL(lfns,dq2url):
_log.debug('_getPFNFromMySQL')
import MySQLdb
comment = ' /* broker_util._getPFNFromMySQL */'
outStr = ''
# parse connection string
match = re.search('^mysql://([^:]+):([^@]+)@([^/:]+):(\d+)/(.+)$',dq2url)
if match == None:
return outStr
# parameters for DB connection
connStr = "mysql -h %s -u %s -p%s -P %s %s"
dbhost = match.group(3)
dbuser = match.group(1)
dbpswd = match.group(2)
dbport = int(match.group(4))
dbname = match.group(5)
connStr = "mysql -h %s -u %s -p%s -P %s %s" % (dbhost,dbuser,dbpswd,dbport,dbname)
try:
_log.debug(connStr)
# connect
dbConn = MySQLdb.connect(db=dbname,host=dbhost,port=dbport,user=dbuser,passwd=dbpswd)
# make cursor
dbCur = dbConn.cursor()
# query files
iLFN = 0
strLFNs = ''
for lfn in lfns:
iLFN += 1
# make argument
strLFNs += " lfname='%s' OR " % lfn
if iLFN % 40 == 0 or iLFN == len(lfns):
# get PoolFileCatalog
strLFNs = strLFNs[:-3]
# construct SQL
sql = 'SELECT lfname FROM t_lfn WHERE %s' % strLFNs
# reset
strLFNs = ''
# execute
_log.debug(sql)
dbCur.execute(sql+comment)
res = dbCur.fetchall()
_log.debug(r
|
es)
# append LFNs
if res
|
!= None and len(res) != 0:
for resLFN in res:
outStr += '%s ' % resLFN
# close cursor
dbCur.close()
# close connection
dbConn.close()
except:
type, value, traceBack = sys.exc_info()
_log.error("_getPFNFromMySQL : %s %s %s" % (dq2url,type,value))
return -1
# return
return outStr
# get files from LFC
def _getPFNFromLFC(lfns,dq2url,guids,storageName,scopeList=[],tmpLog=None):
if tmpLog == None:
        tmpLog = LogWrapper(_log)
tmpLog.debug('_getPFNFromLFC %s %s / %s LFNs:%s %s' % (dq2url,str(storageName),
len(lfns),str(lfns[:3]),str(scopeList[:3])))
outStr = ''
    # check parameter
if guids == [] or storageName == [] or (len(lfns) != len(guids)):
tmpLog.debug('_getPFNFromLFC done with empty list')
return outStr
# check scopeList
if not scopeList in [None,[]] and len(lfns) != len(scopeList):
tmpLog.warning('_getPFNFromLFC wrong scopeList %s %s %s %s' % (dq2url,str(storageName),
str(lfns),str(scopeList)))
tmpLog.error('_getPFNFromLFC failed')
return outStr
# loop over all LFNs
iLFN = 0
nLFN = 1000
strFiles = ''
outStr = ''
for iLFN in range(len(lfns)):
if scop
|
ellisonbg/altair
|
tools/generate_schema_wrapper.py
|
Python
|
bsd-3-clause
| 18,648
| 0.001448
|
"""Generate a schema wrapper from a schema"""
import copy
import os
import sys
import json
from os.path import abspath, join, dirname
import textwrap
from urllib import request
import m2r
# import schemapi from here
sys.path.insert(0, abspath(dirname(__file__)))
from schemapi import codegen
from schemapi.codegen import CodeSnippet
from schemapi.utils import get_valid_identifier, SchemaInfo, indent_arglist
class SchemaGenerator(codegen.SchemaGenerator):
def _process_description(self, description):
description = m2r.convert(description)
description = description.replace(m2r.prolog, '')
description = description.replace(":raw-html-m2r:", ":raw-html:")
description = description.replace(r'\ ,', ',')
description = description.replace(r'\ ', ' ')
return description.strip()
def schema_class(*args, **kwargs):
return SchemaGenerator(*args, **kwargs).schema_class()
SCHEMA_URL_TEMPLATE = ('https://vega.github.io/schema/'
'{library}/{version}.json')
SCHEMA_VERSION = {
'vega': {
'v2': 'v2.6.5',
'v3': 'v3.3.1'
},
'vega-lite': {
'v1': 'v1.3.1',
'v2': 'v2.4.3'
}
}
BASE_SCHEMA = """
class {basename}(SchemaBase):
@classmethod
def _default_wrapper_classes(cls):
return {basename}.__subclasses__()
"""
LOAD_SCHEMA = '''
import pkgutil
import json
def load_schema():
"""Load the json schema associated with this module's functions"""
return json.loads(pkgutil.get_data(__name__, '{schemafile}').decode('utf-8'))
'''
CHANNEL_MIXINS = """
class FieldChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
if self.shorthand is Undefined:
kwds = {}
elif isinstance(self.shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy()
kwds.pop('shorthand')
return [self.__class__(shorthand, **kwds).to_dict()
for shorthand in self.shorthand]
elif isinstance(self.shorthand, six.string_types):
kwds = parse_shorthand(self.shorthand, data=context.get('data', None))
type_defined = self._kwds.get('type', Undefined) is not Undefined
if not (type_defined or 'type' in kwds):
if isinstance(context.get('data', None), pd.DataFrame):
raise ValueError("{0} encoding field is specified without a type; "
"the type cannot be inferred because it does not "
"match any column in the data.".format(self.shorthand))
else:
raise ValueError("{0} encoding field is specified without a type; "
"the type cannot be automacially inferred because "
"the data is not specified as a pandas.DataFrame."
"".format(self.shorthand))
else:
# shorthand is not a string; we pass the definition to field
if self.field is not Undefined:
raise ValueError("both shorthand and field specified in {0}"
"".format(self.__class__.__name__))
# field is a RepeatSpec or similar; cannot infer type
kwds = {'field': self.shorthand}
# set shorthand to Undefined, because it's not part of the schema
self.shorthand = Undefined
self._kwds.update({k: v for k, v in kwds.items()
if self._kwds.get(k, Undefined) is Undefined})
return super(FieldChannelMixin, self).to_dict(
validate=validate,
ignore=ignore,
context=context
)
class ValueChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
condition = getattr(self, 'condition', Undefined)
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif 'field' in condition and 'type' not in condition:
kwds = parse_shorthand(condition['field'], context.get('data', None))
copy = self.copy()
copy.condition.update(kwds)
return super(ValueChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
"""
class FieldSchemaGenerator(SchemaGenerator):
schema_class_template = textwrap.dedent('''
class {classname}(FieldChannelMixin, core.{basename}):
"""{docstring}"""
_class_is_valid_at_instantiation = False
{init_code}
''')
class ValueSchemaGenerator(SchemaGenerator):
schema_class_template = textwrap.dedent('''
class {classname}(ValueChannelMixin, core.{basename}):
"""{docstring}"""
_class_is_valid_at_instantiation = False
{init_code}
''')
HEADER = """\
# -*- coding: utf-8 -*-
#
# The contents of this file are automatically written by
# tools/generate_schema_wrapper.py. Do not modify directly.
"""
def schema_url(library, version):
version = SCHEMA_VERSION[library][vers
|
ion]
return SCHEMA_URL_TEMPLATE.format(library=library, version=version)
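# For example, with the pinned versions above:
#   schema_url('vega-lite', 'v2')
#   -> 'https://vega.github.io/schema/vega-lite/v2.4.3.json'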
def download_schemafile(library, version, schemapath):
url = schema_url(library, version)
filename = os.path.join(schemapath, '{lib
|
rary}-schema.json'.format(library=library))
request.urlretrieve(url, filename)
return filename
def copy_schemapi_util():
"""
Copy the schemapi utility and its test file into altair/utils/
"""
# copy the schemapi utility file
source_path = abspath(join(dirname(__file__), 'schemapi', 'schemapi.py'))
destination_path = abspath(join(dirname(__file__), '..', 'altair',
'utils', 'schemapi.py'))
print("Copying\n {0}\n -> {1}".format(source_path, destination_path))
with open(source_path, 'r', encoding='utf8') as source:
with open(destination_path, 'w', encoding='utf8') as dest:
dest.write(HEADER)
dest.writelines(source.readlines())
# Copy the schemapi test file
source_path = abspath(join(dirname(__file__), 'schemapi',
'tests', 'test_schemapi.py'))
destination_path = abspath(join(dirname(__file__), '..', 'altair',
'utils', 'tests', 'test_schemapi.py'))
print("Copying\n {0}\n -> {1}".format(source_path, destination_path))
with open(source_path, 'r', encoding='utf8') as source:
with open(destination_path, 'w', encoding='utf8') as dest:
dest.write(HEADER)
dest.writelines(source.readlines())
def generate_vegalite_schema_wrapper(schema_file):
"""Generate a schema wrapper at the given path."""
# TODO: generate simple tests for each wrapper
basename = 'VegaLiteSchema'
with open(schema_file, encoding='utf8') as f:
rootschema = json.load(f)
contents = [HEADER,
"from altair.utils.schemapi import SchemaBase, Undefined",
LOAD_SCHEMA.format(schemafile='vega-lite-schema.json')]
contents.append(BASE_SCHEMA.format(basename=basename))
contents.append(schema_class('Root', schema=rootschema, basename=basename,
schemarepr=CodeSnippet('load_schema()')))
for name in rootschema['definitions']:
defschema = {'$ref': '#/definitions/' + name}
defschema_repr = {'$ref': '#/definitions/' + name}
contents.append(schema_class(get_valid_identifier(name),
schema=defschema, schemarepr=defschema_repr,
rootschema=rootschema, basename=basename,
rootschemarepr=CodeSnippet("Root._schema")))
contents.append('') # end with newline
return '\n'.join(contents)
def ge
|
EEEManchester/Food-Computer
|
Software/MQTT Test GUI/MQTT_GUI/main.py
|
Python
|
mit
| 7,742
| 0.005425
|
from MQTT_UI import Ui_MainWindow #Generated by Qt Designer
from PyQt4 import QtCore, QtGui #for gui
import paho.mqtt.client as mqtt #for mqtt
import sys #for exit
class StartQT4(QtGui.QMainWindow):
client1 = mqtt.Client() #for raspberry pi
client2 = mqtt.Client() #for simple mqtt test
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
#<Buttons>######################################################################################
self.ui.pushButton_1.clicked.connect(self.pushButton_1)
self.ui.pushButton_2.clicked.connect(self.pushButton_2)
self.ui.pushButton_3.clicked.connect(self.pushButton_3)
self.ui.pushButton_4.clicked.connect(self.pushButton_4)
self.ui.pushButton_5.clicked.connect(self.pushButton_5)
self.ui.pushButton_6.clicked.connect(self.pushButton_6)
self.ui.pushButton_7.clicked.connect(self.pushButton_7)
self.ui.pushButton_8.clicked.connect(self.pushButton_8)
self.ui.pushButton_9.clicked.connect(self.pushButton_9)
self.ui.pushButton_10.clicked.connect(self.pushButton_10)
self.ui.pushButton_11.clicked.connect(self.pushButton_11)
self.ui.pushButton_12.clicked.connect(self.pushButton_12)
self.ui.pushButton_13.clicked.connect(self.pushButton_13)
self.ui.pushButton_14.clicked.connect(self.pushButton_14)
self.ui.pushButton_15.clicked.connect(self.pushButton_15)
self.ui.pushButton_16.clicked.connect(self.pushButton_16)
self.ui.pushButton_17.clicked.connect(self.pushButton_17)
self.ui.pushButton_18.clicked.connect(self.pushButton_18)
self.ui.pushButton_19.clicked.connect(self.pushButton_19)
self.ui.pushButton_20.clicked.connect(self.pushButton_20)
self.ui.pushButton_21.clicked.connect(self.pushButton_21)
self.ui.pushButton_22.clicked.connect(self.pushButton_22)
self.ui.pushButton_23.clicked.connect(self.pushButton_23)
self.ui.pushButton_24.clicked.connect(self.pushButton_24)
#</Buttons>#####################################################################################
#<MQTT Calbacks>####################################################################################
# Called when client1 is connected
def on_connect1(client, userdata, flags, rc):
print("Client 1: Connected with result code " + str(rc)) #'0' means ok
self.client1.subscribe(str(self.ui.lineEdit_4.text()))
# Called when a message has been received on a topic that the client subscribes to.
def on_message1(client, userdata, msg):
print('Client: 1, ' + 'Topic: ' + msg.topic + ', Payload: ' + str(msg.payload))
self.updateDisplay(str(msg.payload))
# Called when the client disconnects from the broker.
def on_disconnect1(client, userdata, rc):
if rc != 0:
print("Client 1: Unexpected disconnection.")
else:
print("Client 1: Clean disconnect.")
# Called when client2 is connected
def on_connect2(client, userdata, flags, rc):
print("Client 2: Connected with result code " + str(rc)) #'0' means ok
# Called when a message has been received on a topic that the client subscribes to.
def on_message2(client, userdata, msg):
print('Client: 2, ' + 'Topic: ' + msg.topic + ', Payload: ' + str(msg.payload))
self.ui.lineEdit_9.setText(str(msg.payload).split('\'')[1])
# Called when the client disconnects from the broker.
def on_disconnect2(client, userdata, rc):
if rc != 0:
print("Client 2: Unexpected disconnection.")
else:
print("Client 2: Clean disconnect.")
        # assigning each client to its own callback function.
        # (a callback is a function that will be automatically called when an event occurs)
self.client1.on_connect = on_connect1
self.client1.on_disconnect = on_disconnect1
self.client1.on_message = on_message1
self.client2.on_connect = on_connect2
self.client2.on_disconnect = on_disconnect2
self.client2.on_message = on_message2
# </MQTT Calbacks>###################################################################################
###<button_methods>####
def pushButton_1(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'A')
def pushButton_2(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'B')
def pushButton_3(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'C')
def pushButton_4(self):
self.client1.pub
|
lish(str(self.ui.lineEdit_3.text()), 'D')
def pushButton_5(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'E')
def pushButton_6(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'F')
def pushButton_7(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'G')
def pushButton_8(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'H')
def pushButton_9(self):
self.client1.publish(str(
|
self.ui.lineEdit_3.text()), 'I')
def pushButton_10(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'J')
def pushButton_11(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'K')
def pushButton_12(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'L')
def pushButton_13(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'M')
def pushButton_14(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'N')
def pushButton_15(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'O')
def pushButton_16(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'P')
def pushButton_17(self):
self.client1.loop_start() # creates a new thread for client1
self.client1.connect(str(self.ui.lineEdit_1.text()), int(self.ui.lineEdit_2.text()))
def pushButton_18(self):
self.client1.reconnect()
def pushButton_19(self):
self.client2.loop_start() # creates a new thread for client2
self.client2.connect(str(self.ui.lineEdit_5.text()), int(self.ui.lineEdit_6.text()))
def pushButton_20(self):
self.client2.disconnect()
def pushButton_21(self):
self.client2.reconnect()
def pushButton_22(self):
self.client2.publish(str(self.ui.lineEdit_8.text()), str(self.ui.lineEdit_10.text()))
def pushButton_23(self):
self.client2.subscribe(str(self.ui.lineEdit_7.text()))
def pushButton_24(self):
self.client2.unsubscribe(str(self.ui.lineEdit_7.text()))
###</buttons_methods>###
def updateDisplay(self, pl): #assumes data is csv formatted
payload = str(pl)
payload = payload.split('\'')
payload = payload[1].split(',')
self.ui.progressBar_1.setValue(float(payload[0]))
self.ui.progressBar_2.setValue(float(payload[1]))
self.ui.progressBar_3.setValue(float(payload[2]))
self.ui.progressBar_4.setValue(float(payload[3]))
self.ui.progressBar_5.setValue(float(payload[4]))
self.ui.progressBar_6.setValue(float(payload[5]))
self.ui.progressBar_7.setValue(float(payload[6]))
self.ui.progressBar_8.setValue(float(payload[7]))
self.ui.progressBar_9.setValue(float(payload[8]))
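    # Illustrative payload for updateDisplay (values are made up): a message
    # whose repr looks like b'21.5,45.0,400,1.2,6.8,50,75,30,10' yields nine
    # comma-separated values that drive progressBar_1 .. progressBar_9.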
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
MainWindow = StartQT4()
MainWindow.show()
sys.exit(app.exec_())
|
danielpronych/pyramid-doxygen
|
pyramid/tests/test_util.py
|
Python
|
bsd-2-clause
| 21,474
| 0.001537
|
import unittest
from pyramid.compat import PY3
class Test_InstancePropertyMixin(unittest.TestCase):
def _makeOne(self):
cls = self._getTargetClass()
class Foo(cls):
pass
return Foo()
def _getTargetClass(self):
from pyramid.util import InstancePropertyMixin
return InstancePropertyMixin
def test_callable(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker)
foo.bar = 1
self.assertEqual(1, foo.worker)
foo.bar = 2
self.assertEqual(2, foo.worker)
def test_callable_with_name(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, name='x')
foo.bar = 1
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
def test_callable_with_reify(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, reify=True)
foo.bar = 1
self.assertEqual(1, foo.worker)
foo.bar = 2
self.assertEqual(1, foo.worker)
def test_callable_with_name_reify(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, name='x')
foo.set_property(worker, name='y', reify=True)
foo.bar = 1
self.assertEqual(1, foo.y)
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
self.assertEqual(1, foo.y)
def test_property_without_name(self):
def worker(obj): pass
foo = self._makeOne()
self.assertRaises(ValueError, foo.set_property, property(worker))
def test_property_with_name(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(property(worker), name='x')
foo.bar = 1
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
def test_property_with_reify(self):
def worker(obj): pass
foo = self._makeOne()
self.assertRaises(ValueError, foo.set_property,
property(worker), name='x', reify=True)
def test_override_property(self):
def worker(obj): pass
foo = self._makeOne()
foo.set_property(worker, name='x')
def doit():
foo.x = 1
self.assertRaises(AttributeError, doit)
def test_override_reify(self):
def worker(obj): pass
foo = self._makeOne()
foo.set_property(worker, name='x', reify=True)
foo.x = 1
self.assertEqual(1, foo.x)
foo.x = 2
self.assertEqual(2, foo.x)
def test_reset_property(self):
foo = self._makeOne()
foo.set_property(lambda _: 1, name='x')
self.assertEqual(1, foo.x)
foo.set_property(lambda _: 2, name='x')
self.assertEqual(2, foo.x)
def test_reset_reify(self):
""" This is questionable behavior, but may as well get notified
if it changes."""
foo = self._makeOne()
foo.set_property(lambda _: 1, name='x', reify=True)
self.assertEqual(1, foo.x)
foo.set_property(lambda _: 2, name='x', reify=True)
self.assertEqual(1, foo.x)
def test__make_property(self):
from pyramid.decorator import reify
cls = self._getTargetClass()
name, fn = cls._make_property(lambda x: 1, name='x', reify=True)
self.assertEqual(name, 'x')
self.assertTrue(isinstance(fn, reify))
def test__set_properties_with_iterable(self):
foo = self._makeOne()
x = foo._make_property(lambda _: 1, name='x', reify=True)
y = foo._make_property(lambda _: 2, name='y')
foo._set_properties([x, y])
self.assertEqual(1, foo.x)
self.assertEqual(2, foo.y)
def test__set_properties_with_dict(self):
foo = self._makeOne()
x_name, x_fn = foo._make_property(lambda _: 1, name='x', reify=True)
y_name, y_fn = foo._make_property(lambda _: 2, name='y')
foo._set_properties({x_name: x_fn, y_name: y_fn})
self.assertEqual(1, foo.x)
self.assertEqual(2, foo.y)
def test__set_extensions(self):
inst = self._makeOne()
def foo(self, result):
return result
n, bar = inst._make_property(lambda _: 'bar', name='bar')
class Extensions(object):
def __init__(self):
self.methods = {'foo':foo}
self.descriptors = {'bar':bar}
extensions = Extensions()
inst._set_extensions(extensions)
self.assertEqual(inst.bar, 'bar')
self.assertEqual(inst.foo('abc'), 'abc')
class Test_WeakOrderedSet(unittest.TestCase):
def _makeOne(self):
from pyramid.config import WeakOrderedSet
return WeakOrderedSet()
def test_ctor(self):
wos = self._makeOne()
self.assertEqual(len(wos), 0)
self.assertEqual(wos.last, None)
def test_add_item(s
|
elf):
wos = self._makeOne()
reg = Dummy()
wos.add(reg)
self.assertEqual(list(wos), [reg])
self.assertTrue(reg in wos)
self.assertEqual(wos.last, reg)
def test_add_multiple_items(se
|
lf):
wos = self._makeOne()
reg1 = Dummy()
reg2 = Dummy()
wos.add(reg1)
wos.add(reg2)
self.assertEqual(len(wos), 2)
self.assertEqual(list(wos), [reg1, reg2])
self.assertTrue(reg1 in wos)
self.assertTrue(reg2 in wos)
self.assertEqual(wos.last, reg2)
def test_add_duplicate_items(self):
wos = self._makeOne()
reg = Dummy()
wos.add(reg)
wos.add(reg)
self.assertEqual(len(wos), 1)
self.assertEqual(list(wos), [reg])
self.assertTrue(reg in wos)
self.assertEqual(wos.last, reg)
def test_weakref_removal(self):
wos = self._makeOne()
reg = Dummy()
wos.add(reg)
wos.remove(reg)
self.assertEqual(len(wos), 0)
self.assertEqual(list(wos), [])
self.assertEqual(wos.last, None)
def test_last_updated(self):
wos = self._makeOne()
reg = Dummy()
reg2 = Dummy()
wos.add(reg)
wos.add(reg2)
wos.remove(reg2)
self.assertEqual(len(wos), 1)
self.assertEqual(list(wos), [reg])
self.assertEqual(wos.last, reg)
def test_empty(self):
wos = self._makeOne()
reg = Dummy()
reg2 = Dummy()
wos.add(reg)
wos.add(reg2)
wos.empty()
self.assertEqual(len(wos), 0)
self.assertEqual(list(wos), [])
self.assertEqual(wos.last, None)
class Test_object_description(unittest.TestCase):
def _callFUT(self, object):
from pyramid.util import object_description
return object_description(object)
def test_string(self):
self.assertEqual(self._callFUT('abc'), 'abc')
def test_int(self):
self.assertEqual(self._callFUT(1), '1')
def test_bool(self):
self.assertEqual(self._callFUT(True), 'True')
def test_None(self):
self.assertEqual(self._callFUT(None), 'None')
def test_float(self):
self.assertEqual(self._callFUT(1.2), '1.2')
def test_tuple(self):
self.assertEqual(self._callFUT(('a', 'b')), "('a', 'b')")
def test_set(self):
if PY3: # pragma: no cover
self.assertEqual(self._callFUT(set(['a'])), "{'a'}")
else: # pragma: no cover
self.assertEqual(self._callFUT(set(['a'])), "set(['a'])")
def test_list(self):
self.assertEqual(self._callFUT(['a']), "['a']")
def test_dict(self):
self.assertEqual(self._callFUT({'a':1}), "{'a': 1}")
def test_nomodule(self):
o = object()
self.assertEqual(self._callFUT(o), 'object %s' % str(o))
def test_module(self):
import pyramid
self.assertEqual(self._callFUT(pyramid), 'module pyramid')
def test_method(self):
self.assertEqual(
self._callFUT(self.test_method),
|
vladikoff/fxa-mochitest
|
tests/config/mozharness/marionette.py
|
Python
|
mpl-2.0
| 2,925
| 0.000342
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
config = {
"suite_definitions": {
"gaiatest_desktop": {
"options": [
"--restart",
"--timeout=%(timeout)s",
"--type=%(type)s",
"--testvars=%(testvars)s",
"--profile=%(profile)s",
"--symbols-path=%(symbols_path)s",
"--gecko-log=%(gecko_log)s",
"--xml-output=%(xml_output)s",
"--html-output=%(html_output)s",
"--log-raw=%(raw_log_file)s",
"--binary=%(binary)s",
"--address=%(address)s",
"--total-chunks=%(total_chunks)s",
"--this-chunk=%(this_chunk)s"
],
"run_filename": "",
"testsdir": ""
},
"gaiatest_emulator": {
"options": [
"--restart",
"--timeout=%(timeout)s",
"--type=%(type)s",
"--testvars=%(testvars)s",
"--profile=%(profile)s",
"--symbols-path=%(symbols_path)s",
"--xml-output=%(xml_output)s",
"--html-output=%(html_output)s",
"--log-raw=%(raw_log_file)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s"
],
"run_filename": "",
"testsdir": ""
},
"marionette_desktop": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--binary=%(binary)s",
"--address=%(address)s",
"--symbols-path=%(symbols_path)s"
],
|
"run_filename": "",
"testsdir": ""
},
"marionette_emulator": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%
|
(emulator)s",
"--homedir=%(homedir)s",
"--symbols-path=%(symbols_path)s"
],
"run_filename": "",
"testsdir": ""
},
"webapi_desktop": {
"options": [],
"run_filename": "",
"testsdir": ""
},
"webapi_emulator": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--symbols-path=%(symbols_path)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s"
],
"run_filename": "",
"testsdir": ""
}
}
}
|
ncharles/ncf
|
tools/ncf.py
|
Python
|
gpl-3.0
| 6,835
| 0.016971
|
# This is a Python module containing functions to parse and analyze ncf components
# This module is designed to run on the latest major versions of the most popular
# server OSes (Debian, Red Hat/CentOS, Ubuntu, SLES, ...)
# At the time of writing (November 2013) these are Debian 7, Red Hat/CentOS 6,
# Ubuntu 12.04 LTS, SLES 11, ...
# The version of Python in all of these is >= 2.6, which is therefore what this
# module must support
import re
import subprocess
import json
import os.path
# Verbose output
VERBOSE = 0
dirs = [ "10_ncf_internals", "20_cfe_basics", "30_generic_methods", "40_it_ops_knowledge", "50_techniques", "60_services" ]
tags = {}
tags["common"] = ["bundle_name", "bundle_args"]
tags["generic_method"] = ["name", "class_prefix", "class_parameter", "class_parameter_id"]
tags["technique"] = ["name", "description", "version"]
def get_root_dir():
return os.path.realpath(os.path.dirname(__file__) + "/../")
# This method emulates the behavior of subprocess check_output method.
# We aim to be compatible with Python 2.6, thus this method does not exist
# yet in subprocess.
def check_output(command):
if VERBOSE == 1:
print "VERBOSE: About to run command '" + " ".join(command) + "'"
process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
retcode = process.poll()
if retcode != 0:
if VERBOSE == 1:
print "VERBOSE: Exception triggered, Command returned error code " + retcode
raise subprocess.CalledProcessError(retcode, command, output=output[0])
if VERBOSE == 1:
print "VERBOSE: Command output: '" + output[0] + "'"
return output[0]
def get_all_generic_methods_filenames():
return get_all_generic_methods_filenames_in_dir(get_root_dir() + "/tree/30_generic_methods")
def get_all_generic_methods_filenames_in_dir(dir):
return get_all_cf_filenames_under_dir(dir)
def get_all_techniques_filenames():
return get_all_cf_filenames_under_dir(get_root_dir() + "/tree/50_techniques")
def get_all_cf_filenames_under_dir(dir):
filenames = []
filenames_add = filenames.append
for root, dirs, files in os.walk(dir):
for file in files:
if not file.startswith("_") and file.endswith(".cf"):
filenames_add(os.path.join(root, file))
return filenames
def parse_technique_metadata(technique_content):
return parse_bundlefile_metadata(technique_content, "technique")
def parse_generic_method_metadata(technique_content):
return parse_bundlefile_metadata(technique_content, "generic_method")
def parse_bundlefile_metadata(content, bundle_type):
res = {}
for line in content.splitlines():
for tag in tags[bundle_type]:
match = re.match("^\s*#\s*@" + tag + "\s+(.*)$", line)
if match :
res[tag] = match.group(1)
match = re.match("[^#]*bundle\s+agent\s+([^(]+)\(?([^)]*)\)?.*$", line)
if match:
res['bundle_name'] = match.group(1)
res['bundle_args'] = []
if len(match.group(2)):
res['bundle_args'] += [x.strip() for x in match.group(2).split(',')]
# Any tags should come before the "bundle agent" declaration
break
# The tag "class_parameter_id" is a magic tag, it's value is built from class_parameter and the list of args
if "class_parameter_id" in tags[bundle_type]:
try:
res['class_parameter_id'] = res['bundle_args'].index(res['class_parameter'])+1
except:
res['class_parameter_id'] = 0
raise Exception("The class_parameter name \"" + res['class_parameter'] + "\" does not seem to match any of the bundle's parameters")
expected_tags = tags[bundle_type] + tags["common"]
if sorted(res.keys()) != sorted(expected_tags):
missing_keys = [mkey for mkey in expected_tags if mkey not in set(res.keys())]
raise Exception("One or more metadata tags not found before the bundle agent declaration (" + ", ".join(missing_keys) + ")")
return res
def parse_technique_methods(technique_file):
res = []
# Check file exists
if not os.path.exists(technique_file):
raise Exception("No such file: " + technique_file)
out = check_output(["cf-promises", "-pjson", "-f", technique_file])
promises = json.loads(out)
|
# Sanity check: if more than one bundle, this is a weird file and I'm quitting
bundle_count = 0
for bundle in promises['bundles']:
if bundle['bundleType'] == "agent":
bundle_count += 1
if bundle_count > 1:
raise Exception("There is not exactly one bundle in this file, aborting")
# Sanity check: the bundle must be of type agent
if
|
promises['bundles'][0]['bundleType'] != 'agent':
raise Exception("This bundle if not a bundle agent, aborting")
methods_promises = [promiseType for promiseType in promises['bundles'][0]['promiseTypes'] if promiseType['name']=="methods"]
methods = []
if len(methods_promises) >= 1:
methods = methods_promises[0]['contexts']
for context in methods:
class_context = context['name']
for method in context['promises']:
method_name = None
args = None
promiser = method['promiser']
for attribute in method['attributes']:
if attribute['lval'] == 'usebundle':
if attribute['rval']['type'] == 'functionCall':
method_name = attribute['rval']['name']
args = [arg['value'] for arg in attribute['rval']['arguments']]
if attribute['rval']['type'] == 'string':
method_name = attribute['rval']['value']
if args:
res.append({'class_context': class_context, 'method_name': method_name, 'args': args})
else:
res.append({'class_context': class_context, 'method_name': method_name})
return res
def get_all_generic_methods_metadata():
all_metadata = {}
filenames = get_all_generic_methods_filenames()
for file in filenames:
content = open(file).read()
try:
metadata = parse_generic_method_metadata(content)
all_metadata[metadata['bundle_name']] = metadata
except Exception:
continue # skip this file, it doesn't have the right tags in - yuk!
return all_metadata
def get_all_techniques_metadata(include_methods_calls = True):
all_metadata = {}
filenames = get_all_techniques_filenames()
for file in filenames:
content = open(file).read()
try:
metadata = parse_technique_metadata(content)
all_metadata[metadata['bundle_name']] = metadata
if include_methods_calls:
method_calls = parse_technique_methods(file)
all_metadata[metadata['bundle_name']]['method_calls'] = method_calls
except Exception as e:
print "ERROR: Exception triggered, Unable to parse file " + file
print e
continue # skip this file, it doesn't have the right tags in - yuk!
return all_metadata
|
palindromed/data-structures2
|
src/doubly_linked.py
|
Python
|
mit
| 4,501
| 0
|
# _*_ encoding: utf-8 _*_
"""Demonstrate doubly-linked list in python."""
from linked_list import Node
class DoublyLinked(object):
"""Implement a doubly-linked list from a singly-linked list."""
def __init__(self, val=None):
"""Initialize the list."""
self.head = object()
self._mark = self.head
if val:
self.insert(val)
def size(self):
"""Return the length of the list."""
counter = 0
current_node = self.head
while current_node is not self._mark:
counter += 1
current_node = current_node.get_next()
return counter
def search(self, val):
"""Return the node containing 'val' in list if exists, else None."""
current_node = self.head
while current_node.get_data() is not val:
current_node = current_node.get_next()
if current_node is self._mark:
raise IndexError
break
return current_node
def insert(self, val):
"""Insert value at head of list."""
if isinstance(val, list):
for item in val[::-1]:
new_node = DoubleNode(item, self.head, self._mark)
try:
self.head.set_previous(new_node)
except AttributeError:
pass
self.head = new_node
else:
new_node = DoubleNode(val, self.head, self._mark)
try:
self.head.set_previous(new_node)
except AttributeError:
pass
self.head = new_node
def display(self):
"""Print list represented as Python tuple literal."""
output = """"""
current_node = self.head
while current_node is not self._mark:
output += '{}, '.format(current_node.get_data())
current_node = current_node.get_next()
printable = '(' + output[:-2] + ')'
print(printable)
return printable
def pop(self):
"""Pop the first value off the head of the list and return it."""
item = self.head
if item is self._mark:
raise IndexError
else:
self.head = item.get_next()
try:
self.head.set_previous(self._mark)
except AttributeError:
pass
return item.get_data()
def append(self, val):
"""Append the given item to the tail of the list."""
cur = self.head
if cur == self._mark:
new_node = DoubleNode(val, self._mark)
self.head = new_node
else:
new_node = DoubleNode(val, self._mark)
while cur.next_node != self._mark:
cur = cur.next_node
cur.set_next(new_node)
new_node.set_previous(cur)
def shift(self):
"""Remove and returns the last value from the tail of the list."""
cur = self.head
if cur == self._mark:
raise IndexError
else:
while cur.next_node != self._mark:
cur = cur.next_node
try:
cur.prev_node.next_node = self._mark
except Attr
|
ibuteError:
raise IndexError
return cur.get_data()
def remove(self, value):
"""Remove the first occurrence of value in the list."""
previous_node = None
current_node = self.head
while current_node.get_data() is not value:
previous_node = current_node
current_node = current_node.get_next()
|
if current_node.get_data() is None:
break
if current_node.get_data() == value:
previous_node.set_next(current_node.get_next())
else:
print('Not Found')
class DoubleNode(object):
"""Double Node constructor for doubly linked list."""
def __init__(self, data=None, next_node=None, prev_node=None):
"""Initialize the double node."""
self.data = data
self.prev_node = prev_node
self.next_node = next_node
def set_previous(self, prev):
"""Set previous node."""
self.prev_node = prev
def get_data(self):
"""Get data for node."""
return self.data
def get_next(self):
"""Retrieve next node in list."""
return self.next_node
def set_next(self, next_node):
"""Set next node in list."""
self.next_node = next_node
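# Illustrative usage (not part of the original module):
#   dll = DoublyLinked([1, 2, 3])  # insert() accepts a list and keeps order
#   dll.append(4)                  # list is now 1, 2, 3, 4
#   dll.pop()                      # -> 1 (removed from the head)
#   dll.shift()                    # -> 4 (removed from the tail)
#   dll.display()                  # prints '(2, 3)'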
|
jtwhite79/pyemu
|
pyemu/mat/__init__.py
|
Python
|
bsd-3-clause
| 301
| 0.006645
|
"""Th
|
is module contains classes for handling matrices in a linear algebra setting.
The primary objects are the `Matrix` and `Cov`. These objects overload most numerical
operators to autoalign the elements based on row and column names."""
from .mat_handler import Matrix, Cov, Jco, concat, save_co
|
o
|
missyjcat/pythonexercises
|
basic/list1.py
|
Python
|
apache-2.0
| 3,070
| 0.011726
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
# +++your code here+++
count = 0
for word in words:
if len(word) >= 2 and word[0] == word[-1]:
count += 1
return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
# +++your code here+++
  # Split into words that start with 'x' and the rest, sort each group,
  # then combine with the 'x' words first.
  x_words = sorted(w for w in words if w.startswith('x'))
  other_words = sorted(w for w in words if not w.startswith('x'))
  return x_words + other_words
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
  # +++your code here+++
  # Sort by the last element of each tuple.
  return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'match_ends'
t
|
est(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print
print 'front_x'
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'app
|
le', 'mix'])
print
print 'sort_last'
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
|
TargetHolding/pyspark-cassandra
|
python/pyspark_cassandra/streaming.py
|
Python
|
apache-2.0
| 2,902
| 0.002757
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
|
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in
|
writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark_cassandra.util import as_java_object, as_java_array
from pyspark.streaming.dstream import DStream
from pyspark_cassandra.conf import WriteConf
from pyspark_cassandra.util import helper
from pyspark.serializers import AutoBatchedSerializer, PickleSerializer
def saveToCassandra(dstream, keyspace, table, columns=None, row_format=None, keyed=None,
write_conf=None, **write_conf_kwargs):
ctx = dstream._ssc._sc
gw = ctx._gateway
# create write config as map
write_conf = WriteConf.build(write_conf, **write_conf_kwargs)
write_conf = as_java_object(gw, write_conf.settings())
# convert the columns to a string array
columns = as_java_array(gw, "String", columns) if columns else None
return helper(ctx).saveToCassandra(dstream._jdstream, keyspace, table, columns, row_format,
keyed, write_conf)
def joinWithCassandraTable(dstream, keyspace, table, selected_columns=None, join_columns=None):
"""Joins a DStream (a stream of RDDs) with a Cassandra table
Arguments:
@param dstream(DStream)
The DStream to join. Equals to self when invoking joinWithCassandraTable on a monkey
patched RDD.
@param keyspace(string):
The keyspace to join on.
@param table(string):
The CQL table to join on.
@param selected_columns(string):
The columns to select from the Cassandra table.
@param join_columns(string):
The columns used to join on from the Cassandra table.
"""
ssc = dstream._ssc
ctx = ssc._sc
gw = ctx._gateway
selected_columns = as_java_array(gw, "String", selected_columns) if selected_columns else None
join_columns = as_java_array(gw, "String", join_columns) if join_columns else None
h = helper(ctx)
dstream = h.joinWithCassandraTable(dstream._jdstream, keyspace, table, selected_columns,
join_columns)
dstream = h.pickleRows(dstream)
dstream = h.javaDStream(dstream)
return DStream(dstream, ssc, AutoBatchedSerializer(PickleSerializer()))
# Monkey patch the default python DStream so that data in it can be stored to and joined with
# Cassandra tables
DStream.saveToCassandra = saveToCassandra
DStream.joinWithCassandraTable = joinWithCassandraTable
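# Example (editorial sketch, not part of the original module): once this module is
# imported, every DStream exposes the two monkey-patched methods above. The keyspace,
# table name and socket details below are placeholder assumptions, not values from this repo.
#
#   from pyspark.streaming import StreamingContext
#   import pyspark_cassandra.streaming  # importing applies the DStream monkey patches
#
#   ssc = StreamingContext(sc, 5)
#   rows = ssc.socketTextStream("localhost", 9999).map(lambda line: {"key": line})
#   rows.saveToCassandra("my_keyspace", "events")
#   joined = rows.joinWithCassandraTable("my_keyspace", "events")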
|
angadpc/Alexa-Project-
|
twilio/rest/api/v2010/account/message/feedback.py
|
Python
|
mit
| 5,676
| 0.001409
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FeedbackList(ListResource):
def __init__(self, version, account_sid, message_sid):
"""
Initialize the FeedbackList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param message_sid: The message_sid
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackList
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackList
"""
super(FeedbackList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'message_sid': message_sid,
}
self._uri = '/Accounts/{account_sid}/Messages/{message_sid}/Feedback.json'.format(**self._solution)
def create(self, outcome=values.unset):
"""
Create a new FeedbackInstance
:param FeedbackInstance.Outcome outcome: The outcome
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
data = values.of({
'Outcome': outcome,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackList>'
class FeedbackPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the FeedbackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param message_sid: The message_sid
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackPage
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackPage
"""
super(FeedbackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FeedbackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackPage>'
class FeedbackInstance(InstanceResource):
class Outcome(object):
CONFIRMED = "confirmed"
UMCONFIRMED = "umconfirmed"
def __init__(self, version, payload, account_sid, message_sid):
"""
Initialize the FeedbackInstance
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
super(FeedbackInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'message_sid': payload['message_sid'],
'outcome': payload['outcome'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'message_sid': message_sid,
}
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def message_sid(self):
"""
:returns: The message_sid
:rtype: unicode
"""
return self._properties['message_sid']
@property
def outcome(self):
"""
:returns: The outcome
:rtype: FeedbackInstance.Outcome
"""
return self._properties['outcome']
    @property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
        :rtype: datetime
"""
return self._properties['date_updated']
@property
def uri(self):
"""
:returns: The uri
:rtype: unicode
"""
return self._properties['uri']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackInstance>'
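# Example (editorial sketch): typical use goes through the REST client rather than
# instantiating these resource classes directly. The SIDs and token below are
# placeholders, and the exact client call path is an assumption about the twilio-python API.
#
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#   feedback = client.messages('SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                    .feedback.create(outcome='confirmed')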
|
macosforge/ccs-calendarserver
|
twistedcaldav/datafilters/filter.py
|
Python
|
apache-2.0
| 3,318
| 0.000603
|
##
# Copyright (c) 2009-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twistedcaldav.ical import Component as iComponent
from twistedcaldav.vcard import Component as vComponent
__all__ = [
"CalendarFilter",
"AddressFilter",
]
class CalendarFilter(object):
"""
Abstract class that defines an iCalendar filter/merge object
"""
def __init__(self):
pass
def filter(self, ical):
"""
Filter the supplied iCalendar object using the request information.
@param ical: iCalendar object
@type ical: L{Component}
@return: L{Component} for the filtered calendar data
"""
raise NotImplementedError
def merge(self, icalnew, icalold):
""
|
"
Merge the old iCalendar object into the new iCalendar data using the request information.
@param icalnew: new iCalendar object to merge data into
@type icalnew: L{Component}
@param icalold: old iCalendar data to merge data from
@type icalold: L{Component}
"""
raise NotImplementedError
    def validCalendar(self, ical):
# If we were passed a string, parse it out as a Component
if isinstance(ical, str):
try:
ical = iComponent.fromString(ical)
except ValueError:
raise ValueError("Not a calendar: %r" % (ical,))
if ical is None or ical.name() != "VCALENDAR":
raise ValueError("Not a calendar: %r" % (ical,))
return ical
class AddressFilter(object):
"""
Abstract class that defines a vCard filter/merge object
"""
def __init__(self):
pass
def filter(self, vcard):
"""
Filter the supplied vCard object using the request information.
@param vcard: iCalendar object
@type vcard: L{Component}
@return: L{Component} for the filtered vcard data
"""
raise NotImplementedError
def merge(self, vcardnew, vcardold):
"""
Merge the old vcard object into the new vcard data using the request information.
@param vcardnew: new vcard object to merge data into
@type vcardnew: L{Component}
@param vcardold: old vcard data to merge data from
@type vcardold: L{Component}
"""
raise NotImplementedError
def validAddress(self, vcard):
# If we were passed a string, parse it out as a Component
if isinstance(vcard, str):
try:
vcard = vComponent.fromString(vcard)
except ValueError:
raise ValueError("Not a vcard: %r" % (vcard,))
if vcard is None or vcard.name() != "VCARD":
raise ValueError("Not a vcard: %r" % (vcard,))
return vcard
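# Example (editorial sketch): a minimal CalendarFilter subclass that only validates the
# input and passes it through unchanged; a real filter would transform the component.
# The class name below is an assumption for illustration.
#
#   class PassThroughCalendarFilter(CalendarFilter):
#       def filter(self, ical):
#           return self.validCalendar(ical)
#       def merge(self, icalnew, icalold):
#           return icalnew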
|
aikramer2/spaCy
|
spacy/lang/en/__init__.py
|
Python
|
mit
| 1,389
| 0.00216
|
# coding: utf8
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .norm_exceptions import NORM_EXCEPTIONS
from .tag_map import TAG_MAP
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .morph_rules import MORPH_RULES
from .lemmatizer import LEMMA_RULES, LEMMA_INDEX, LEMMA_EXC, LOOKUP
from .syntax_iterators import SYNTAX_ITERATORS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
def _return_en(_):
return 'en'
class EnglishDefaults(Language.Defaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters.update(LEX_ATTRS)
lex_attr_getters[LANG] = _return_en
lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM],
BASE_NORMS, NORM_EXCEPTIONS)
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
tag_map = TAG_MAP
stop_words = STOP_WORDS
morph_rules = MORPH_RULES
lemma_rules = LEMMA_RULES
lemma_index = LEMMA_INDEX
lemma_exc = LEMMA_EXC
lemma_lookup = LOOKUP
syntax_iterators = SYNTAX_ITERATORS
class English(Language):
lang = 'en'
Defaults = EnglishDefaults
__all__ = ['English']
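# Example (editorial sketch): constructing the blank English pipeline defined above.
# The sample sentence is an arbitrary assumption.
#
#   from spacy.lang.en import English
#   nlp = English()
#   doc = nlp(u"This is a sentence.")
#   tokens = [t.text for t in doc]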
|
warvariuc/mykde
|
packages/__init__.py
|
Python
|
bsd-3-clause
| 301
| 0
|
import mykde
class ActionPackage(mykde.ActionPackage):
author = 'Victor Varvaryuk <victor.varvariuc@gmail.com>'
version = 2
description = """
TODO:
xnview - unpack to ~/apps/ and create .desktop file in Graphics category
clip2net
    galaxy icons libreoffice, enter key behavior in calc
"""
|
eayunstack/python-neutronclient
|
neutronclient/neutron/v2_0/qos/bandwidth_limit_rule.py
|
Python
|
apache-2.0
| 3,455
| 0
|
# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.neutron.v2_0.qos import rule as qos_rule
BANDWIDTH_LIMIT_RULE_RESOURCE = 'bandwidth_limit_rule'
def add_bandwidth_limit_arguments(parser):
parser.add_argument(
'--max-kbps',
help=_('max bandwidth in kbps.'))
parser.add_argument(
'--max-burst-kbps',
help=_('max burst bandwidth in kbps.'))
def update_bandwidth_limit_args2body(parsed_args, body):
max_kbps = parsed_args.max_kbps
max_burst_kbps = parsed_args.max_burst_kbps
if not (max_kbps or max_burst_kbps):
raise exceptions.CommandError(_("Must provide max_kbps"
" or max_burst_kbps option."))
neutronv20.update_dict(parsed_args, body,
['max_kbps', 'max_burst_kbps', 'tenant_id'])
class CreateQoSBandwidthLimitRule(qos_rule.QosRuleMixin,
neutronv20.CreateCommand):
"""Create a qos bandwidth limit rule."""
    resource = BANDWIDTH_LIMIT_RULE_RESOURCE
def add_known_arguments(self, parser):
super(CreateQoSBandwidthLimitRule, self).add_known_arguments(parser)
        add_bandwidth_limit_arguments(parser)
def args2body(self, parsed_args):
body = {}
update_bandwidth_limit_args2body(parsed_args, body)
return {self.resource: body}
class ListQoSBandwidthLimitRules(qos_rule.QosRuleMixin,
neutronv20.ListCommand):
"""List all qos bandwidth limit rules belonging to the specified policy."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
_formatters = {}
pagination_support = True
sorting_support = True
class ShowQoSBandwidthLimitRule(qos_rule.QosRuleMixin, neutronv20.ShowCommand):
"""Show information about the given qos bandwidth limit rule."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
allow_names = False
class UpdateQoSBandwidthLimitRule(qos_rule.QosRuleMixin,
neutronv20.UpdateCommand):
"""Update the given qos bandwidth limit rule."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
allow_names = False
def add_known_arguments(self, parser):
super(UpdateQoSBandwidthLimitRule, self).add_known_arguments(parser)
add_bandwidth_limit_arguments(parser)
def args2body(self, parsed_args):
body = {}
update_bandwidth_limit_args2body(parsed_args, body)
return {self.resource: body}
class DeleteQoSBandwidthLimitRule(qos_rule.QosRuleMixin,
neutronv20.DeleteCommand):
"""Delete a given qos bandwidth limit rule."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
allow_names = False
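# Example (editorial sketch): these command classes back the python-neutronclient CLI.
# The policy name and the exact CLI entry points shown below are assumptions for illustration.
#
#   neutron qos-bandwidth-limit-rule-create my-policy --max-kbps 1000 --max-burst-kbps 100
#   neutron qos-bandwidth-limit-rule-list my-policy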
|
rbaumg/trac
|
contrib/workflow/migrate_original_to_basic.py
|
Python
|
bsd-3-clause
| 1,456
| 0.000687
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2019 Edgewall Software
# Copyright (C) 2007 Eli Carter <retracile@gmail.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import sys
import trac.env
from trac.ticket.default_workflow import load_workflow_config_snippet
def main():
"""Rewrite the ticket-workflow section of the config; and change all
'assigned' tickets to 'accepted'.
"""
if len(sys.argv) != 2:
print("Usage: %s path_to_trac_environment" % sys.argv[0])
sys.exit(1)
tracdir = sys.argv[1]
trac_env = trac.env.open_environment(tracdir)
# Update the config...
old_workflow = trac_env.config.options('ticket-workflow')
for name, value in old_workflow:
trac_env.config.remove('ticket-workflow', name)
load_workflow_config_snippet(trac_env.config, 'basic-workflow.ini')
trac_env.config.save()
    # Update the ticket statuses...
    trac_env.db_transaction("""
        UPDATE ticket SET status = 'accepted' WHERE status = 'assigned'
""")
if __name__ == '__main__':
main()
|
apache/incubator-allura
|
ForgeTracker/forgetracker/tests/unit/test_ticket_model.py
|
Python
|
apache-2.0
| 14,297
| 0.00028
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITH
|
OUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c
from datetime import datetime
import urllib2
from ming.orm.ormsession import ThreadLocalORMSession
from ming.orm import session
from ming import schema
from nose.tools import raises, assert_equal, assert_in
from forgetracker.model import Ticket, TicketAttachment
from forgetracker.tests.unit import TrackerTestWithModel
from forgetracker.import_support import ResettableStream
from allura.model import Feed, Post, User
from allura.lib import helpers as h
from allura.tests import decorators as td
class TestTicketModel(TrackerTestWithModel):
def test_that_label_counts_are_local_to_tool(self):
"""Test that label queries return only artifacts from the specified
tool.
"""
# create a ticket in two different tools, with the same label
from allura.tests import decorators as td
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
def _test_ticket():
return Ticket(ticket_num=1, summary="ticket1", labels=["mylabel"])
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def _test_ticket2():
return Ticket(ticket_num=2, summary="ticket2", labels=["mylabel"])
# create and save the tickets
t1 = _test_ticket()
t2 = _test_ticket2()
ThreadLocalORMSession.flush_all()
# test label query results
label_count1 = t1.artifacts_labeled_with(
"mylabel", t1.app_config).count()
label_count2 = t2.artifacts_labeled_with(
"mylabel", t2.app_config).count()
assert 1 == label_count1 == label_count2
def test_that_it_has_ordered_custom_fields(self):
custom_fields = dict(my_field='my value')
Ticket(summary='my ticket', custom_fields=custom_fields, ticket_num=3)
ThreadLocalORMSession.flush_all()
ticket = Ticket.query.get(summary='my ticket')
assert ticket.custom_fields == dict(my_field='my value')
@raises(schema.Invalid)
def test_ticket_num_required(self):
Ticket(summary='my ticket')
def test_ticket_num_required2(self):
t = Ticket(summary='my ticket', ticket_num=12)
try:
t.ticket_num = None
except schema.Invalid:
pass
else:
raise AssertionError('Expected schema.Invalid to be thrown')
def test_activity_extras(self):
t = Ticket(summary='my ticket', ticket_num=12)
assert_in('allura_id', t.activity_extras)
assert_equal(t.activity_extras['summary'], t.summary)
def test_private_ticket(self):
from allura.model import ProjectRole
from allura.model import ACE, DENY_ALL
from allura.lib.security import Credentials, has_access
from allura.websetup import bootstrap
admin = c.user
creator = bootstrap.create_user('Not a Project Admin')
developer = bootstrap.create_user('Project Developer')
observer = bootstrap.create_user('Random Non-Project User')
anon = User(_id=None, username='*anonymous',
display_name='Anonymous')
t = Ticket(summary='my ticket', ticket_num=3,
reported_by_id=creator._id)
assert creator == t.reported_by
role_admin = ProjectRole.by_name('Admin')._id
role_developer = ProjectRole.by_name('Developer')._id
role_creator = ProjectRole.by_user(t.reported_by, upsert=True)._id
ProjectRole.by_user(
developer, upsert=True).roles.append(role_developer)
ThreadLocalORMSession.flush_all()
cred = Credentials.get().clear()
t.private = True
assert_equal(t.acl, [
ACE.allow(role_developer, 'save_searches'),
ACE.allow(role_developer, 'read'),
ACE.allow(role_developer, 'create'),
ACE.allow(role_developer, 'update'),
ACE.allow(role_developer, 'unmoderated_post'),
ACE.allow(role_developer, 'post'),
ACE.allow(role_developer, 'moderate'),
ACE.allow(role_developer, 'delete'),
ACE.allow(role_creator, 'read'),
ACE.allow(role_creator, 'post'),
ACE.allow(role_creator, 'create'),
ACE.allow(role_creator, 'unmoderated_post'),
DENY_ALL])
assert has_access(t, 'read', user=admin)()
assert has_access(t, 'create', user=admin)()
assert has_access(t, 'update', user=admin)()
assert has_access(t, 'read', user=creator)()
assert has_access(t, 'post', user=creator)()
assert has_access(t, 'unmoderated_post', user=creator)()
assert has_access(t, 'create', user=creator)()
assert not has_access(t, 'update', user=creator)()
assert has_access(t, 'read', user=developer)()
assert has_access(t, 'create', user=developer)()
assert has_access(t, 'update', user=developer)()
assert not has_access(t, 'read', user=observer)()
assert not has_access(t, 'create', user=observer)()
assert not has_access(t, 'update', user=observer)()
assert not has_access(t, 'read', user=anon)()
assert not has_access(t, 'create', user=anon)()
assert not has_access(t, 'update', user=anon)()
t.private = False
assert t.acl == []
assert has_access(t, 'read', user=admin)()
assert has_access(t, 'create', user=admin)()
assert has_access(t, 'update', user=admin)()
assert has_access(t, 'read', user=developer)()
assert has_access(t, 'create', user=developer)()
assert has_access(t, 'update', user=developer)()
assert has_access(t, 'read', user=creator)()
assert has_access(t, 'unmoderated_post', user=creator)()
assert has_access(t, 'create', user=creator)()
assert not has_access(t, 'update', user=creator)()
assert has_access(t, 'read', user=observer)()
assert has_access(t, 'read', user=anon)()
def test_feed(self):
t = Ticket(
app_config_id=c.app.config._id,
ticket_num=1,
summary='test ticket',
description='test description',
created_date=datetime(2012, 10, 29, 9, 57, 21, 465000))
assert_equal(t.created_date, datetime(2012, 10, 29, 9, 57, 21, 465000))
f = Feed.post(
t,
title=t.summary,
description=t.description,
pubdate=t.created_date)
assert_equal(f.pubdate, datetime(2012, 10, 29, 9, 57, 21, 465000))
assert_equal(f.title, 'test ticket')
assert_equal(f.description,
'<div class="markdown_content"><p>test description</p></div>')
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def test_ticket_move(self):
app1 = c.project.app_instance('bugs')
app2 = c.project.app_instance('bugs2')
with h.push_context(c.project._id, app_config_id=app1.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
ticket.assigned_to_id = User.by_username('test-user')._id
            ticket.discussion_thread.add_post(text='test comment')
|
lesscomplex/HomeSec
|
lock/buzz_anm.py
|
Python
|
agpl-3.0
| 2,986
| 0.081045
|
import RPi.GPIO as GPIO
import time
buzzer_pin = 27
notes = {
'B0' : 31,
'C1' : 33, 'CS1' : 35,
'D1' : 37, 'DS1' : 39,
'EB1' : 39,
'E1' : 41,
'F1' : 44, 'FS1' : 46,
'G1' : 49, 'GS1' : 52,
'A1' : 55, 'AS1' : 58,
'BB1' : 58,
'B1' : 62,
'C2' : 65, 'CS2' : 69,
'D2' : 73, 'DS2' : 78,
'EB2' : 78,
'E2' : 82,
'F2' : 87, 'FS2' : 93,
'G2' : 98, 'GS2' : 104,
'A2' : 110, 'AS2' : 117,
'BB2' : 123,
'B2' : 123,
'C3' : 131, 'CS3' : 139,
'D3' : 147, 'DS3' : 156,
'EB3' : 156,
'E3' : 165,
'F3' : 175, 'FS3' : 185,
'G3' : 196, 'GS3' : 208,
'A3' : 220, 'AS3' : 233,
'BB3' : 233,
'B3' : 247,
'C4' : 262, 'CS4' : 277,
'D4' : 294, 'DS4' : 311,
'EB4' : 311,
'E4' : 330,
'F4' : 349, 'FS4' : 370,
'G4' : 392, 'GS4' : 415,
'A4' : 440, 'AS4' : 466,
'BB4' : 466,
'B4' : 494,
'C5' : 523, 'CS5' : 554,
'D5' : 587, 'DS5' : 622,
'EB5' : 622,
'E5' : 659,
'F5' : 698, 'FS5' : 740,
'G5' : 784, 'GS5' : 831,
'A5' : 880, 'AS5' : 932,
    'BB5' : 932,
'B5' : 988,
'C6' : 1047, 'CS6' : 1109,
'D6' : 1175, 'DS6' : 1245,
'EB6' : 1245,
'E6' : 1319,
'F6' : 1397, 'FS6' : 1480,
'G6' : 1568, 'GS6' : 1661,
'A6' : 1760, 'AS6' : 1865,
'BB6' : 1865,
'B6' : 1976,
'C7' : 2093, 'CS7' : 2217,
'D7' : 2349, 'DS7' : 2489,
'EB7' : 2489,
'E7' : 2637,
'F7' : 2794, 'FS7' : 2960,
'G7' : 3136, 'GS7' : 3322,
'A7' : 3520, 'AS7' : 3729,
'BB7' : 3729,
'B7' : 3951,
'C8' : 4186, 'CS8' : 4435,
'D8' : 4699, 'DS8' : 4978
}
anmeldung = [
    notes['F4'],notes['A4'],notes['G4'],notes['C4'],
]
anmeldung_tempo = [
0.25,0.25,0.25,0.35,
]
def buzz(frequency, length): #create the function "buzz" and feed it the pitch and duration)
if(frequency==0):
time.sleep(length)
return
period = 1.0 / frequency #in physics, the period (sec/cyc) is the inverse of the frequency (cyc/sec)
    delayValue = period / 2 #calculate the time for half of the wave
numCycles = int(length * frequency) #the number of waves to produce is the duration times the frequency
for i in range(numCycles): #start a loop from 0 to the variable "cycles" calculated above
GPIO.output(buzzer_pin, True) #set pin 27 to high
time.sleep(delayValue) #wait with pin 27 high
GPIO.output(buzzer_pin, False) #set pin 27 to low
time.sleep(delayValue) #wait with pin 27 low
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setup(buzzer_pin, GPIO.IN)
GPIO.setup(buzzer_pin, GPIO.OUT)
def destroy():
GPIO.cleanup() # Release resource
def play(melody,tempo,pause,pace=0.800):
for i in range(0, len(melody)): # Play song
noteDuration = tempo[i]*pace
buzz(melody[i],noteDuration) # Change the frequency along the song note
pauseBetweenNotes = noteDuration * pause
time.sleep(pauseBetweenNotes)
if __name__ == '__main__': # Program start from here
try:
setup()
print "Anmeldung"
play(anmeldung, anmeldung_tempo, 0.50, 1.5000)
time.sleep(2)
destroy()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()
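# Example (editorial sketch): play() can be reused for any melody/tempo pair of equal
# length; the two-note melody below is an arbitrary assumption, not part of the original.
#
#   setup()
#   play([notes['C4'], notes['E4']], [0.25, 0.5], 0.30, 0.8)
#   destroy()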
|
danieltalsky/gp-code-test
|
books.py
|
Python
|
unlicense
| 1,974
| 0.003546
|
#!/usr/bin/env python
"""
books.py
reads a list of books from an input file and returns them filtered and sorted
features
- iterates through records without holding the entire dataset in memory, allowing for large datasets
- uses SQLite for storage and retrieval
"""
import os
import argparse
import sqlite3
from book_list.book_list_file_reader import BookListFileReader
from book_list.book_list import BookList
# Config
curdir = dir_path = os.path.dirname(os.path.realpath(__file__))
SQLITE3_DB_FILE = curdir + '/db/booklist.sqlite3'
file_import_list = {
'csv': curdir + '/code-test-source-files/csv',
'pipe': curdir + '/code-test-source-files/pipe',
'slash': curdir + '/code-test-source-files/slash',
}
# Command line parsing
parser = argparse.ArgumentParser(
prog='Read multiple formats of book data and display them filtered and sorted.'
)
parser.add_argument('--filter', action='store', default=None,
help='show a subset of books, looks for the argument as a substring of any of the fields')
parser.add_argument('--year', action='store_true', default=False,
help="sort the books by year, ascending inst
|
ead of default sort")
parser.add_argument('--reverse', action='store_true', default=False,
help='reverse sort')
args = parser.parse_args()
# Read files and populate book list
sqlite3_connection = sqlite3.Connection(SQLITE3_DB_FILE)
book_list = BookList(sqlite3_connection)
for parse_type, file_path in file_import_list.iteritems():
reader = BookListFileReader(file_path, parse_type)
while True:
row = reader.get_result()
if row is None:
break
book_list.insert_record(row)
# Make query based on command line arguments
book_list.query_book_list(filter=args.filter, year=args.year, reverse=args.reverse)
# Output
while True:
row = book_list.get_record()
    if row is None:
break
print("{}, {}, {}, {}".format(*row))
|
mhbu50/erpnext
|
erpnext/portal/doctype/homepage/homepage.py
|
Python
|
gpl-3.0
| 801
| 0.021223
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
			filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
			self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
|
QuantumQuadrate/CsPyController
|
python/exp_functional_waveforms/hybridChop.py
|
Python
|
lgpl-3.0
| 469
| 0.036247
|
def choppedRO(t,period=2e-3,RO_onoff=[0,.5],Trap_onoff=[.5,1]):
'''
period: time in ms
    RO_onoff: tuple containing [on,off] as a percentage of period
    Trap_onoff: tuple containing [on,off] as a percentage of period
'''
D2_switch(t,0)
vODT_switch(t,0)
D2_switch(t+RO_onoff[0]*period,1)
D2_switch(t+RO_onoff[1]*period,0)
vODT_switch(t+Trap_onoff[0]*period,1)
vODT_switch(t+Trap_onoff[1]*period,0)
return t+period
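# Example (editorial sketch): chaining several chopped readout cycles on one timeline.
# D2_switch and vODT_switch are assumed to be provided by the surrounding experiment
# framework; the loop below is an illustrative assumption.
#
#   t = 0.0
#   for _ in range(3):              # three consecutive 2 ms chop periods
#       t = choppedRO(t, period=2e-3)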
|
leecannon/trending
|
trending/count.py
|
Python
|
mit
| 4,854
| 0.004738
|
# Copyright (c) 2016 Lee Cannon
# Licensed under the MIT License, see included LICENSE File
from collections import Counter
from .filter import at_trigrams, with_words
def count_trigrams(interactions: list, minimum: int = 1, n: int = None, include_unknown: bool = False) -> list:
"""Returns the n most common trigrams in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore trigrams that occur less than or equal to minimum. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param include_unknown: Determines if the interactions with unknown trigrams should be included. Default False
:type include_unknown: bool
:return: The list of most common trigrams in the interactions given.
:rtype: list
"""
# The below (if not interaction.trigram == 'OWN') ignores unknown trigrams
if not include_unknown:
trigram_list = [interaction.trigram for interaction in interactions if not interaction.trigram == 'OWN']
else:
trigram_list = [interaction.trigram for interaction in interactions]
return [trigram for trigram in Counter(trigram_list).most_common(n=n) if trigram[1] > minimum]
def count_words(interactions: list, minimum: int = 1, n: int = None, additional_words_to_ignore: list=None) -> list:
"""Returns the n most common words in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore words that occur less than or equal to minimum. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words in the interactions given.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
word_list = [word for interaction in interactions for word in set(interaction.title_words)
if word not in additional_words_to_ignore]
counts = Counter(word_list).most_common(n=n)
counts = [count for count in counts if count[1] > minimum]
return counts
def count_interactions(interactions: list):
return len(interactions)
def count_words_at_trigrams(interactions: list, trigrams: list, n: int = None, minimum: int = 2,
additional_words_to_ignore: list = None) -> list:
"""Returns the list of most common words at the given trigram in order. Ignores words where the number of
occurrences is less than the minimum.
Example of returned list:
| [['modnet', 1234],
| ['password', 123],
| ['outlook', 34],
| ['network', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param trigrams: The list of trigrams to check.
:type trigrams: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words at the given trigram.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
return [word for word in count_words(at_trigrams(interactions, trigrams), n=n)
if word[1] >= minimum and word[0] not in additional_words_to_ignore]
def count_trigram_with_words(interactions: list, words: list, n: int = None, minimum: int = 2) -> list:
"""Returns the list of most common trigrams for occurrences of the given word in order. Ignores trigrams where the
number of occurrences is less than the minimum.
Example of returned list:
| [['ABW', 1234],
| ['NOW', 123],
| ['YOR', 34],
| ['BRC', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param words: The list of words to check.
:type words: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:return: The list of most common words at the given trigram.
:rtype: list
"""
return [trigram for trigram in count_trigrams(with_words(interactions, words), n=n)
if trigram[1] >= minimum]
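# Example (editorial sketch): these counters only assume objects exposing `trigram` and
# `title_words` attributes; the namedtuple and sample values below are placeholders.
#
#   from collections import namedtuple
#   Interaction = namedtuple('Interaction', ['trigram', 'title_words'])
#   data = [Interaction('ABW', ['password', 'reset']),
#           Interaction('ABW', ['password', 'locked']),
#           Interaction('NOW', ['printer'])]
#   count_trigrams(data, minimum=0)   # -> [('ABW', 2), ('NOW', 1)]
#   count_words(data, minimum=0)      # counts over each interaction's unique title words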
|
Tepira/binwalk
|
src/binwalk/core/smart.py
|
Python
|
mit
| 12,128
| 0.002803
|
# "Smart" parser for handling libmagic signature results. Specifically, this implements
# support for binwalk's custom libmagic signature extensions (keyword tags, string processing,
# false positive detection, etc).
import re
import binwalk.core.module
from binwalk.core.compat import *
from binwalk.core.common import get_quoted_strings, MathExpression
class Tag(object):
TAG_DELIM_START = "{"
TAG_DELIM_END = "}"
TAG_ARG_SEPERATOR = ":"
def __init__(self, **kwargs):
self.name = None
self.keyword = None
self.type = None
self.handler = None
self.tag = None
self.default = None
for (k,v) in iterator(kwargs):
setattr(self, k, v)
if self.type == int:
self.default = 0
elif self.type == str:
self.default = ''
if self.keyword is not None:
self.tag = self.TAG_DELIM_START + self.keyword
if self.type is None:
self.tag += self.TAG_DELIM_END
else:
self.tag += self.TAG_ARG_SEPERATOR
if self.handler is None:
if self.type == int:
self.handler = 'get_math_arg'
elif self.type == str:
self.handler = 'get_keyword_arg'
class Signature(object):
'''
Class for parsing smart signature tags in libmagic result strings.
This class is intended for internal use only, but a list of supported 'smart keywords' that may be used
in magic files is available via the SmartSignature.KEYWORDS dictionary:
from binwalk import SmartSignature
for tag in SmartSignature.TAGS:
print tag.keyword
'''
TAGS = [
Tag(name='raw-string', keyword='raw-string', type=str, handler='parse_raw_string'),
Tag(name='string-len', keyword='string-len', type=str, handler='parse_string_len'),
Tag(name='math', keyword='math', type=int, handler='parse_math'),
Tag(name='one-of-many', keyword='one-of-many', handler='one_of_many'),
Tag(name='display-once', keyword='display-once', handler='display_once'),
Tag(name='jump', keyword='jump-to-offset', type=int),
Tag(name='name', keyword='file-name', type=str),
Tag(name='size', keyword='file-size', type=int),
Tag(name='adjust', keyword='offset-adjust', type=int),
Tag(name='delay', keyword='extract-delay', type=str),
Tag(name='year', keyword='file-year', type=str),
Tag(name='epoch', keyword='file-epoch', type=int),
Tag(name='raw-size', keyword='raw-string-length', type=int),
Tag(name='raw-replace', keyword='raw-replace'),
Tag(name='string-len-replace', keyword='string-len'),
]
def __init__(self, filter, ignore_smart_signatures=False):
'''
Class constructor.
@filter - Instance of the MagicFilter class.
@ignore_smart_signatures - Set to True to ignore smart signature keywords.
Returns None.
'''
self.filter = filter
self.last_one_of_many = None
self.valid_once_already_seen = set()
self.ignore_smart_signatures = ignore_smart_signatures
def parse(self, data):
'''
Parse a given data string for smart signature keywords. If any are found, interpret them and strip them.
@data - String to parse, as returned by libmagic.
Returns a dictionary of parsed values.
'''
results = {}
self.valid = True
self.display = True
if data:
for tag in self.TAGS:
if tag.handler is not None:
(d, arg) = getattr(self, tag.handler)(data, tag)
if not self.ignore_smart_signatures:
data = d
if isinstance(arg, type(False)) and arg == False and not self.ignore_smart_signatures:
self.valid = False
elif tag.type is not None:
if self.ignore_smart_signatures:
results[tag.name] = tag.default
else:
results[tag.name] = arg
if self.ignore_smart_signatures:
results['description'] = data
else:
results['description'] = self.strip_tags(data)
else:
self.valid = False
results['valid'] = self.valid
results['display'] = self.display
return binwalk.core.module.Result(**results)
def tag_lookup(self, keyword):
for tag in self.TAGS:
if tag.keyword == keyword:
return tag
return None
def is_valid(self, data):
'''
Validates that result data does not contain smart keywords in file-supplied strings.
@data - Data string to validate.
Returns True if data is OK.
Returns False if data is not OK.
'''
# All strings printed from the target file should be placed in strings, else there is
# no way to distinguish between intended keywords and unintended keywords. Get all the
# quoted strings.
quoted_data = get_quoted_strings(data)
# Check to see if there was any quoted data, and if so, if it contained the keyword starting delimiter
if quoted_data and Tag.TAG_DELIM_START in quoted_data:
# If so, check to see if the quoted data contains any of our keywords.
# If any keywords are found inside of quoted data, consider the keywords invalid.
for tag in self.TAGS:
if tag.tag in quoted_data:
return False
return True
def safe_string(self, data):
'''
Strips out quoted data (i.e., data taken directly from a file).
'''
quoted_string = get_quoted_strings(data)
if quoted_string:
data = data.replace('"' + quoted_string + '"', "")
return data
def display_once(self, data, tag):
'''
Determines if a given data string should be printed if {display-once} was specified.
@data - String result data.
Returns False if the string result should not be displayed.
Returns True if the string result should be displayed.
'''
if self.filter.valid_result(data):
signature = data.split(',')[0]
if signature in self.valid_once_already_seen:
self.display = False
return (data, False)
elif tag.tag in data:
self.valid_once_already_seen.add(signature)
return (data, True)
return (data, True)
def one_of_many(self, data, tag):
'''
Determines if a given data string is one result of many.
@data - String result data.
Returns False if the string result is one of many and should not be displayed.
Returns True if the string result is not one of many and should be displayed.
'''
if self.filter.valid_result(data):
if self.last_one_of_many is not None and data.startswith(self.last_one_of_many):
self.display = False
elif tag.tag in data:
# Only match on the data before the first comma, as that is typically unique and static
self.last_one_of_many = data.split(',')[0]
else:
self.last_one_of_many = None
return (data, True)
def get_keyword_arg(self, data, tag):
'''
Retrieves the argument for keywords that specify arguments.
@data - String result data, as returned by libmagic.
@keyword - Keyword index in KEYWORDS.
Returns the argument string value on success.
Returns a blank string on failure.
        '''
        arg = ''
        safe_data = self.safe_string(data)
        if tag.tag in safe_data:
arg = safe_data.split(tag.tag)[1].split(tag.TAG_DELIM_END)[0]
return (data, arg)
def get_math_arg(self, data, tag):
'''
        Retrieves the argument for keywords that specify mathematical e
|
ljx0305/ice
|
allTests.py
|
Python
|
gpl-2.0
| 476
| 0.006303
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "scripts"))
from Util import runTests
runTests()
|
huaiping/pandora
|
membership/urls.py
|
Python
|
mit
| 324
| 0
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^login/$', views.login, name='login'),
url(r'^register/$', views.register, name='register'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^plaza/$', views.plaza, name='plaza'),
]
|
SkySchermer/uweclang
|
uweclang/plain/clean.py
|
Python
|
mit
| 7,350
| 0.000954
|
# -*- coding: utf-8 -*-
"""UWEC Language Tools student corpus module
Provides functions for processing student corpus data.
"""
# Python 3 forward compatability imports.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Standard imports
import re
# Setup logger.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
def punctuation_density(text, punctuation=r'[^\w\s]'):
"""Retu
|
rns the punctuation density of the given text.
Arguments:
text (str): The input text.
        punctuation (str): A regex pattern for matching punctuation characters.
            Defaults to r'[^\w\s]'.
Returns:
        (float): The density of punctuation in the text.
"""
if len(text) == 0:
return 0
words = re.sub(r'\W', ' ', text).split()
puncs = float(sum([len(re.findall(punctuation, x)) for x in text]))
return (puncs / len(words)) if len(words) > 0 else 0.0
def capitalization_density(text):
"""Returns the word-starting capitalized character density of the given
text.
Arguments:
text (str): The input text.
Returns:
(float): The density of capitalized words in the text.
"""
if len(text) == 0:
return 0
words = re.sub(r'\W', ' ', text).split()
caps = float(sum([1 for x in words if re.match('[A-Z]', x)]))
return (caps / len(words)) if len(words) > 0 else 0.0
def clean_punctuation(text, punctuation=r',\.!\?:;'):
"""Returns text modified by removing whitespace before punctuation.
Arguments:
text (str): The input text.
punctuation (str): regex containing the punctuation to remove
whitespace before. Defaults to ',\.!\?:;'.
Returns:
(str): The modified text.
"""
# Straighten quotes, remove interior spaces.
text = re.sub(r'“ ?| ?”', '\"', text)
text = re.sub(r'‘ ?| ?’', '\'', text)
# Remove punctuation after quotes.
text = re.sub(r'([”"])\s*([{0}])'.format(punctuation), r'\2\1 ', text)
text = re.sub(r'([”"])\s*([{0}])'.format(punctuation), r'\1 ', text)
# Remove strings of punctuation.
text = re.sub(r'\b ?([{0}])[{0}\s]+'.format(punctuation), r'\1 ', text)
# Remove extra whitespace.
text = re.sub(r'\s+', r' ', text)
return text
def parse_parentheticals(text, lparen='\(', rparen='\)'):
"""Parses the given text and returns a tree of parentheticals.
Arguments:
text (str): The input text.
lparen (str): A regex for matching the left parenthetical delimiter.
rparen (str): A regex for matching the right parenthetical delimiter.
Returns:
(dict | [str]): A dictionary representing the parse tree or a list of
strings. Each node of the tree will have the following structure:
{'parens': (l, r), 'text': []}
where (l, r) are the parentheticals wrapping the text, and the list
contains raw text and subnodes. For example, the following string
'ab)c((d)ef)g()(hi'
will return:
{'parens': None,
'text': ['ab',
')',
'c',
{'parens': ('(', ')'),
'text': [{'parens': ('(', ')'), 'text': ['d']}, 'ef']},
'g',
{'parens': ('(', ')'), 'text': []},
{'parens': ('(', None), 'text': ['hi']}]}
Unmatched lparens will be interpretted as regular text. Unmatched
rparens will have None as their second parens tuple element. If there
are no parentheticals, a list of text will be returned.
"""
# Precompile regular expressions for ease of use.
n_regex = re.compile(r'([^{}{}]*)'.format(lparen, rparen))
l_regex = re.compile(r'({})'.format(lparen))
r_regex = re.compile(r'({})'.format(rparen))
# Build root of tree.
tree = {'parens': (None, None),
'text': []}
context = [tree]
rest = text
# Keep parsing until nothing is left.
while rest:
node = context[0]
# Match rparens.
m = r_regex.match(rest)
if m:
if node['parens'] == (None, None):
node['text'].append(m.group(1))
else:
node = context.pop(0)
node['parens'] = (node['parens'][0], m.group(1))
rest = rest[len(m.group(1)):]
continue
# Match lparens.
m = l_regex.match(rest)
if m:
new_node = {'parens': (m.group(1), None),
'text': []}
node['text'].append(new_node)
context.insert(0, new_node)
rest = rest[len(m.group(1)):]
continue
# Match text.
m = n_regex.match(rest)
if m:
node['text'].append(m.group(1))
rest = rest[len(m.group(1)):]
# Remove highest level tree if whole string is parenthetical.
if len(tree['text']) == 1:
tree = [tree['text'][0]]
return tree
def recombine_parentheticals(parse_tree, selector_function=None, sep=''):
"""Recombines text seperated by the seperate_parentheticals function by
using a selector function to determine which portions to keep or discard.
Arguments:
parse_tree (dict): A tree of parsed parentheticals
(See parse_parentheticals.)
selector_function ((str, str), str -> true): A function taking a pair
of parenthesis and a string, and returning whether to keep the
string or discard it. Allows for selective recombination of text.
Defaults to None (everything is kept.)
        sep (str): The separator to use when combining the text. Defaults to
''.
Returns:
(str): The resulting text.
Raises:
        (ValueError): When unknown values are contained in parse_tree.
"""
# Set default selector test function if none is provided.
selector_function = selector_function or (lambda x, y: True)
# Reconstruct parse tree root for lists and strings.
if isinstance(parse_tree, list):
parse_tree = {'parens': (None, None), 'text': parse_tree}
elif isinstance(parse_tree, str) or isinstance(parse_tree, unicode):
parse_tree = {'parens': (None, None), 'text': [parse_tree]}
text = []
for item in parse_tree['text']:
if isinstance(item, str) or isinstance(item, unicode):
text.append(item)
elif isinstance(item, dict):
# Recreate text from rest of this node.
res = recombine_parentheticals(item,
selector_function=selector_function,
sep=sep)
# Append text if it passes selector test.
if selector_function(parse_tree['parens'], res):
text.append(res)
else:
raise ValueError('Unknown parse tree content.')
res = sep.join(text)
# Use selector test on the whole tree.
if selector_function(parse_tree['parens'], res):
l = parse_tree['parens'][0]
r = parse_tree['parens'][1]
return sep.join([x for x in [l, res, r] if x is not None])
return ''
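# Example (editorial sketch): rough density metrics and punctuation cleanup on a toy
# sentence; the sentence itself is an arbitrary assumption.
#
#   s = 'Hello , world ! This is a Test .'
#   punctuation_density(s)     # punctuation marks per word
#   capitalization_density(s)  # fraction of words starting with a capital letter
#   clean_punctuation(s)       # removes the stray whitespace before each mark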
|
yaybu/touchdown
|
touchdown/tests/fixtures/ssh_connection.py
|
Python
|
apache-2.0
| 2,710
| 0.000369
|
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import threading
import paramiko
from touchdown.tests.fixtures.fixture import Fixture
class DummyServer(paramiko.ServerInterface):
def get_allowed_auths(self, username):
return "publickey,password"
    def check_auth_password(self, username, password):
return paramiko.AUTH_SUCCESSFUL
def check_auth_publickey(self, username, key):
return paramiko.AUTH_SUCCESSFUL
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
return True
def check_channel_shell_request(self, channel):
return True
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight, modes
):
return True
class SshConnectionFixture(Fixture):
def __enter__(self):
self.listen_socket = socket.socket()
self.listen_socket.bind(("0.0.0.0", 0))
self.listen_socket.listen(1)
self.address, self.port = self.listen_socket.getsockname()
self.fixtures.push(lambda *exc_info: self.listen_socket.close())
self.event = threading.Event()
self.ssh_connection = self.workspace.add_ssh_connection(
name="test-ssh-connection", hostname=self.address, port=self.port
)
self.listen_thread = threading.Thread(target=self.server_thread)
self.listen_thread.daemon = True
self.listen_thread.start()
return self
def server_thread(self):
self.client_socket, addr = self.listen_socket.accept()
self.fixtures.push(lambda *exc_info: self.client_socket.close())
self.server_transport = paramiko.Transport(self.client_socket)
self.fixtures.push(lambda *exc_info: self.server_transport.close())
self.server_transport.add_server_key(
paramiko.RSAKey.from_private_key_file(
os.path.join(os.path.dirname(__file__), "..", "assets/id_rsa_test")
)
)
self.server = DummyServer()
self.server_transport.start_server(self.event, self.server)
|
flp9001/eveggie
|
config/settings/base.py
|
Python
|
mit
| 10,190
| 0.001865
|
"""
Django settings for eveggie project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (eveggie/config/settings/base.py - 3 = eveggie/)
APPS_DIR = ROOT_DIR.path('eveggie')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'eveggie.users.apps.UsersConfig',
'eveggie.restaurants.apps.RestaurantsConfig',
'eveggie.orders.apps.OrdersConfig',
'eveggie.reviews.apps.ReviewsConfig',
# Your stuff: custom apps go here
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'eveggie.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Felipe Holanda""", 'azul@eveggie.com'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///eveggie'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-ar
|
shobhitmishra/CodingProblems
|
LeetCode/Session3/mincut.py
|
Python
|
mit
| 600
| 0.005
|
class Solution:
def minCut(self, s: str) -> int:
cut = [0] * (len(s) + 1)
cut[0] = -1
ispal = []
for _ in range(len(s)):
ispal.append([False] * len(s))
for i in range(len(s)):
mincut = i
for j in range(i+1):
                # if s[j..i] is a palindrome
if s[i] == s[j] and (i-j <= 2 or ispal[j+1][i-1]):
ispal[j][i] = True
mincut = min(mincut, cut[j] + 1)
cut[i+1] = mincut
return cut[-1]
ob = Solution()
s = "aabbaa"
print(ob.minCut(s))
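# Illustrative check (not part of the original file): "aabbaa" is itself a
# palindrome, so the DP above needs 0 cuts and the print outputs 0; for
# comparison, Solution().minCut("aab") returns 1 ("aa" | "b").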
|
|
TxDOT/python
|
standalone/AdminPrefix_Resourcer_v1.py
|
Python
|
mit
| 3,702
| 0.006753
|
#
#
#March 2014
#Adam Breznicky - TxDOT TPP - Mapping Group
#
#This is an independent script which requires a single parameter designating a directory.
#The script will walk through each subfolder and file within the designated directory, identifying the MXD files
#and re-sourcing the Comanche database connections to utilize the new 'Admin' prefix
#
#
#
#
#import modules
import arcpy, os
#variables
directory = ""
def re_source_admin():
#issue list
issues = []
#walk through each directory
for root, dirs, files in os.walk(directory):
#ignore file and personal geodatabases
        specDir = root.split("\\")[-1]
dbsuffix = specDir.split(".")[-1]
if dbsuffix == "gdb" or dbsuffix == "mdb" or dbsuffix == "tbx":
pass
else:
for n in files:
#identify the mxds
if str(n).split(".")[-1] == "mxd":
print "working on: " + str(os.path.join(root, n))
map = arcpy.mapping.MapDocument(os.path.join(root, n))
dataframes = arcpy.mapping.ListDataFrames(map)
for df in dataframes:
layers = arcpy.mapping.ListLayers(map, "", df)
for lyr in layers:
try:
if "TPP_GIS.MCHAMB1." in lyr.dataSource:
print "lyr source: " + lyr.dataSource
newsource = lyr.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
lyr.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "lyr replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print lyr.name + " is not a feature layer"
tables = arcpy.mapping.ListTableViews(map, "", df)
for tbl in tables:
try:
if "TPP_GIS.MCHAMB1." in tbl.dataSource:
print "tbl source: " + tbl.dataSource
newsource = tbl.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
tbl.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "tbl replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print tbl.name + " is not a feature layer"
                    map.save()
    return issues
issues = re_source_admin()
print "success!"
print "the following MXDs contained issues with a layer that has no dataSource (e.g. a non-feature layer):"
for i in issues:
    print str(i)
|
ajbouh/tfi
|
src/tfi/parse/docstring.py
|
Python
|
mit
| 17,890
| 0.000671
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.docstring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Classes for docstring parsing and formatting.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import collections
import inspect
import re
# from six import string_types, u
# from six.moves import range
from .iterators import modify_iter
import sys
def _prepare_docstring(s, ignore=1):
# type: (unicode, int) -> List[unicode]
"""Convert a docstring into lines of parseable reST. Remove common leading
indentation, where the indentation of a given number of lines (usually just
one) is ignored.
Return the docstring as a list of lines usable for inserting into a docutils
ViewList (used as argument of nested_parse().) An empty line is added to
act as a separator between this docstring and following content.
"""
lines = s.expandtabs().splitlines()
# Find minimum indentation of any non-blank lines after ignored lines.
margin = sys.maxsize
for line in lines[ignore:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation from ignored lines.
for i in range(ignore):
if i < len(lines):
lines[i] = lines[i].lstrip()
if margin < sys.maxsize:
for i in range(ignore, len(lines)):
lines[i] = lines[i][margin:]
# Remove any leading blank lines.
while lines and not lines[0]:
lines.pop(0)
# make sure there is an empty line at the end
if lines and lines[-1]:
lines.append('')
return lines
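# Illustrative example (assumed input, not from the original source): with the
# default ignore=1, the common margin is measured from the second line onward,
# so _prepare_docstring("Summary.\n    Details line.") returns
# ['Summary.', 'Details line.', ''] -- dedented and terminated by a blank line.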
_directive_regex = re.compile(r'\.\. \S+::')
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
_google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)')
_single_colon_regex = re.compile(r'(?<!:):(?!:)')
_xref_regex = re.compile(r'(:(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)')
_bullet_list_regex = re.compile(r'^(\*|\+|\-)(\s+\S|\s*$)')
_enumerated_list_regex = re.compile(
r'^(?P<paren>\()?'
r'(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])'
r'(?(paren)\)|\.)(\s+\S|\s*$)')
class GoogleDocstring(object):
"""Convert Google style docstrings to reStructuredText.
Parameters
----------
docstring : :obj:`str` or :obj:`list` of :obj:`str`
The docstring to parse, given either as a string or split into
individual lines.
Other Parameters
----------------
what : :obj:`str`, optional
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : :obj:`str`, optional
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
Example
-------
>>> from sphinx.ext.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Args:
... arg1(int): Description of `arg1`
... arg2(str): Description of `arg2`
... Returns:
... str: Description of return value.
... '''
>>> print(GoogleDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
"""
def __init__(self, docstring=None, what='', name='',
obj=None, options=None):
if not what:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
            elif callable(obj):
what = 'function'
else:
what = 'object'
if docstring is None:
if obj is None:
raise "If docstring is None, obj may not be"
docstring = obj.__doc__
self._what = what
self._name = name
self._obj = obj
if isinstance(docstring, str):
docstring = _prepare_docstring(docstring)
self._lines = docstring
self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
self._parsed_lines = [] # type: List[unicode]
self._is_in_section = False
self._section_indent = 0
self._directive_sections = [] # type: List[unicode]
self._entry_sections = {
'args': self._parse_fields_section,
'attributes': self._parse_fields_section,
'returns': self._parse_fields_section,
'yields': self._parse_fields_section,
'example args': self._parse_fields_section,
} # type: Dict[unicode, Callable]
self._freeform_sections = {
'example': self._parse_generic_section,
'examples': self._parse_generic_section,
'example returns': self._parse_generic_section,
'note': self._parse_generic_section,
'references': self._parse_generic_section,
'see also': self._parse_generic_section,
'todo': self._parse_generic_section,
} # type: Dict[unicode, Callable]
self._sections = {
name: value
for name, value in [*self._entry_sections.items(), *self._freeform_sections.items()]
}
self._parsed_dicts = {
name: []
for name in self._entry_sections.keys()
}
self._parse()
def lines(self):
# type: () -> List[unicode]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
return self._parsed_lines
def result(self):
        # type: () -> Dict[unicode, Any]
        """Return the parsed docstring as a dict.
        Returns
        -------
        dict
            The parsed reST lines under 'sections', plus one entry per parsed
            field section (args, attributes, returns, ...).
"""
return {'sections': self._parsed_lines, **self._parsed_dicts}
def _consume_indented_block(self, indent=1):
# type: (int) -> List[unicode]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
(not line or self._is_indented(line, indent))):
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_contiguous(self):
# type: () -> List[unicode]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
not self._is_section_header()):
lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_empty(self):
# type: () -> List[unicode]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
line = next(self._line_iter) # type: ignore
before, colon, after = self._partition_field_on_colon(line)
_name, _type, _desc = before, '', after # type: unicode, unicode, unicode
if parse_type:
match = _google_typed_arg_regex.match(before) # type: ignore
if match:
_name = match.group(1)
_type = match.group(2)
_name = self._escape_args_and_kwargs(_name)
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
_descs = [_desc] + self._dedent(self.
|
EnduranceIndia/ratelimitd
|
Policies/SaslSenderDomainPolicy.py
|
Python
|
apache-2.0
| 3,282
| 0.004875
|
"""
Copyright 2015 Sai Gopal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Logger import Logger
from ProfileLookup import ProfileLookup
from RedisConn import RedisConn
class SaslSenderDomainPolicy:
"""
This class provides saslsender domain rate limiting
"""
key = 'sasl_username'
prefix = 'SaslSenderPolicy_'
quota = {}
def __init__(self, parsed_config):
self.parsed_config = parsed_config
self.Enforce = parsed_config.getboolean('SaslSenderDomainPolicy', 'Enforce')
self.RejectMessage = parsed_config.get('SaslSenderDomainPolicy', 'RejectMessage')
self.ProfileLookupObj = ProfileLookup.create_profile_lookup('SaslSenderDomainPolicy', parsed_config)
self.ProfileCacheTTL = parsed_config.getint('SaslSenderDomainPolicy', 'ProfileCacheTime')
for i in parsed_config.items('SaslSenderDomainPolicy-Profiles'):
limits = i[1].split(',')
profile = i[0].lower()
SaslSenderDomainPolicy.quota[profile] = (int(limits[0]), int(limits[1]))
self.value = self.profile = self.error = None
def check_quota(self, message, redis_pipe):
self.error = False
try:
self.value = message.data[self.key].split('@')[1].lower()
self.profile = self.ProfileLookupObj.lookup(self.value, self.ProfileCacheTTL)
RedisConn.LUA_CALL_CHECK_LIMIT(keys=[SaslSenderDomainPolicy.prefix + self.value],
args=[SaslSenderDomainPolicy.quota[self.profile][0]], client=redis_pipe)
except IndexError:
self.error = True
self.message = message
RedisConn.LUA_CALL_DO_NOTHING_SLAVE(keys=[], args=[], client=redis_pipe)
def update_quota(self, redis_pipe):
if self.error:
RedisConn.LUA_CALL_DO_NOTHING_MASTER(keys=[], args=[], client=redis_pipe)
else:
RedisConn.LUA_CALL_INCR(keys=[SaslSenderDomainPolicy.prefix + self.value],
args=[SaslSenderDomainPolicy.quota[self.profile][1]], client=redis_pipe)
def log_quota(self, accept, redis_val=None):
if accept:
if self.error:
Logger.log(
                    'SaslSenderDomainPolicy Unable To Split SaslSender(%s) Action: accept' % (
self.message.data[self.key]))
else:
Logger.log('SaslSenderDomainPolicy SaslSenderDomain: %s Quota: (%s/%s) Profile: %s Action: accept' % (
self.value, str(int(redis_val)), str(SaslSenderDomainPolicy.quota[self.profile][0]), self.profile))
else:
Logger.log('SaslSenderDomainPolicy SaslSenderDomain: %s Quota: Exceeded Profile: %s Action: reject' % (
self.value, self.profile))
|
zygmuntz/numer.ai
|
validate_lr.py
|
Python
|
bsd-3-clause
| 3,382
| 0.041987
|
#!/usr/bin/env python
"Load data, create the validation split, optionally scale data, train a linear model, evaluate"
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer, PolynomialFeatures
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, StandardScaler, RobustScaler
from sklearn.linear_model import LogisticRegression as LR
from sklearn.metrics import roc_auc_score as AUC
from sklearn.metrics import accuracy_score as accuracy
#
def train_and_evaluate( y_train, x_train, y_val, x_val ):
lr = LR()
lr.fit( x_train, y_train )
p = lr.predict_proba( x_val )
p_bin = lr.predict( x_val )
acc = accuracy( y_val, p_bin )
auc = AUC( y_val, p[:,1] )
return ( auc, acc )
def transform_train_and_evaluate( transformer ):
global x_train, x_val, y_train
x_train_new = transformer.fit_transform( x_train )
x_val_new = transformer.transform( x_val )
return train_and_evaluate( y_train, x_train_new, y_val, x_val_new )
#
input_file = 'data/orig/numerai_training_data.csv'
d = pd.read_csv( input_file )
# indices for validation examples
iv = d.validation == 1
val = d[iv].copy()
train = d[~iv].copy()
# no need for validation flag anymore
train.drop( 'validation', axis = 1 , inplace = True )
val.drop( 'validation', axis = 1 , inplace = True )
# encode the categorical variable as one-hot, drop the original column afterwards
train_dummies = pd.get_dummies( train.c1 )
train_num = pd.concat(( train.drop( 'c1', axis = 1 ), train_dummies ), axis = 1 )
val_dummies = pd.get_dummies( val.c1 )
val_num = pd.concat(( val.drop( 'c1', axis = 1 ), val_dummies ), axis = 1 )
#
y_train = train_num.target.values
y_val = val_num.target.values
x_train = train_num.drop( 'target', axis = 1 )
x_val = val_num.drop( 'target', axis = 1 )
# train, predict, evaluate
auc, acc = train_and_evaluate( y_train, x_train, y_val, x_val )
print "No transformation"
print "AUC: {:.2%}, accuracy: {:.2%} \n".format( auc, acc )
# try different transformations for X
transformers = [ MaxAbsScaler(), MinMaxScaler(), RobustScaler(), StandardScaler(),
Normalizer( norm = 'l1' ), Normalizer( norm = 'l2' ), Normalizer( norm = 'max' ),
PolynomialFeatures() ]
poly_scaled = Pipeline([ ( 'poly', PolynomialFeatures()), ( 'scaler', MinMaxScaler()) ])
transformers.append( poly_scaled )
for transformer in transformers:
print transformer
auc, acc = transform_train_and_evaluate( transformer )
print "AUC: {:.2%}, accuracy: {:.2%} \n".format( auc, acc )
"""
No transformation
AUC: 52.67%, accuracy: 52.74%
MaxAbsScaler(copy=True)
AUC: 53.52%, accuracy: 52.46%
MinMaxScaler(copy=True, feature_range=(0, 1))
AUC: 53.52%, accuracy: 52.48%
RobustScaler(copy=True, with_centering=True, with_scaling=True)
AUC: 53.52%, accuracy: 52.45%
StandardScaler(copy=True, with_mean=True, with_std=True)
AUC: 53.52%, accuracy: 52.42%
Normalizer(copy=True, norm='l1')
AUC: 53.16%, accuracy: 53.19%
Normalizer(copy=True, norm='l2')
AUC: 52.92%, accuracy: 53.20%
Normalizer(copy=True, norm='max')
AUC: 53.02%, accuracy: 52.66%
PolynomialFeatures(degree=2, include_bias=True, interaction_only=False)
AUC: 53.25%, accuracy: 52.61%
Pipeline(steps=[
('poly', PolynomialFeatures(degree=2, include_bias=True, interaction_only=False)),
    ('scaler', MinMaxScaler(copy=True, feature_range=(0, 1)))])
AUC: 53.62%, accuracy: 53.04%
"""
|
Aclz/Tentacles
|
python3/app/backend/maintree.py
|
Python
|
gpl-2.0
| 10,439
| 0.003172
|
from flask import request, jsonify
from sql_classes import UrlList, Acl, UserGroup, User, Role
def _node_base_and_rest(path):
"""
    Returns a tuple: (the preceding path before the last nodeSeparator, the substring after it)
    If 'base' includes its own baseSeparator, only the string after that separator is returned
So if a path is 'OU=Group,OU=Dept,OU=Company', the tuple result would be ('OU=Group,OU=Dept', 'Company')
"""
node_separator = ','
base_separator = '='
node_base = path[path.rfind(node_separator) + 1:]
if path.find(node_separator) != -1:
node_preceding = path[:len(path) - len(node_base) - 1]
else:
node_preceding = ''
return (node_preceding, node_base[node_base.find(base_separator) + 1:])
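# Edge case (illustrative, not in the original file): a path with no node
# separator, e.g. _node_base_and_rest('OU=Company'), returns ('', 'Company');
# calling it again on the empty remainder yields ('', ''), which is what ends
# the while loop in _place_user_onto_tree below.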
def _place_user_onto_tree(user, usertree, user_groups):
"""
Places a 'user' object on a 'usertree' object according to user's pathField string key
"""
curr_node = usertree
# Decompose 'OU=Group,OU=Dept,OU=Company' into ('OU=Group,OU=Dept', 'Company')
preceding, base = _node_base_and_rest(user['distinguishedName'])
full_node_path = ''
# Place all path groups onto a tree starting from the outermost
while base != '':
node_found = False
full_node_path = 'OU=' + base + (',' if full_node_path != '' else '') + full_node_path
# Search for corresponding base element on current hierarchy level
for obj in curr_node:
if obj.get('text') == None:
continue
if obj['text'] == base:
node_found = True
curr_node = obj['children']
break
# Create a new group node
if not node_found:
curr_node.append({
'id': 'usergroup_' + str(user_groups[full_node_path]),
'text': base,
'objectType': 'UserGroup',
'children': []
})
curr_node = curr_node[len(curr_node) - 1]['children']
preceding, base = _node_base_and_rest(preceding)
curr_node.append({
'id': 'user_' + str(user['id']),
'text': user['cn'],
'leaf': True,
'iconCls': 'x-fa fa-user' if user['status'] == 1 else 'x-fa fa-user-times',
'objectType': 'User'
})
def _sort_tree(subtree, sort_field):
"""
Sorts a subtree node by a sortField key of each element
"""
# Sort eval function, first by group property, then by text
subtree['children'] = sorted(
subtree['children'],
key=lambda obj: (1 if obj.get('children') == None else 0, obj[sort_field]))
for tree_elem in subtree['children']:
if tree_elem.get('children') != None:
_sort_tree(tree_elem, sort_field)
def _collapse_terminal_nodes(subtree):
"""
    Collapses tree nodes which don't contain subgroups, just tree leaves
"""
subtree_has_group_nodes = False
for tree_elem in subtree['children']:
if tree_elem.get('children') != None:
subtree_has_group_nodes = True
_collapse_terminal_nodes(tree_elem)
subtree['expanded'] = subtree_has_group_nodes
def _expand_all_nodes(subtree):
"""
Expand all level nodes
"""
for tree_elem in subtree['children']:
if tree_elem.get('children') != None:
_expand_all_nodes(tree_elem)
subtree['expanded'] = True
def _get_user_tree(current_user_properties, Session):
"""
Build user tree
"""
current_user_permissions = current_user_properties['user_permissions']
session = Session()
# Get all groups
query_result = session.query(UserGroup.id, UserGroup.distinguishedName).all()
user_groups = {}
for query_result_row in query_result:
user_groups[query_result_row.distinguishedName] = query_result_row.id
# Get all users if ViewUsers permission present
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewUsers'), None) != None:
query_result = session.query(
User.id.label('user_id'), User.cn, User.status, UserGroup.id.label('usergroup_id'),
UserGroup.distinguishedName).join(UserGroup).filter(User.hidden == 0).all()
# Get just the requester otherwise
else:
query_result = session.query(
            User.id.label('user_id'), User.cn, User.status, UserGroup.id.label('usergroup_id'),
UserGroup.distinguishedName).join(UserGroup).\
filter(User.id == current_user_properties['user_object']['id'], User.hidden == 0).all()
Session.remove()
# Future tree
user_tree = []
# Place each user on a tree
for query_result_row in query_result:
user_object = {
'id': query_result_row.user_id,
'distinguishedName': query_result_row.distinguishedName,
'status': query_result_row.status,
'cn': query_result_row.cn
}
_place_user_onto_tree(user_object, user_tree, user_groups)
user_tree = {
'id': 'usergroup_0',
'objectType': 'UserGroup',
'text': 'Пользователи',
'children': user_tree
}
# Sort tree elements
_sort_tree(user_tree, 'text')
# Collapse/expand tree nodes
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewUsers'), None) != None:
_collapse_terminal_nodes(user_tree)
else:
_expand_all_nodes(user_tree)
return user_tree
def _get_url_lists(Session):
"""
Get URL lists
"""
session = Session()
# Get all urllists from DB
query_result = session.query(UrlList.id, UrlList.name, UrlList.whitelist).all()
Session.remove()
urllist_list = []
# Making a list of them
for query_result_row in query_result:
url_list_object = {
'id': 'urllist_' + str(query_result_row.id),
'text': query_result_row.name,
'leaf': True,
'iconCls': 'x-fa fa-unlock' if query_result_row.whitelist else 'x-fa fa-lock',
'objectType': 'UrlList'
}
urllist_list.append(url_list_object)
url_lists = {
'id': 'urllists',
'objectType': 'UrlLists',
'text': 'Списки URL',
'iconCls': 'x-fa fa-cog',
'children': urllist_list
}
# Sort tree elements
_sort_tree(url_lists, 'text')
return url_lists
def _get_acls(Session):
"""
Get ACLs
"""
session = Session()
# Get all access control lists from DB
query_result = session.query(Acl.id, Acl.name).all()
Session.remove()
acl_list = []
# Making a list of them
for query_result_row in query_result:
acl_object = {
'id': 'acl_' + str(query_result_row.id),
'text': query_result_row.name,
'leaf': True,
'iconCls': 'x-fa fa-filter',
'objectType': 'AclContents'
}
acl_list.append(acl_object)
acls = {
'id': 'acls',
'objectType': 'Acls',
'text': 'Списки доступа',
'iconCls': 'x-fa fa-cog',
'children': acl_list
}
# Sort tree elements
_sort_tree(acls, 'text')
return acls
def _get_roles(Session):
"""
Get user roles
"""
session = Session()
# Get all roles from DB
query_result = session.query(Role.id, Role.name).all()
Session.remove()
roles_list = []
# Making a list of them
for query_result_row in query_result:
role_object = {
'id': 'role_' + str(query_result_row.id),
'text': query_result_row.name,
'leaf': True,
'iconCls': 'x-fa fa-key',
'objectType': 'Role'
}
roles_list.append(role_object)
roles = {
'id': 'roles',
'objectType': 'Roles',
'text': 'Роли',
'iconCls': 'x-fa fa-cog',
'children': roles_list
}
# Sorting tree elements
_sort_tree(roles, 'text')
return roles
def select_tree(current_user_properties, node_name, Session):
url_lists_node = None
acls_node = None
roles_node = None
users_node =
|
XKNX/xknx
|
xknx/io/gateway_scanner.py
|
Python
|
mit
| 9,132
| 0.000986
|
"""
GatewayScanner is an abstraction for searching for KNX/IP devices on the local network.
* It walks through all network interfaces
* and sends UDP multicast search requests
* it returns the first found device
"""
from __future__ import annotations
import asyncio
from functools import partial
import logging
from typing import TYPE_CHECKING
import netifaces
from xknx.knxip import (
DIB,
HPAI,
DIBDeviceInformation,
DIBServiceFamily,
DIBSuppSVCFamilies,
KNXIPFrame,
KNXIPServiceType,
SearchRequest,
SearchResponse,
)
from xknx.telegram import IndividualAddress
from .transport import UDPTransport
if TYPE_CHECKING:
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
class GatewayDescriptor:
"""Used to return information about the discovered gateways."""
def __init__(
self,
ip_addr: str,
port: int,
local_ip: str = "",
local_interface: str = "",
name: str = "UNKNOWN",
supports_routing: bool = False,
supports_tunnelling: bool = False,
supports_tunnelling_tcp: bool = False,
individual_address: IndividualAddress | None = None,
):
"""Initialize GatewayDescriptor class."""
self.name = name
self.ip_addr = ip_addr
self.port = port
self.local_interface = local_interface
self.local_ip = local_ip
self.supports_routing = supports_routing
self.supports_tunnelling = supports_tunnelling
self.supports_tunnelling_tcp = supports_tunnelling_tcp
self.individual_address = individual_address
def parse_dibs(self, dibs: list[DIB]) -> None:
"""Parse DIBs for gateway information."""
for dib in dibs:
if isinstance(dib, DIBSuppSVCFamilies):
self.supports_routing = dib.supports(DIBServiceFamily.ROUTING)
if dib.supports(DIBServiceFamily.TUNNELING):
self.supports_tunnelling = True
self.supports_tunnelling_tcp = dib.supports(
DIBServiceFamily.TUNNELING, version=2
)
continue
if isinstance(dib, DIBDeviceInformation):
self.name = dib.name
self.individual_address = dib.individual_address
continue
def __repr__(self) -> str:
"""Return object as representation string."""
return (
"GatewayDescriptor(\n"
f" name={self.name},\n"
f" ip_addr={self.ip_addr},\n"
f" port={self.port},\n"
f" local_interface={self.local_interface},\n"
f" local_ip={self.local_ip},\n"
f" supports_routing={self.supports_routing},\n"
f" supports_tunnelling={self.supports_tunnelling},\n"
f" supports_tunnelling_tcp={self.supports_tunnelling_tcp},\n"
f" individual_address={self.individual_address}\n"
")"
)
def __str__(self) -> str:
"""Return object as readable string."""
return f"{self.individual_address} - {self.name} @ {self.ip_addr}:{self.port}"
class GatewayScanFilter:
"""Filter to limit gateway scan attempts.
If `tunnelling` and `routing` are set it is treated as AND.
KNX/IP devices that don't support `tunnelling` or `routing` aren't matched.
"""
def __init__(
self,
name: str | None = None,
tunnelling: bool | None = None,
tunnelling_tcp: bool | None = None,
routing: bool | None = None,
):
"""Initialize GatewayScanFilter class."""
self.name = name
self.tunnelling = tunnelling
self.tunnelling_tcp = tunnelling_tcp
self.routing = routing
def match(self, gateway: GatewayDescriptor) -> bool:
"""Check whether the device is a gateway and given GatewayDescriptor matches the filter."""
if self.name is not None and self.name != gateway.name:
return False
if (
self.tunnelling is not None
and self.tunnelling != gateway.supports_tunnelling
):
return False
if (
self.tunnelling_tcp is not None
and self.tunnelling_tcp != gateway.supports_tunnelling_tcp
):
return False
if self.routing is not None and self.routing != gateway.supports_routing:
return False
return (
gateway.supports_tunnelling
or gateway.supports_tunnelling_tcp
or gateway.supports_routing
)
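# Illustrative usage sketch (not from the original file): a filter built as
# GatewayScanFilter(tunnelling=True) only matches gateways whose search
# response advertised DIBServiceFamily.TUNNELING, while the default filter
# matches any device that supports tunnelling, tunnelling over TCP or routing.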
class GatewayScanner:
"""Class for searching KNX/IP devices."""
def __init__(
self,
xknx: XKNX,
timeout_in_seconds: float = 4.0,
stop_on_found: int | None = 1,
scan_filter: GatewayScanFilter = GatewayScanFilter(),
):
"""Initialize GatewayScanner class."""
self.xknx = xknx
self.timeout_in_seconds = timeout_in_seconds
self.stop_on_found = stop_on_found
self.scan_filter = scan_filter
self.found_gateways: list[GatewayDescriptor] = []
self._udp_transports: list[UDPTransport] = []
self._response_received_event = asyncio.Event()
self._count_upper_bound = 0
"""Clean value of self.stop_on_found, computed when ``scan`` is called."""
async def scan(self) -> list[GatewayDescriptor]:
"""Scan and return a list of GatewayDescriptors on success."""
if self.stop_on_found is None:
self._count_upper_bound = 0
else:
self._count_upper_bound = max(0, self.stop_on_found)
await self._send_search_requests()
try:
await asyncio.wait_for(
self._response_received_event.wait(),
timeout=self.timeout_in_seconds,
)
except asyncio.TimeoutError:
pass
finally:
self._stop()
return self.found_gateways
def _stop(self) -> None:
"""Stop tearing down udp_transport."""
for udp_transport in self._udp_transports:
udp_transport.stop()
async def _send_search_requests(self) -> None:
"""Find all interfaces with active IPv4 connection to search for gateways."""
for interface in netifaces.interfaces():
try:
af_inet = netifaces.ifaddresses(interface)[netifaces.AF_INET]
ip_addr = af_inet[0]["addr"]
except KeyError:
logger.debug("No IPv4 address found on %s", interface)
continue
except ValueError as err:
# rare case when an interface disappears during search initialisation
logger.debug("Invalid interface %s: %s", interface, err)
continue
else:
await self._search_interface(interface, ip_addr)
async def _search_interface(self, interface: str, ip_addr: str) -> None:
"""Send a search request on a specific interface."""
logger.debug("Searching on %s / %s", interface, ip_addr)
udp_transport = UDPTransport(
            self.xknx,
            (ip_addr, 0),
            (self.xknx.multicast_group, self.xknx.multicast_port),
multicast=True,
)
udp_transport.register_callback(
partial(self._response_rec_callback, interface=interface),
[KNXIPServiceType.SEARCH_RESPONSE],
)
await udp_transport.connect()
self._udp_transports.append(udp_transport)
discovery_endpoint = HPAI(
ip_addr=self.xknx.multicast_group, port=self.xknx.multicast_port
)
search_request = SearchRequest(self.xknx, discovery_endpoint=discovery_endpoint)
udp_transport.send(KNXIPFrame.init_from_body(search_request))
def _response_rec_callback(
self,
knx_ip_frame: KNXIPFrame,
source: HPAI,
udp_transport: UDPTransport,
interface: str = "",
) -> None:
"""Verify and handle knxipframe. Callback from internal udp_transport."""
if not isinstance(knx_ip_frame.body, Sea
|
w1ll1am23/home-assistant
|
homeassistant/components/homeassistant/triggers/state.py
|
Python
|
apache-2.0
| 6,291
| 0.000795
|
"""Offer state listening automation rules."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.const import CONF_ATTRIBUTE, CONF_FOR, CONF_PLATFORM, MATCH_ALL
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, State, callback
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.event import (
Event,
async_track_same_state,
async_track_state_change_event,
process_state_match,
)
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
CONF_ENTITY_ID = "entity_id"
CONF_FROM = "from"
CONF_TO = "to"
BASE_SCHEMA = {
vol.Required(CONF_PLATFORM): "state",
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_FOR): cv.positive_time_period_template,
vol.Optional(CONF_ATTRIBUTE): cv.match_all,
}
TRIGGER_STATE_SCHEMA = vol.Schema(
{
**BASE_SCHEMA,
# These are str on purpose. Want to catch YAML conversions
vol.Optional(CONF_FROM): vol.Any(str, [str]),
vol.Optional(CONF_TO): vol.Any(str, [str]),
}
)
TRIGGER_ATTRIBUTE_SCHEMA = vol.Schema(
{
**BASE_SCHEMA,
vol.Optional(CONF_FROM): cv.match_all,
vol.Optional(CONF_TO): cv.match_all,
}
)
def TRIGGER_SCHEMA(value: Any) -> dict: # pylint: disable=invalid-name
"""Validate trigger."""
if not isinstance(value, dict):
raise vol.Invalid("Expected a dictionary")
# We use this approach instead of vol.Any because
# this gives better error messages.
if CONF_ATTRIBUTE in value:
return TRIGGER_ATTRIBUTE_SCHEMA(value)
return TRIGGER_STATE_SCHEMA(value)
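# Illustrative configuration accepted by TRIGGER_SCHEMA (hypothetical entity id,
# shown only as a sketch of the keys defined above):
#
#   platform: state
#   entity_id: light.kitchen
#   to: "on"
#   for: "00:05:00"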
async def async_attach_trigger(
hass: HomeAssistant,
config,
action,
automation_info,
*,
platform_type: str = "state",
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
entity_id = config.get(CONF_ENTITY_ID)
from_state = config.get(CONF_FROM, MATCH_ALL)
to_state = config.get(CONF_TO, MATCH_ALL)
time_delta = config.get(CONF_FOR)
template.attach(hass, time_delta)
match_all = from_state == MATCH_ALL and to_state == MATCH_ALL
unsub_track_same = {}
period: dict[str, timedelta] = {}
match_from_state = process_state_match(from_state)
match_to_state = process_state_match(to_state)
attribute = config.get(CONF_ATTRIBUTE)
job = HassJob(action)
trigger_id = automation_info.get("trigger_id") if automation_info else None
_variables = {}
if automation_info:
_variables = automation_info.get("variables") or {}
@callback
def state_automation_listener(event: Event):
"""Listen for state changes and calls action."""
entity: str = event.data["entity_id"]
from_s: State | None = event.data.get("old_state")
to_s: State | None = event.data.get("new_state")
if from_s is None:
old_value = None
elif attribute is None:
old_value = from_s.state
else:
old_value = from_s.attributes.get(attribute)
if to_s is None:
new_value = None
elif attribute is None:
new_value = to_s.state
else:
new_value = to_s.attributes.get(attribute)
# When we listen for state changes with `match_all`, we
# will trigger even if just an attribute changes. When
# we listen to just an attribute, we should ignore all
# other attribute changes.
if attribute is not None and old_value == new_value:
return
if (
not match_from_state(old_value)
or not match_to_state(new_value)
            or (not match_all and old_value == new_value)
):
return
@callback
def call_action():
"""Call action with right context."""
hass.async_run_hass_job(
job,
{
"trigger": {
"platform": platform_type,
"entity_id": entity,
"from_state": from_s,
"to
|
_state": to_s,
"for": time_delta if not time_delta else period[entity],
"attribute": attribute,
"description": f"state of {entity}",
"id": trigger_id,
}
},
event.context,
)
if not time_delta:
call_action()
return
trigger_info = {
"trigger": {
"platform": "state",
"entity_id": entity,
"from_state": from_s,
"to_state": to_s,
}
}
variables = {**_variables, **trigger_info}
try:
period[entity] = cv.positive_time_period(
template.render_complex(time_delta, variables)
)
except (exceptions.TemplateError, vol.Invalid) as ex:
_LOGGER.error(
"Error rendering '%s' for template: %s", automation_info["name"], ex
)
return
def _check_same_state(_, _2, new_st: State):
if new_st is None:
return False
if attribute is None:
cur_value = new_st.state
else:
cur_value = new_st.attributes.get(attribute)
if CONF_FROM in config and CONF_TO not in config:
return cur_value != old_value
return cur_value == new_value
unsub_track_same[entity] = async_track_same_state(
hass,
period[entity],
call_action,
_check_same_state,
entity_ids=entity,
)
unsub = async_track_state_change_event(hass, entity_id, state_automation_listener)
@callback
def async_remove():
"""Remove state listeners async."""
unsub()
for async_remove in unsub_track_same.values():
async_remove()
unsub_track_same.clear()
return async_remove
|
andr-04/Telethon
|
telethon/network/__init__.py
|
Python
|
mit
| 191
| 0
|
from .mtproto_plain_sender import MtProtoPlainSender
from .authenticator import do_authentication
from .mtproto_sender import MtProtoSender
from .connection import Connection, ConnectionMode
|
SkyZH/ddcm-word-count
|
wordcount/test/const.py
|
Python
|
bsd-3-clause
| 119
| 0
|
url = "https
|
://skyzh.github.io/social-network-site/1.html"
html_path = "wordcount/test/data/social.html"
devel = False
| |
rboman/progs
|
apps/fractal/cpp_qt/run.py
|
Python
|
apache-2.0
| 179
| 0
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == "__main__":
from build import *
addroot()
    import pytools.build as b
    b.build()
    b.run('qtfract')
|
flavoi/diventi
|
diventi/blog/migrations/0008_auto_20190411_0806.py
|
Python
|
apache-2.0
| 574
| 0
|
# Generated by Django 2.1.7 on 2019-04-11 06:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('blog', '0007_auto_20180526_1702'),
]
operations = [
migrations.AlterField(
model_name='article',
            name='image',
field=models.URLField(blank=True, verbose_name='image'),
),
migrations.AlterField(
model_name='blogcover',
name='image',
field=models.URLField(blank=True, verbose_name='image'),
),
]
|
harshkothari410/snn-image-segmentation
|
imageSeg.py
|
Python
|
mit
| 4,893
| 0.02943
|
from genetic import *
from image import *
from snn import *
import math, random
import numpy
def convert_binary(data, w, h, t):
ans = [[0 for x in xrange(w)] for x in xrange(h)]
for x in xrange(h):
for y in xrange(w):
if data[x][y] > t:
ans[x][y] = 1
else:
ans[x][y] = 0
return ans
def convert_mat(data, w, h, thresh):
ans = [[0 for x in xrange(w)] for x in xrange(h)]
for x in xrange(h):
for y in xrange(w):
if data[x][y] > thresh[x][y]:
ans[x][y] = 1
else:
ans[x][y] = 0
return ans
def shenon_entropy(data, w, h):
black, white = 0,0
for x in xrange(h):
for y in xrange(w):
if data[x][y]:
white += 1
else:
black += 1
total = w*h
prob_white = white / (total*1.0)
prob_black = black / (total*1.0)
formula = - (prob_black * math.log(prob_black,2) + prob_white * math.log(prob_white, 2))
return formula
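# Worked example (illustrative, not from the original file): for a 2x2 binary
# image with two white and two black pixels, prob_white = prob_black = 0.5 and
# the entropy is -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0, the maximum for a
# binary image; an all-black or all-white image would raise a math domain
# error here, because log(0) is undefined.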
def fitness(population, data, w, h):
fitness_dict = {}
for x in population:
ans = convert_binary(data, w, h, x)
entropy = shenon_entropy(ans, w, h)
if entropy in fitness_dict:
entropy = entropy + random.random()/1000
fitness_dict[entropy] = x
# imagewrite(ans, w, h)
# print entropy, x
return fitness_dict
def fitness_mat(population, data, w, h):
fitness_dict = {}
for x in population:
ans = convert_mat(data, w, h, x)
entropy = shenon_entropy( ans , w, h)
if entropy in fitness_dict:
entropy = entropy + random.random()/1000
fitness_dict[entropy] = x
return fitness_dict
def fitness_weight(population, w, h, t, ind):
fitness_dict = {}
for y, x in enumerate(population):
ans = convert_binary(x, w, h, t)
entropy = shenon_entropy(ans, w, h)
if entropy in fitness_dict:
entropy = entropy + random.random()/1000
fitness_dict[entropy] = ind[y]
return fitness_dict
# read image
pixel, w, h = imageread('test8.jpg')
# convert to snn
pixel_mat = snn_response(pixel, w, h, 10, 0.05)
d=3
def weight(x1,y1,x,y):
w = 10*math.exp(- ( ( math.pow((x1-x),2)+math.pow((y1-y),2) + math.pow(pixel[x1][y1]-pixel[x][y],2 )/d ) ) )
return w
def second_layer_locality():
second_layer = [[0 for x in xrange(w+2)] for x in xrange(h+2)]
for x in xrange(1,h-1):
for y in xrange(1,w-1):
temp = {}
for i in xrange(x-1, x+1):
for j in xrange(y-1, y+1):
temp[pixel_mat[i][j]] = weight(x,y,i,j)
second_layer[x][y] = response_time(temp)
second_layer = numpy.delete(second_layer, (0), axis=0)
second_layer = numpy.delete(second_layer, len(second_layer)-1, axis=0)
second_layer = numpy.delete(second_layer, (0), axis=1)
second_layer = numpy.delete(second_layer, len(second_layer[0])-1, axis=1)
return second_layer
def second_layer(w_mat):
second_layer = [[0 for x in xrange(w+2)] for x in xrange(h+2)]
for x in xrange(h):
for y in xrange(w):
second_layer[x][y] = response_time({pixel_mat[x][y]:w_mat[x][y]})
return second_layer
def median_filter(mat, w, h):
for x in xrange(h):
for y in xrange(w):
if mat[x][y] < numpy.median(mat):
mat[x][y]=0
return mat
# ==================== SNN Weight ====================
# print "Started "
# population1 = init_three_dim_mat(1, 3,3, 9)
# print population1
# ==================== SNN Matrix ====================
print "Starting GA ..."
population1 = init_mat(10,w,h)
print "Population created ..."
t = 5.0
final = []
for x in xrange(16):
print "Performing Iteration :", x+1
sl = []
for pop in population1:
temp = second_layer(pop)
sl.append(temp)
a = fitness_weight(sl, w, h, t , population1)
population1, m, max = crossover_mat( a, w, h )
print "Maximum fitness for this generation :",max
print "======================================"
sl = second_layer(m)
ans = convert_binary(sl, w, h, t)
final = sl
if x % 4 == 0:
imagesave(ans, w, h, 't6test8gen ' + str(x) + ' fit ' )
imagewrite(ans, w, h)
# print len(final)
# x = median_filter(final, w, h)
# print 'shannon entropy : ',shenon_entropy( x , w, h)
# imagewrite(x, w, h)
# if x % 5 == 0:
# # ans = convert_mat(pixel_mat, w, h, m)
# imagesave(ans, w, h, 'gen ' + str(x) + ' fit ' )
# for x in xrange(11):
# # a = fitness_mat(population1, pixel_mat, w, h)
# a = fitness_mat(population1, second_layer, w, h)
# population1, m, max = crossover_mat( a, w, h )
# print max
# if x % 5 == 0:
# # ans = convert_mat(pixel_mat, w, h, m)
# ans = convert_mat(second_layer, w, h, m)
# imagewrite(ans, w, h)
# imagesave(ans, w, h, 'gen ' + str(x) + ' fit ' )
# print "==========="
# ==================== SNN Int =======================
# imagewrite(ans, w, h)
# initialize population
# population1 = init_num(8)
# for x in xrange(11):
# a = fitness(population1, second_layer, w, h)
# population1, m, max = crossover_num( a )
# if x % 5 == 0:
# ans = convert_binary(second_layer, w, h, m)
# imagewrite(ans, w, h)
# imagesave(ans, w, h, 'gen ' + str(x) + ' fit ' + str(m))
# print "==========="
# print max
|
plasmashadow/training-python
|
time/sorted-list.py
|
Python
|
gpl-2.0
| 1,566
| 0.02235
|
# -----------------------------------------------------------
# compares the creation of sorted lists using the python
# bisect module, and the "usual" way
#
# (C) 2015 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------
# import standard modules
import bisect, random, time
def sortListDefault():
    # define empty list, and fill with 200000 randomized integers
sortedNumbers = []
for element in range(200000):
# choose a number between 0 and 1000
newNumber = random.randint(0, 1000)
# add number to list
#print ("adding %i to list ... " %newNumber)
        sortedNumbers.append(newNumber)
# sort the list in-place
sortedNumbers.sort()
return
def sortListBisect():
# define empty list, and fill with 200000 randomized integers
sortedNumbers = []
for element in range(200000):
# choose a number between 0 and 1000
newNumber = random.randint(0, 1000)
#print ("adding %i to list ... " %newNumber)
# insert into sorted list
bisect.insort(sortedNumbers, newNumber)
return
# evaluate default sort
startTime1 = time.time()
listPosition = sortListDefault()
endTime1 = time.time()
# calculate and output interval time
seconds = endTime1 - startTime1
print ("default sort took %.8f seconds" % seconds)
# evaluate bisect sort
startTime1 = time.time()
listPosition = sortListBisect()
endTime1 = time.time()
# calculate and output interval time
seconds = endTime1 - startTime1
print ("bisect sort took %.8f seconds" % seconds)
|
linvictor88/vse-lbaas-driver
|
quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py
|
Python
|
apache-2.0
| 29,895
| 0
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Quantum.
# Based on the structure of the OpenVSwitch agent in the
# Quantum OpenVSwitch Plugin.
# @author: Sumit Naiksatam, Cisco Systems, Inc.
import os
import sys
import time
import eventlet
from oslo.config import cfg
import pyudev
from quantum.agent.linux import ip_lib
from quantum.agent.linux import utils
from quantum.agent import rpc as agent_rpc
from quantum.agent import securitygroups_rpc as sg_rpc
from quantum.common import config as logging_config
from quantum.common import constants
from quantum.common import topics
from quantum.common import utils as q_utils
from quantum import context
from quantum.openstack.common import log as logging
from quantum.openstack.common import loopingcall
from quantum.openstack.common.rpc import common as rpc_common
from quantum.openstack.common.rpc import dispatcher
from quantum.plugins.linuxbridge.common import config # noqa
from quantum.plugins.linuxbridge.common import constants as lconst
LOG = logging.getLogger(__name__)
BRIDGE_NAME_PREFIX = "brq"
TAP_INTERFACE_PREFIX = "tap"
BRIDGE_FS = "/sys/devices/virtual/net/"
BRIDGE_NAME_PLACEHOLDER = "bridge_name"
BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/"
DEVICE_NAME_PLACEHOLDER = "device_name"
BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport"
class LinuxBridgeManager:
def __init__(self, interface_mappings, root_helper):
self.interface_mappings = interface_mappings
self.root_helper = root_helper
self.ip = ip_lib.IPWrapper(self.root_helper)
self.udev = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(self.udev)
monitor.filter_by('net')
def device_exists(self, device):
"""Check if ethernet device exists."""
try:
utils.execute(['ip', 'link', 'show', 'dev', device],
root_helper=self.root_helper)
except RuntimeError:
return False
return True
def interface_exists_on_bridge(self, bridge, interface):
directory = '/sys/class/net/%s/brif' % bridge
for filename in os.listdir(directory):
if filename == interface:
return True
return False
def get_bridge_name(self, network_id):
if not network_id:
LOG.warning(_("Invalid Network ID, will lead to incorrect bridge"
"name"))
bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
subinterface_name = '%s.%s' % (physical_interface, vlan_id)
return subinterface_name
def get_tap_device_name(self, interface_id):
if not interface_id:
LOG.warning(_("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = TAP_INTERFACE_PREFIX + interface_id[0:11]
return tap_device_name
def get_all_quantum_bridges(self):
quantum_bridge_list = []
bridge_list = os.listdir(BRIDGE_FS)
for bridge in bridge_list:
if bridge.startswith(BRIDGE_NAME_PREFIX):
quantum_bridge_list.append(bridge)
return quantum_bridge_list
def get_interfaces_on_bridge(self, bridge_name):
if self.device_exists(bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
return os.listdir(bridge_interface_path)
def get_bridge_for_tap_device(self, tap_device_name):
bridges = self.get_all_quantum_bridges()
for bridge in bridges:
interfaces = self.get_interfaces_on_bridge(bridge)
if tap_device_name in interfaces:
return bridge
return None
def is_device_on_bridge(self, device_name):
if not device_name:
return False
else:
bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace(
DEVICE_NAME_PLACEHOLDER, device_name)
return os.path.exists(bridge_port_path)
def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id):
"""Create a vlan and bridge unless they already exist."""
interface = self.ensure_vlan(physical_interface, vlan_id)
bridge_name = self.get_bridge_name(network_id)
if self.ensure_bridge(bridge_name, interface):
return interface
def get_interface_details(self, interface):
device = self.ip.device(interface)
ips = device.addr.list(scope='global')
# Update default gateway if necessary
gateway = device.route.get_gateway(scope='global')
return ips, gateway
def ensure_flat_bridge(self, network_id, physical_interface):
"""Create a non-vlan bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(physical_interface)
if self.ensure_bridge(bridge_name, physical_interface, ips, gateway):
return physical_interface
def ensure_local_bridge(self, network_id):
"""Create a local bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
def ensure_vlan(self, physical_interface, vlan_id):
"""Create a vlan unless it alr
|
eady exists."""
interface = self.get_subinterface_name(physical_interface, vlan_id)
if not self.device_exists(interface):
LOG.debug(_("Creating subinterface %(interface)
|
s for "
"VLAN %(vlan_id)s on interface "
"%(physical_interface)s"),
{'interface': interface, 'vlan_id': vlan_id,
'physical_interface': physical_interface})
if utils.execute(['ip', 'link', 'add', 'link',
physical_interface,
'name', interface, 'type', 'vlan', 'id',
vlan_id], root_helper=self.root_helper):
return
if utils.execute(['ip', 'link', 'set',
interface, 'up'], root_helper=self.root_helper):
return
LOG.debug(_("Done creating subinterface %s"), interface)
return interface
def update_interface_ip_details(self, destination, source, ips,
gateway):
if ips or gateway:
dst_device = self.ip.device(destination)
src_device = self.ip.device(source)
# Append IP's to bridge if necessary
if ips:
for ip in ips:
dst_device.addr.add(ip_version=ip['ip_version'],
cidr=ip['cidr'],
broadcast=ip['broadcast'])
if gateway:
# Ensure that the gateway can be updated by changing the metric
metric = 100
if 'metric' in gateway:
metric = gateway['metric'] - 1
dst_device.route.add_gateway(gateway=gateway['gateway'],
metric=metric)
src_device.route.delete_gateway(gateway=gateway['gateway'])
|
ypid-bot/check_mk
|
web/htdocs/table.py
|
Python
|
gpl-2.0
| 15,132
| 0.005155
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config
from lib import num_split
table = None
mode = None
next_func = None
row_css = None
def begin(table_id=None, title=None, **kwargs):
global table, mode, next_func
# Use our pagename as table id if none is specified
if table_id == None:
table_id = html.myfile
try:
limit = config.table_row_limit
except:
limit = None
limit = kwargs.get('limit', limit)
if html.var('limit') == 'none' or kwargs.get("output_format", "html") != "html":
limit = None
table = {
"id" : table_id,
"title" : title,
"headers" : [],
"collect_headers" : False, # also: True, "finished"
"rows" : [],
"limit" : limit,
"omit_if_empty" : kwargs.get("omit_if_empty", False),
"omit_h
|
eaders" : kwargs.get("omit_headers", False),
"searchable" : kwargs.get("searchable", True),
"sortable" : kwargs.get("sortable", True),
"next_header" : None,
"output_format" : kwargs.get("output_format", "html"), # possible: html, csv, fetch
}
if kwargs.get("empty_text"):
table["empty_text"] = kwargs["empty_text"]
else:
table["empty_text"] = _("No entries.")
if kwargs.get("help"):
table["help"] = kwargs["help"]
if kwargs.get("css"):
table["css"] = kwargs["css"]
html.plug()
mode = 'row'
next_func = None
def finish_previous():
global next_func
if next_func:
next_func(*next_args[0], **next_args[1])
next_func = None
def row(*posargs, **kwargs):
finish_previous()
global next_func, next_args
next_func = add_row
next_args = posargs, kwargs
def add_row(css=None, state=0, collect_headers=True, fixed=False):
if table["next_header"]:
table["rows"].append((table["next_header"], None, "header", True))
table["next_header"] = None
table["rows"].append(([], css, state, fixed))
if collect_headers:
if table["collect_headers"] == False:
table["collect_headers"] = True
elif table["collect_headers"] == True:
table["collect_headers"] = "finished"
elif not collect_headers and table["collect_headers"]:
table["collect_headers"] = False
# Intermediate title, shown as soon as there is a following row.
# We store the group headers in the list of rows, with css None
# and state set to "header"
def groupheader(title):
table["next_header"] = title
def cell(*posargs, **kwargs):
finish_previous()
global next_func, next_args
next_func = add_cell
next_args = posargs, kwargs
def add_cell(title="", text="", css=None, help=None, colspan=None, sortable=True):
if type(text) != unicode:
text = str(text)
htmlcode = text + html.drain()
if table["collect_headers"] == True:
            # small helper to make sorting introduction easier. Cells which contain
# buttons are never sortable
if css and 'buttons' in css and sortable:
sortable = False
table["headers"].append((title, help, sortable))
table["rows"][-1][0].append((htmlcode, css, colspan))
def end():
global table
finish_previous()
html.unplug()
if not table:
return
# Output-Format "fetch" simply means that all data is being
# returned as Python-values to be rendered somewhere else.
if table["output_format"] == "fetch":
return table["headers"], table["rows"]
if table["output_format"] == "csv":
do_csv = True
csv_separator = html.var("csv_separator", ";")
else:
do_csv = False
if not table["rows"] and table["omit_if_empty"]:
table = None
return
#html.guitest_record_output("data_tables", table)
if table["title"] and not do_csv:
html.write("<h3>%s</h3>" % table["title"])
if table.get("help") and not do_csv:
html.help(table["help"])
if not table["rows"] and not do_csv:
html.write("<div class=info>%s</div>" % table["empty_text"])
table = None
return
table_id = table['id']
rows = table["rows"]
    # Controls whether or not actions are available for a table
search_term = None
actions_enabled = (table["searchable"] or table["sortable"]) and not do_csv
if actions_enabled:
user_opts = config.user.load_file("tableoptions", {})
user_opts.setdefault(table_id, {})
table_opts = user_opts[table_id]
# Handle the initial visibility of the actions
actions_visible = user_opts[table_id].get('actions_visible', False)
if html.var('_%s_actions' % table_id):
actions_visible = html.var('_%s_actions' % table_id) == '1'
user_opts[table_id]['actions_visible'] = actions_visible
if html.var('_%s_reset' % table_id):
html.del_var('_%s_search' % table_id)
if 'search' in table_opts:
del table_opts['search'] # persist
if table["searchable"]:
# Search is always lower case -> case insensitive
search_term = html.get_unicode_input('_%s_search' % table_id, table_opts.get('search', '')).lower()
if search_term:
html.set_var('_%s_search' % table_id, search_term)
table_opts['search'] = search_term # persist
filtered_rows = []
for row, css, state, fixed in rows:
if state == "header" or fixed:
filtered_rows.append((row, css, state, fixed))
continue # skip filtering of headers or fixed rows
for cell_content, css_classes, colspan in row:
if search_term in cell_content.lower():
filtered_rows.append((row, css, state, fixed))
break # skip other cells when matched
rows = filtered_rows
if html.var('_%s_reset_sorting' % table_id):
html.del_var('_%s_sort' % table_id)
if 'sort' in table_opts:
del table_opts['sort'] # persist
if table["sortable"]:
        # Now apply any sorting settings
sort = html.var('_%s_sort' % table_id, table_opts.get('sort'))
if sort != None:
html.set_var('_%s_sort' % table_id, sort)
table_opts['sort'] = sort # persist
sort_col, sort_reverse = map(int, sort.split(',', 1))
            # remove fixed rows and remember them in a separate list
fixed_rows = []
for index, row in enumerate(rows[:]):
|
policycompass/policycompass-services
|
apps/referencepool/management/commands/harvest.py
|
Python
|
agpl-3.0
| 2,610
| 0.001149
|
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from apps.referencepool.models import *
import requests
import json
import os
__author__ = 'fki'
class Command(BaseCommand):
help = 'Harvest external resources to fill the Reference Pool'
def handle(self, *args, **options):
if args:
for a in args:
try:
func = getattr(self, '_harvest_' + a)
except AttributeError:
                    self.stdout.write('No such Harvester')
                    continue
                func()
else:
self.stdout.write('Harvesting everything')
for f in dir(self):
if f.startswith('_harvest_'):
getattr(self, f)()
def _harvest_languages(self):
self.stdout.write('Harvesting Languages')
url = 'http://data.okfn.org/data/core/language-codes/r/language-codes.json'
result = json.loads((requests.get(url)).text)
for lang in result:
try:
l = Language(code=lang['alpha2'], title=lang['English'])
l.save()
except IntegrityError:
pass
self.stdout.write('Successfully Harvested Languages')
def _harvest_countries(self):
self.stdout.write('Harvesting Countries')
url = 'http://data.okfn.org/data/core/country-codes/r/country-codes.json'
result = json.loads((requests.get(url)).text)
country_class = DataClass.objects.get(title='Country')
for country in result:
try:
c = Individual(data_class=country_class, title=country['name'],
code=country['ISO3166-1-Alpha-3'])
c.save()
except IntegrityError:
pass
def _harvest_external_resources(self):
self.stdout.write('Harvesting External Resources')
result = self._file_to_json('../../resources/open-data-monitor.json')
for resource in result:
try:
                name = result[resource]['col_1'].replace('_', '.').replace('-', '.')
url = 'http://' + name
r = ExternalResource(title=name, url=url, api_url=url)
r.save()
except IntegrityError:
pass
def _file_to_json(self, rel_path):
        dir = os.path.dirname(__file__)
abs_path = os.path.join(dir, rel_path)
with open(abs_path, "r") as file:
data = json.load(file)
return data
|
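The handle() method above dispatches by name: any method whose name starts with _harvest_ can be selected with a positional argument, and with no arguments every harvester runs. Assuming the app lives inside a configured Django project, the command would typically be driven like this (call_command is standard Django; the argument values simply mirror the method names above):

from django.core.management import call_command

call_command('harvest', 'languages')   # runs _harvest_languages only
call_command('harvest', 'nonsense')    # prints 'No such Harvester' and skips it
call_command('harvest')                # runs every _harvest_* method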
iLotus/googleads-adsensehost-examples
|
python/v4.x/get_all_ad_units_for_publisher.py
|
Python
|
apache-2.0
| 2,497
| 0.005607
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad units in a publisher ad client.
To get ad clients, run get_all_ad_clients_for_publisher.py.
Tags: accounts.adunits.list
"""
__author__ = 'jalc@google.com (Jose Alcerreca)'
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'account_id',
help='The ID of the pub account on which the ad unit exists')
argparser.add_argument(
'ad_client_id',
help='The ID of the ad client on which the ad unit exists')
MAX_PAGE_SIZE = 50
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'adsensehost', 'v4.1', __doc__, __file__, parents=[argparser])
ad_client_id = flags.ad_client_id
account_id = flags.account_id
try:
# Retrieve ad unit list in pages and display data as we receive it.
request = service.accounts().adunits().list(adClientId=ad_client_id,
accountId=account_id,
maxResults=MAX_PAGE_SIZE)
while request is not None:
result = request.execute()
if 'items' in result:
ad_units = result['items']
for ad_unit in ad_units:
print ('Ad unit with ID "%s", code "%s", name "%s" and status "%s" '
'was found.' %
                 (ad_unit['id'], ad_unit['code'], ad_unit['name'],
                  ad_unit['status']))
        request = service.accounts().adunits().list_next(request, result)
else:
print 'No ad units were found.'
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
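The while loop above is the usual Google API client pagination idiom: list() builds the first request, and list_next(previous_request, previous_response) either returns the request for the next page or None. Stripped of the AdSense Host specifics, the same shape can be exercised against a fake two-page service (all names and data below are invented for illustration):

def iterate_pages(first_request, execute, list_next):
    # Yield every item across pages, following the request/list_next pattern.
    request = first_request
    while request is not None:
        result = execute(request)
        for item in result.get('items', []):
            yield item
        request = list_next(request, result)

pages = [{'items': [1, 2], 'next': True}, {'items': [3]}]
execute = lambda req: pages[req]
list_next = lambda req, res: req + 1 if 'next' in res else None
print(list(iterate_pages(0, execute, list_next)))  # -> [1, 2, 3]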
ElBell/VTDairyDB
|
manage.py
|
Python
|
gpl-3.0
| 10,171
| 0.005309
|
import sys, os
from main import app
from flask_script import Manager, Server, Command, Option
from flask_security.utils import encrypt_password
from models import db, populate_db, StatusData, GrowthData, LifeData, GrowthDataAverages
from main import app
import random
from datetime import date, datetime
import pandas
from tqdm import tqdm
from dateutil import parser
from sqlalchemy import desc
class ResetDB(Command):
"""Drops all tables and recreates them"""
def run(self, **kwargs):
db.drop_all()
db.create_all()
class PopulateDB(Command):
option_list = (
Option('--file', '-f', dest='user_data_file', default='scripts/user_data.csv'),
)
"""Fills in predefined data into DB"""
def run(self, user_data_file, **kwargs):
print("Complete")
populate_db()
def parse_float(val):
try:
float(val)
return float(val)
except ValueError:
if val in (".", ""):
return None
print(val)
raise Exception
class ConvertAll(Command):
def run(self):
for animal in GrowthData.query.all():
animal.weight = animal.weight*0.453592 if isinstance(animal.weight, (int, float)) else None
animal.height = animal.height*2.54 if isinstance(animal.height, (int, float)) else None
animal.lifetime_adg = animal.lifetime_adg*0.453592 if isinstance(animal.lifetime_adg, (int, float)) else None
animal.monthly_adg = animal.monthly_adg*0.453592 if isinstance(animal.monthly_adg, (int, float)) else None
animal.monthly_height_change = (animal.monthly_height_change/10) * 25.4 if isinstance(animal.monthly_height_change, (int, float)) else None
db.session.commit()
print("GrowthData converted")
for animal in LifeData.query.all():
animal.bwt = animal.bwt*0.453592 if isinstance(animal.bwt, (int, float)) else None
db.session.commit()
print("LifeData converted")
class ProcessLifeData(Command):
option_list = (
Option('--file', '-f', dest='full_filename', default='data/lifeData.csv'),
)
def run(self, full_filename):
data = pandas.read_csv(full_filename)
data = data.dropna()
# Removes the first two rows of not data
data = data.ix[4:]
# Labels the columns as follows (so columns MUST BE IN THIS ORDER)
data.columns = ['FID', 'EID', 'Breed', 'DOB']
#app.logger.info(data)
for index, row in data.iterrows():
life = LifeData.query.filter_by(fid=row['FID']).first()
if life is None:
life = LifeData(fid=row['FID'], eid=row['EID'], breed=row['Breed'], dob=parser.parse(row['DOB']))
db.session.add(life)
else:
life.dob=parser.parse(row['DOB'])
life.breed=row['Breed']
life.eid=row['EID']
# Add won't happen without it
db.session.commit()
class ProcessGrowthData(Command):
option_list = (
Option('--file', '-f', dest='full_filename', default='data/growthData.xlsx'),
)
def run(self, full_filename):
data = pandas.read_excel(full_filename)
data = data.set_index('Index')
status_data, growth_data_old, growth_data_new = data.ix[:, :6], data.ix[:, 6:158], data.ix[:, 158:]
# print(growth_data_old.index)
for index, row in tqdm(status_data.iterrows()):
status = StatusData(fid=int(index), status=row['Status'], status_date=None if pandas.isnull(row['Date']) else row['Date'])
db.session.add(status)
life = LifeData.query.filter_by(fid=int(index)).first()
if life is None:
life = LifeData(fid=int(index), bwt=row['BWt'], dob=row['Birthdate'], breed=row['Brd'], estimate=True if type(row['Estimate']) is unicode else False)
db.session.add(life)
else:
if life.bwt is None:
life.bwt = row['BWt']
life.dob = row['Birthdate']
life.breed = row['Brd']
life.estimate = True if type(row['Estimate']) is unicode else False
db.session.commit()
        growth_data_old.columns = pandas.MultiIndex.from_tuples([(c[:-1], c[-1]) for c in growth_data_old.columns])
for row_name, row in tqdm(growth_data_old.iterrows()):
row = row.where((pandas.notnull(row)), None)
for date_name, weight_data in row.unstack().iterrows():
weight = weight_data['W'] if type(weight_data['W']) == int or type(weight_data['W']) == float else None
date = weight_data['D'] if type(weight_data['D']) != pandas.tslib.NaTType else None
location = weight_data['L'] if type(weight_data['L']) != pandas.tslib.NaTType else None
height = weight_data['H'] if type(weight_data['H']) != pandas.tslib.NaTType else None
# print(row_name, weight, date, location, height)
if weight is None:
continue
measurement = GrowthData.new(fid=int(row_name), date=date, weight=weight, height=parse_float(height) if height is not None else height, location=location)
db.session.add(measurement)
# print("Adding weighing "+str(row_name)+", "+date_name+":", weight_data.get('D', date_name), weight_data['L'], weight_data['W'], weight_data['H'])
db.session.commit()
growth_data_new.columns = pandas.MultiIndex.from_tuples([(c[:-1], c[-1]) for c in growth_data_new.columns])
for row_name, row in tqdm(growth_data_new.iterrows()):
row = row.where((pandas.notnull(row)), None)
for date_name, weight_data in row.unstack().iterrows():
date = datetime.strptime(date_name, '%y%m%d').date()
weight = weight_data['W'] if type(weight_data['W']) == int or type(weight_data['W']) == float else None
location = weight_data['L'] if type(weight_data['L']) != pandas.tslib.NaTType else None
bcs = weight_data['C']
# print(type(bcs))
height = weight_data['H'] if type(weight_data['H']) != pandas.tslib.NaTType else None
#print(row_name, weight, date, location, height)
if weight is None:
continue
measurement = GrowthData.new(fid=int(row_name), bcs=parse_float(bcs) if bcs is not None else bcs, location=location, date=date, weight=weight, height=parse_float(height) if height is not None else height)
db.session.add(measurement)
# print("Adding weighing "+str(row_name)+", "+date_name+":", weight_data['C'], weight_data.get('D', date_name), weight_data['L'], weight_data['W'], weight_data['H'])
db.session.commit()
class CalculateGrowthAverageData(Command):
def run(self):
fids = db.session.query(GrowthData.fid).distinct()
for fid in tqdm(fids):
fid_data = db.session.query(GrowthData).filter(GrowthData.fid == fid.fid).order_by(desc(GrowthData.date)).all()
today = fid_data[0]
growth_averages = GrowthDataAverages.query.filter_by(fid=int(fid.fid)).first()
life_data = LifeData.query.filter_by(fid=int(fid.fid)).first()
if len(fid_data) > 1:
previous = fid_data[1]
time_dif = today.date - previous.date
time_dif = time_dif.days
monthly_weight_dif = float(today.weight - previous.weight)
monthly_adg = float(monthly_weight_dif/time_dif)
if previous.height is not None and today.height is not None:
monthly_height_dif = float(today.height - previous.height)
monthly_height_change = float(monthly_height_dif/time_dif)
else:
monthly_height_change = None
age = today.date - life_data.dob
age = age.days
lifetime_weight_dif = float(today.weight - life_data.bwt)
lifetime_adg = float(lifetime_weight_dif/age)
if gro
|
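The manage.py record above is cut off mid-line, but the arithmetic it relies on is simple: ConvertAll uses 1 lb = 0.453592 kg and 1 in = 2.54 cm, and the average daily gain (ADG) figures are weight differences divided by the number of days between measurements. A small worked sketch with invented numbers:

from datetime import date

LB_TO_KG = 0.453592

birth_date, birth_weight_kg = date(2015, 3, 1), 40.0
prev_date, prev_weight_lb = date(2016, 2, 1), 660.0
today, today_weight_lb = date(2016, 3, 1), 700.0

today_kg = today_weight_lb * LB_TO_KG
prev_kg = prev_weight_lb * LB_TO_KG

monthly_adg = (today_kg - prev_kg) / (today - prev_date).days
lifetime_adg = (today_kg - birth_weight_kg) / (today - birth_date).days
print(round(monthly_adg, 3), round(lifetime_adg, 3))   # kg gained per day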
vfiebig/rpaas
|
tests/test_storage.py
|
Python
|
bsd-3-clause
| 5,053
| 0.000396
|
# coding: utf-8
# Copyright 2015 rpaas authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from rpaas import plan, storage
class MongoDBStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage = storage.MongoDBStorage()
self.storage.db[self.storage.quota_collection].remove()
self.storage.db[self.storage.plans_collection].remove()
self.storage.db[self.storage.plans_collection].insert(
{"_id": "small",
"description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}}
)
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}}
)
def test_set_team_quota(self):
q = self.storage.set_team_quota("myteam", 8)
used, quota = self.storage.find_team_quota("myteam")
self.assertEqual([], used)
self.assertEqual(8, quota)
self.assertEqual(used, q["used"])
self.assertEqual(quota, q["quota"])
def test_list_plans(self):
plans = self.storage.list_plans()
expected = [
{"name": "small", "description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}},
{"name": "huge", "description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}},
]
self.assertEqual(expected, [p.to_dict() for p in plans])
def test_find_plan(self):
plan = self.storage.find_plan("small")
expected = {"name": "small", "description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}}
self.assertEqual(expected, plan.to_dict())
with self.assertRaises(storage.PlanNotFoundError):
self.storage.find_plan("something that doesn't exist")
def test_store_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
got_plan = self.storage.find_plan(p.name)
self.assertEqual(p.to_dict(), got_plan.to_dict())
def test_store_plan_duplicate(self):
p = plan.Plan(name="small", description="small thing",
config={"serviceofferingid": "abcdef123"})
with self.assertRaises(storage.DuplicateError):
self.storage.store_plan(p)
    def test_update_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.update_plan(p.name, description="wat?",
config={"serviceofferingid": "abcdef123459"})
p = self.storage.find_plan(p.name)
self.assertEqual("super_huge", p.name)
self.assertEqual("wat?", p.description)
self.assertEqual({"serviceofferingid": "abcdef123459"}, p.config)
def test_update_plan_partial(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.update_plan(p.name, config={"serviceofferingid": "abcdef123459"})
p = self.storage.find_plan(p.name)
self.assertEqual("super_huge", p.name)
self.assertEqual("very huge thing", p.description)
self.assertEqual({"serviceofferingid": "abcdef123459"}, p.config)
def test_update_plan_not_found(self):
with self.assertRaises(storage.PlanNotFoundError):
self.storage.update_plan("my_plan", description="woot")
def test_delete_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.delete_plan(p.name)
with self.assertRaises(storage.PlanNotFoundError):
self.storage.find_plan(p.name)
def test_delete_plan_not_found(self):
with self.assertRaises(storage.PlanNotFoundError):
self.storage.delete_plan("super_huge")
def test_instance_metadata_storage(self):
self.storage.store_instance_metadata("myinstance", plan="small")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertEqual({"_id": "myinstance",
"plan": "small"}, inst_metadata)
self.storage.store_instance_metadata("myinstance", plan="medium")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertEqual({"_id": "myinstance", "plan": "medium"}, inst_metadata)
self.storage.remove_instance_metadata("myinstance")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertIsNone(inst_metadata)
|
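The tests above pin down a small storage contract: store_plan refuses duplicates, find_plan/update_plan/delete_plan raise PlanNotFoundError for unknown names, and instance metadata behaves as a plain upsert. A toy in-memory stand-in (not rpaas code, just the same contract) makes the expected behaviour explicit:

from collections import namedtuple

Plan = namedtuple('Plan', 'name description config')

class PlanNotFoundError(Exception):
    pass

class DuplicateError(Exception):
    pass

class InMemoryPlanStorage:
    def __init__(self):
        self._plans = {}

    def store_plan(self, plan):
        if plan.name in self._plans:
            raise DuplicateError(plan.name)
        self._plans[plan.name] = {'description': plan.description, 'config': dict(plan.config)}

    def find_plan(self, name):
        try:
            return self._plans[name]
        except KeyError:
            raise PlanNotFoundError(name)

    def update_plan(self, name, **fields):
        self.find_plan(name).update(fields)

    def delete_plan(self, name):
        self.find_plan(name)
        del self._plans[name]

s = InMemoryPlanStorage()
s.store_plan(Plan('small', 'some cool plan', {'serviceofferingid': 'abcdef123456'}))
s.update_plan('small', description='updated')
print(s.find_plan('small')['description'])  # -> updated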
anhstudios/swganh
|
data/scripts/templates/object/tangible/wearables/vest/shared_vest_s03.py
|
Python
|
mit
| 450
| 0.046667
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/vest/shared_vest_s03.iff"
result.attribute_template_id = 11
	result.stfName("wearables_name","vest_s03")
#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
return result
|
writefaruq/lionface-app
|
django/contrib/staticfiles/handlers.py
|
Python
|
bsd-3-clause
| 2,733
| 0.001098
|
import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
    WSGI middleware that intercepts calls to the static files directory, as
defined by the STATICFILES_URL setting, and serves those files.
"""
    def __init__(self, application, media_dir=None):
self.application = application
if media_dir:
self.media_dir = media_dir
else:
self.media_dir = self.get_media_dir()
self.media_url = urlparse(self.get_media_url())
if settings.DEBUG:
utils.check_settings()
super(StaticFilesHandler, self).__init__()
def get_media_dir(self):
return settings.STATICFILES_ROOT
def get_media_url(self):
return settings.STATICFILES_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the media_url
* the request's path isn't under the media path (or equal)
* settings.DEBUG isn't True
"""
return (self.media_url[2] != path and
path.startswith(self.media_url[2]) and not self.media_url[1])
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
The passed URL is assumed to begin with ``media_url``. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ``media_url``.
relative_url = url[len(self.media_url[2]):]
return urllib.url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(environ['PATH_INFO']):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
|
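StaticFilesHandler above is meant to wrap the normal WSGI application so that requests under STATICFILES_URL are answered from disk while everything else falls through to Django. In a project of that Django era it would be wired up in the WSGI entry point roughly as follows ('mysite.settings' is a placeholder module path):

# wsgi.py (illustrative)
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')

from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles.handlers import StaticFilesHandler

application = StaticFilesHandler(WSGIHandler())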
capless/kev
|
kev/backends/__init__.py
|
Python
|
gpl-3.0
| 3,810
| 0.003412
|
import json
import hashlib
import uuid
import datetime
from valley.exceptions import ValidationException
from kev.utils import get_doc_type
from kev.query import SortingParam
class DocDB(object):
db_class = None
indexer_class = None
backend_id = None
doc_id_string = '{doc_id}:id:{backend_id}:{class_name}'
index_id_string = ''
def save(self, doc_obj):
raise NotImplementedError
def delete(self, doc_obj):
raise NotImplementedError
def get(self, doc_obj, doc_id):
raise NotImplementedError
def parse_id(self, doc_id):
try:
return doc_id.split(':')[0]
except TypeError:
return doc_id.decode().split(':')[0]
def create_pk(self, doc_obj,doc):
doc = doc.copy()
doc['_date'] = str(datetime.datetime.now())
doc['_uuid'] = str(uuid.uuid4())
hash_pk = hashlib.md5(bytes(json.dumps(doc),'utf-8')).hexdigest()[:10]
doc_obj.set_pk(self.doc_id_string.format(doc_id=hash_pk,
backend_id=self.backend_id, class_name=doc_obj.get_class_name()))
return doc_obj
def check_unique(self, doc_obj, key, value):
obj = doc_obj.objects().filter({key: value})
if len(obj) == 0:
return True
if hasattr(doc_obj, '_id') and len(obj) == 1:
if doc_obj._id == obj[0]._id:
return True
raise ValidationException(
'There is already a {key} with the value of {value}'
.format(key=key, value=value))
def prep_doc(self, doc_obj):
"""
This method Validates, gets the Python value, checks unique indexes,
gets the db value, and then returns the prepared doc dict object.
Useful for save and backup functions.
@param doc_obj:
@return:
"""
doc = doc_obj._data.copy()
for key, prop in list(doc_obj._base_properties.items()):
prop.validate(doc.get(key), key)
            raw_value = prop.get_python_value(doc.get(key))
if prop.unique:
self.check_unique(doc_obj, key, raw_value)
value = prop.get_db_value(raw_value)
doc[key] = value
doc['_doc_type'] = get_doc_type(doc_obj.__class__)
return doc
def _save(self, doc_obj):
doc = self.prep_doc(doc_obj)
if '_id' not in doc:
self.create_pk(doc_obj,doc)
doc['_id'] = doc_obj._id
return (doc_obj, doc)
def get_id_list(self, filters_list):
l = self.parse_filters(filters_list)
if len(l) == 1:
return self._indexer.smembers(l[0])
else:
return self._indexer.sinter(*l)
def parse_filters(self, filters):
s = set()
for f in filters:
if '*' in f:
s.update(self._indexer.scan_iter(f))
else:
s.add(f)
if not s:
return filters
return list(s)
def sort(self, sortingp_list, docs_list, doc_class):
for sortingp in sortingp_list:
if sortingp.key not in doc_class._base_properties:
raise ValueError("Field '%s' doesn't exists in a document" % sortingp.key)
sorted_list = list(docs_list)
# check if a list can be sorted by serveral attributes with one function call
if SortingParam.needs_multiple_passes(sortingp_list):
for sortingp in sortingp_list:
sorted_list = sorted(sorted_list, key=lambda x: getattr(x, sortingp.key),
reverse=sortingp.reverse)
else:
sorted_list = sorted(sorted_list, key=SortingParam.attr_sort(sortingp_list),
reverse=sortingp_list[0].reverse)
return sorted_list
|
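The multi-pass branch of sort() above leans on Python's sort being stable: repeated sorted() calls preserve the order established by earlier passes, so several keys with different directions can be combined one pass at a time, least-significant key first. The technique in isolation, with made-up records:

records = [
    {'breed': 'HO', 'weight': 310},
    {'breed': 'JE', 'weight': 250},
    {'breed': 'HO', 'weight': 280},
]

# Weight descending within breed ascending: sort by the least-significant key first.
records.sort(key=lambda r: r['weight'], reverse=True)
records.sort(key=lambda r: r['breed'])   # stable, so weight order survives within each breed
print([(r['breed'], r['weight']) for r in records])
# -> [('HO', 310), ('HO', 280), ('JE', 250)]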
dwhswenson/mdtraj
|
mdtraj/reporters/hdf5reporter.py
|
Python
|
lgpl-2.1
| 4,749
| 0.000632
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""OpenMM Reporter for saving the positions of a molecular dynamics simulation
in the HDF5 format.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
from mdtraj.formats.hdf5 import HDF5TrajectoryFile
from mdtraj.reporters.basereporter import _BaseReporter
##############################################################################
# Classes
##############################################################################
class HDF5Reporter(_BaseReporter):
"""HDF5Reporter stores a molecular dynamics trajectory in the HDF5 format.
This object supports saving all kinds of information from the simulation --
more than any other trajectory format. In addition to all of the options,
the topology of the system will also (of course) be stored in the file. All
of the information is compressed, so the size of the file is not much
different than DCD, despite the added flexibility.
Parameters
----------
    file : str, or HDF5TrajectoryFile
Either an open HDF5TrajecoryFile object to write to, or a string
specifying the filename of a new HDF5 file to save the trajectory to.
reportInterval : int
The interval (in time steps) at which to write frames.
coordinates : bool
Whether to write the coordinates to the file.
time : bool
        Whether to write the current time to the file.
cell : bool
Whether to write the current unit cell dimensions to the file.
potentialEnergy : bool
Whether to write the potential energy to the file.
kineticEnergy : bool
Whether to write the kinetic energy to the file.
temperature : bool
Whether to write the instantaneous temperature to the file.
velocities : bool
Whether to write the velocities to the file.
atomSubset : array_like, default=None
Only write a subset of the atoms, with these (zero based) indices
to the file. If None, *all* of the atoms will be written to disk.
enforcePeriodicBox: bool or None
Specifies whether particle positions should be translated so the
center of every molecule lies in the same periodic box. If None
(the default), it will automatically decide whether to translate
molecules based on whether the system being simulated uses periodic
boundary conditions.
Notes
-----
If you use the ``atomSubset`` option to write only a subset of the atoms
to disk, the ``kineticEnergy``, ``potentialEnergy``, and ``temperature``
fields will not change. They will still refer to the energy and temperature
of the *whole* system, and are not "subsetted" to only include the energy
of your subsystem.
Examples
--------
>>> simulation = Simulation(topology, system, integrator)
>>> h5_reporter = HDF5Reporter('traj.h5', 100)
>>> simulation.reporters.append(h5_reporter)
>>> simulation.step(10000)
>>> traj = mdtraj.trajectory.load('traj.lh5')
"""
@property
def backend(self):
return HDF5TrajectoryFile
def __init__(self, file, reportInterval, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True,
temperature=True, velocities=False, atomSubset=None,
enforcePeriodicBox=None):
"""Create a HDF5Reporter.
"""
super(HDF5Reporter, self).__init__(file, reportInterval,
coordinates, time, cell, potentialEnergy, kineticEnergy,
temperature, velocities, atomSubset,
enforcePeriodicBox)
|
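The docstring above already shows the minimal attach-and-step pattern; every constructor flag is simply forwarded to _BaseReporter. A slightly fuller, still hypothetical configuration — writing velocities for only the first 22 atoms every 500 steps — would look like this (the simulation object itself is assumed to come from an OpenMM setup):

from mdtraj.reporters import HDF5Reporter

reporter = HDF5Reporter(
    'subset.h5',                 # new HDF5 file to create
    500,                         # write a frame every 500 steps
    velocities=True,             # also store velocities
    atomSubset=list(range(22)),  # zero-based indices of the atoms to keep
)
# simulation.reporters.append(reporter)
# simulation.step(50000)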
nagareproject/core
|
doc/conf.py
|
Python
|
bsd-3-clause
| 5,554
| 0.001801
|
# -*- coding: utf-8 -*-
#
# Nagare documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 29 15:07:51 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
#import os
#import sys
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.githubpages', 'sphinxcontrib.mermaid']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nagare'
copyright = u'2017, Net-ng'
author = u'Alain Poirier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_style = 'theme.css'
html_favicon = '_static/favicon.ico'
html_logo = '_static/logo.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = dict(
logo_only=True,
collapse_navigation=True,
prev_next_buttons_location='bottom',
display_version=False,
sticky_navigation=False
)
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
html_show_copyright = False
html_show_sphinx = False
#html_use_index = False
html_show_sourcelink = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nagaredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Nagare.tex', u'Nagare Documentation',
u'Alain Poirier', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nagare', u'Nagare Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Nagare', u'Nagare Documentation',
author, 'Nagare', 'One line description of project.',
'Miscellaneous'),
]
|
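conf.py is pure configuration: Sphinx imports the module and reads names such as project, extensions and html_theme when a build starts, so nothing here runs on its own. If you want to drive the build from Python rather than the sphinx-build CLI, something along these lines works on Sphinx 1.7 and later (the source and output paths are placeholders):

from sphinx.cmd.build import build_main

# Equivalent to: sphinx-build -b html doc doc/_build/html
exit_code = build_main(['-b', 'html', 'doc', 'doc/_build/html'])
print('build failed' if exit_code else 'build ok')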
gomex/docker-zabbix
|
docker_service/__init__.py
|
Python
|
gpl-3.0
| 148
| 0.006757
|
def main():
"""Instantiate a DockerStats object and collect stats."""
print('Docker Service Module')
if __name__ == '__main__':
main()
|