| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
tradebyte/paci
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
MicroPyramid/docker-box
|
refs/heads/master
|
dockit/management/commands/__init__.py
|
12133432
| |
GunoH/intellij-community
|
refs/heads/master
|
python/testData/completion/className/orderingSymbolBeforeModule/a/foo.py
|
12133432
| |
wuzheng-sjtu/FastFPN
|
refs/heads/master
|
unit_test/__init__.py
|
12133432
| |
bratsche/Neutron-Drive
|
refs/heads/master
|
google_appengine/lib/django_1_2/django/bin/__init__.py
|
12133432
| |
bratsche/Neutron-Drive
|
refs/heads/master
|
google_appengine/lib/django_1_3/tests/modeltests/aggregation/__init__.py
|
12133432
| |
JianyuWang/neutron
|
refs/heads/master
|
neutron/tests/unit/tests/__init__.py
|
12133432
| |
ryankynor/Calculate-Pi
|
refs/heads/master
|
calculatepi.py
|
1
|
"""
calculatepi.py
Author: Ryan Kynor
Credit: Mr. Dennison
Assignment:
Write and submit a Python program that computes an approximate value of π by calculating the following sum:
(see: https://github.com/HHS-IntroProgramming/Calculate-Pi/blob/master/README.md)
This sum approaches the true value of π as n approaches ∞.
Your program must ask the user how many terms to use in the estimate of π, how many decimal places,
then print the estimate using that many decimal places. Exactly like this:
I will estimate pi. How many terms should I use? 100
How many decimal places should I use in the result? 7
The approximate value of pi is 3.1315929
Note: remember that the printed value of pi will be an estimate!
"""
import math
#print (dir(math))
n = int(input("I will estimate pi. How many terms should I use? "))
decimals = int(input("How many decimal places should I use in the result? "))
pi = 4.0*sum([(((-1.0)**k)/(2*k+1)) for k in range(0,n)])
print("The approximate value of pi is {0:.{1}f}".format(pi, decimals))
|
pniedzielski/fb-hackathon-2013-11-21
|
refs/heads/master
|
src/repl.it/jsrepl/extern/python/unclosured/lib/python2.7/lib2to3/fixes/fix_future.py
|
529
|
"""Remove __future__ imports
from __future__ import foo is replaced with an empty line.
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import BlankLine
class FixFuture(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
# This should be run last -- some things check for the import
run_order = 10
def transform(self, node, results):
new = BlankLine()
new.prefix = node.prefix
return new
|
rrrene/django
|
refs/heads/master
|
django/contrib/auth/hashers.py
|
66
|
from __future__ import unicode_literals
import base64
import binascii
import hashlib
import importlib
from collections import OrderedDict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache
from django.utils.crypto import (
constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
return False
try:
identify_hasher(encoded)
except ValueError:
return False
return True
def check_password(password, encoded, setter=None, preferred='default'):
"""
Returns a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
hasher = identify_hasher(encoded)
must_update = hasher.algorithm != preferred.algorithm
if not must_update:
must_update = preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generates a new random salt.
If password is None then a concatenation of
UNUSABLE_PASSWORD_PREFIX and a random string will be returned
which disallows logins. Additional random string reduces chances
of gaining access to staff or superuser accounts.
See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
hasher = get_hasher(hasher)
if not salt:
salt = hasher.salt()
return hasher.encode(password, salt)
@lru_cache.lru_cache()
def get_hashers():
hashers = []
for hasher_path in settings.PASSWORD_HASHERS:
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % hasher_path)
hashers.append(hasher)
return hashers
@lru_cache.lru_cache()
def get_hashers_by_algorithm():
return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(**kwargs):
if kwargs['setting'] == 'PASSWORD_HASHERS':
get_hashers.cache_clear()
get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
"""
Returns an instance of a loaded password hasher.
If algorithm is 'default', the default hasher will be returned.
This function will also lazy import hashers specified in your
settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
def identify_hasher(encoded):
"""
Returns an instance of a loaded password hasher.
Identifies hasher algorithm by examining encoded hash, and calls
get_hasher() to return hasher. Raises ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm)
def mask_hash(hash, show=6, char="*"):
"""
Returns the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
class BasePasswordHasher(object):
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError("Couldn't load %r algorithm library: %s" %
(self.__class__.__name__, e))
return module
raise ValueError("Hasher %r doesn't specify a library attribute" %
self.__class__.__name__)
def salt(self):
"""
Generates a cryptographically secure nonce salt in ASCII
"""
return get_random_string()
def verify(self, password, encoded):
"""
Checks if the given password is correct
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')
def encode(self, password, salt):
"""
Creates an encoded database value
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')
def safe_summary(self, encoded):
"""
Returns a summary of safe values
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')
def must_update(self, encoded):
return False
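# Illustrative only, not part of Django: a minimal custom hasher that overrides
# the members listed in the BasePasswordHasher docstring above. The class name
# and algorithm label are made up; it mirrors the salted SHA1/MD5 hashers
# defined later in this module, just with SHA256.
class ExampleSHA256PasswordHasher(BasePasswordHasher):
    algorithm = "example_sha256"

    def encode(self, password, salt):
        assert password is not None
        assert salt and '$' not in salt
        hash = hashlib.sha256(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, hash)

    def verify(self, password, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return constant_time_compare(encoded, self.encode(password, salt))

    def safe_summary(self, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(hash)),
        ])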
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256.
The result is a base64 encoded 32 byte hash. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 30000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
assert password is not None
assert salt and '$' not in salt
if not iterations:
iterations = self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode('ascii').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('iterations'), iterations),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
def must_update(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
return int(iterations) != self.iterations
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(rounds=self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
# Need to reevaluate the force_bytes call once bcrypt is supported on
# Python 3
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, force_text(data))
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
bcrypt = self._load_library()
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
# Ensure that our data is a bytestring
data = force_bytes(data)
# force_bytes() necessary for py-bcrypt compatibility
hashpw = force_bytes(bcrypt.hashpw(password, data))
return constant_time_compare(data, hashpw)
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return OrderedDict([
(_('algorithm'), algorithm),
(_('work factor'), work_factor),
(_('salt'), mask_hash(salt)),
(_('checksum'), mask_hash(checksum)),
])
def must_update(self, encoded):
algorithm, empty, algostr, rounds, data = encoded.split('$', 4)
return int(rounds) != self.rounds
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
Secure password hashing using the bcrypt algorithm
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
This hasher does not first hash the password, which means it is subject to
bcrypt's 72 character password truncation; most use cases should prefer
BCryptSHA256PasswordHasher.
See: https://code.djangoproject.com/ticket/20138
"""
algorithm = "bcrypt"
digest = None
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; stores SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(force_bytes(password)).hexdigest()
return 'sha1$$%s' % hash
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(hash)),
])
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(force_bytes(password)).hexdigest()
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(encoded, show=3)),
])
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(force_str(password), salt)
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(force_str(password), data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), salt),
(_('hash'), mask_hash(data, show=3)),
])
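# Stand-alone sketch of the "algorithm$iterations$salt$hash" layout produced by
# PBKDF2PasswordHasher.encode() above, built directly on hashlib/base64. This
# is an illustration by analogy, not Django's own pbkdf2() helper; the function
# name is made up.
def _pbkdf2_sha256_layout_sketch(password, salt, iterations=30000):
    dk = hashlib.pbkdf2_hmac('sha256', force_bytes(password), force_bytes(salt),
                             iterations)
    encoded = base64.b64encode(dk).decode('ascii').strip()
    return "pbkdf2_sha256$%d$%s$%s" % (iterations, salt, encoded)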
|
WillieMaddox/numpy
|
refs/heads/master
|
benchmarks/benchmarks/common.py
|
29
|
import numpy
import random
# Various pre-crafted datasets/variables for testing
# !!! Must not be changed -- only appended !!!
# while testing numpy we better not rely on numpy to produce random
# sequences
random.seed(1)
# but will seed it nevertheless
numpy.random.seed(1)
nx, ny = 1000, 1000
# reduced squares based on indexes_rand, primarily for testing more
# time-consuming functions (ufunc, linalg, etc)
nxs, nys = 100, 100
# a set of interesting types to test
TYPES1 = [
'int16', 'float16',
'int32', 'float32',
'int64', 'float64', 'complex64',
'longfloat', 'complex128',
'complex256',
]
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
values = [random.uniform(0, 100) for x in range(nx*ny/10)]*10
squares = {t: numpy.array(values,
dtype=getattr(numpy, t)).reshape((nx, ny))
for t in TYPES1}
# adjust complex ones to have a non-degenerate imaginary part -- use the
# original data transposed for that
for t, v in squares.iteritems():
if t.startswith('complex'):
v += v.T*1j
# smaller squares
squares_ = {t: s[:nxs, :nys] for t, s in squares.iteritems()}
# vectors
vectors = {t: s[0] for t, s in squares.iteritems()}
indexes = range(nx)
# so we do not have all items
indexes.pop(5)
indexes.pop(95)
indexes_rand = indexes[:] # copy
random.shuffle(indexes_rand) # in-place shuffle
# only now make them arrays
indexes = numpy.array(indexes)
indexes_rand = numpy.array(indexes_rand)
# smaller versions
indexes_ = indexes[indexes < nxs]
indexes_rand_ = indexes_rand[indexes_rand < nxs]
class Benchmark(object):
goal_time = 0.25
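# Illustrative sketch (an assumption, not part of the numpy suite): an
# airspeed-velocity benchmark module would consume these shared datasets
# roughly like this, subclassing Benchmark and timing one operation per dtype:
#
#     from .common import Benchmark, squares, TYPES1
#
#     class SumSquares(Benchmark):
#         params = TYPES1
#         param_names = ['dtype']
#
#         def time_sum(self, dtype):
#             squares[dtype].sum()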
|
krzysztof-magosa/ansible-modules-extras
|
refs/heads/devel
|
cloud/webfaction/webfaction_domain.py
|
153
|
#!/usr/bin/python
#
# Create Webfaction domains and subdomains using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: webfaction_domain
short_description: Add or remove domains and subdomains on Webfaction
description:
- Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted.
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the domain
required: true
state:
description:
- Whether the domain should exist
required: false
choices: ['present', 'absent']
default: "present"
subdomains:
description:
- Any subdomains to create.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: Create a test domain
webfaction_domain:
name: mydomain.com
state: present
subdomains:
- www
- blog
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
- name: Delete test domain and any subdomains
webfaction_domain:
name: mydomain.com
state: absent
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
subdomains = dict(required=False, default=[]),
login_name = dict(required=True),
login_password = dict(required=True),
),
supports_check_mode=True
)
domain_name = module.params['name']
domain_state = module.params['state']
domain_subdomains = module.params['subdomains']
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
domain_list = webfaction.list_domains(session_id)
domain_map = dict([(i['domain'], i) for i in domain_list])
existing_domain = domain_map.get(domain_name)
result = {}
# Here's where the real stuff happens
if domain_state == 'present':
# Does a domain with this name already exist?
if existing_domain:
if set(existing_domain['subdomains']) >= set(domain_subdomains):
# If it exists with the right subdomains, we don't change anything.
module.exit_json(
changed = False,
)
positional_args = [session_id, domain_name] + domain_subdomains
if not module.check_mode:
# If this isn't a dry run, create the domain
# print positional_args
result.update(
webfaction.create_domain(
*positional_args
)
)
elif domain_state == 'absent':
# If the domain's already not there, nothing changed.
if not existing_domain:
module.exit_json(
changed = False,
)
positional_args = [session_id, domain_name] + domain_subdomains
if not module.check_mode:
# If this isn't a dry run, delete the domain
result.update(
webfaction.delete_domain(*positional_args)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(domain_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
main()
|
jalavik/invenio
|
refs/heads/master
|
invenio/testsuite/test_restful.py
|
12
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask import url_for, request
from flask_restful import Resource
from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
from invenio.ext.restful import require_api_auth, require_oauth_scopes, \
require_header
from invenio.ext.sqlalchemy import db
class DecoratorsTestCase(InvenioTestCase):
def setUp(self):
from invenio.modules.accounts.models import User
from invenio.modules.oauth2server.registry import scopes
from invenio.modules.oauth2server.models import Token, Scope
# Setup variables:
self.called = dict()
# Setup test scopes
with self.app.app_context():
scopes.register(Scope(
'test:testscope',
group='Test',
help_text='Test scope',
))
# Setup API resources
class Test1Resource(Resource):
# NOTE: Method decorators are applied in reverse order
method_decorators = [
require_oauth_scopes('test:testscope'),
require_api_auth(),
]
def get(self):
assert request.oauth.access_token
return "success", 200
def post(self):
assert request.oauth.access_token
return "success", 200
@require_header('Content-Type', 'application/json')
def put(self):
return "success", 200
class Test2Resource(Resource):
@require_api_auth()
@require_oauth_scopes('test:testscope')
def get(self):
assert request.oauth.access_token
return "success", 200
@require_api_auth()
@require_oauth_scopes('test:testscope')
def post(self):
assert request.oauth.access_token
return "success", 200
@require_header('Content-Type', 'text/html')
def put(self):
return "success", 200
# Register API resources
api = self.app.extensions['restful']
api.add_resource(
Test1Resource,
'/api/test1/decoratorstestcase/'
)
api.add_resource(
Test2Resource,
'/api/test2/decoratorstestcase/'
)
# Create a user
self.user = User(
email='info@invenio-software.org', nickname='tester'
)
self.user.password = "tester"
db.session.add(self.user)
db.session.commit()
# Create tokens
self.token = Token.create_personal(
'test-', self.user.id, scopes=['test:testscope'], is_internal=True)
self.token_noscope = Token.create_personal(
'test-', self.user.id, scopes=[], is_internal=True)
def tearDown(self):
db.session.delete(self.user)
db.session.delete(self.token.client)
db.session.delete(self.token)
db.session.delete(self.token_noscope.client)
db.session.delete(self.token_noscope)
db.session.commit()
def test_require_api_auth_test1(self):
res = self.client.get(url_for('test1resource'))
self.assert401(res)
res = self.client.get(
url_for('test1resource', access_token=self.token.access_token))
self.assert200(res)
def test_require_api_auth_test2(self):
res = self.client.get(url_for('test2resource'))
self.assert401(res)
res = self.client.get(
url_for('test2resource', access_token=self.token.access_token))
self.assert200(res)
def test_require_oauth_scopes_test1(self):
res = self.client.post(
url_for('test1resource', access_token=self.token.access_token))
self.assert200(res)
res = self.client.post(
url_for('test1resource',
access_token=self.token_noscope.access_token))
self.assertStatus(res, 403)
def test_require_oauth_scopes_test2(self):
res = self.client.post(
url_for('test2resource', access_token=self.token.access_token))
self.assert200(res)
res = self.client.post(
url_for('test2resource',
access_token=self.token_noscope.access_token))
self.assertStatus(res, 403)
def test_require_header_test1(self):
res = self.client.put(
url_for('test1resource', access_token=self.token.access_token),
headers=[('Content-Type', 'application/json')])
self.assert200(res)
res = self.client.put(
url_for('test1resource', access_token=self.token.access_token),
headers=[('Content-Type', 'text/html')])
self.assertStatus(res, 415)
def test_require_header_test2(self):
res = self.client.put(
url_for('test2resource'),
headers=[('Content-Type', 'text/html; charset=UTF-8')])
self.assert200(res)
res = self.client.put(
url_for('test2resource'),
headers=[('Content-Type', 'application/json')])
self.assertStatus(res, 415)
class RestfulPaginationTestCase(InvenioTestCase):
def setUp(self):
"""Set up some dummy data and a resource."""
from invenio.modules.accounts.models import User
from invenio.modules.oauth2server.models import Token
self.data = range(25)
# setup test api resources
class TestDataResource(Resource):
method_decorators = [
require_api_auth()
]
@require_header('Content-Type', 'application/json')
def get(self):
import json
from flask import make_response
from invenio.ext.restful.errors import(
InvalidPageError
)
from invenio.ext.restful import pagination
# Test to see that the exceptions are raised correctly.
# In restful.py this is not needed because the error_handler
# takes care of exceptions
response = None
try:
# test data
testdata = range(25)
endpoint = request.endpoint
args = request.args
page = int(args.get('page', 1))
per_page = int(args.get('per_page', 10))
p = pagination.RestfulPagination(
page=page, per_page=per_page, total_count=len(testdata)
)
data_to_return = p.slice(testdata)
kwargs = {}
kwargs['endpoint'] = endpoint
kwargs['args'] = request.args
link_header = p.link_header(**kwargs)
response = make_response(json.dumps(data_to_return))
response.headers[link_header[0]] = link_header[1]
response.headers['Content-Type'] = request.headers['Content-Type']
except InvalidPageError as e:
exception = {}
exception['message'] = e.error_msg
exception['type'] = "{0}".format(type(e))
response = make_response(json.dumps(exception))
return response
# Register API resources
api = self.app.extensions['restful']
api.add_resource(
TestDataResource,
'/api/testdata/'
)
# Create a user
self.user = User(
email='info@invenio-software.org', nickname='tester'
)
self.user.password = "tester"
db.session.add(self.user)
db.session.commit()
# create token
self.token = Token.create_personal(
'test-', self.user.id, scopes=[], is_internal=True)
def tearDown(self):
"""Delete the dummy data."""
del self.data
db.session.delete(self.user)
db.session.delete(self.token.client)
db.session.delete(self.token)
db.session.commit()
def test_paginate_page1(self):
endpoint = "/api/testdata/?"
link_template = '<{}per_page={}&page={}>; rel="{}"'
answer_get = self.client.get(
url_for('testdataresource', access_token=self.token.access_token,
per_page=10),
headers=[('Content-Type', 'application/json')])
data_returned = answer_get.json
links_string = answer_get.headers['Link']
# expected answers
expected_data = self.data[0:10]
first_link = link_template.format(endpoint, 10, 1, "first")
next_link = link_template.format(endpoint, 10, 2, "next")
last_link = link_template.format(endpoint, 10, 3, "last")
expected_links_string = "{0},{1},{2}".format(
first_link,
next_link,
last_link
)
self.assertEqual(data_returned, expected_data)
self.assertEqual(links_string, expected_links_string)
def test_paginate_page2(self):
endpoint = "/api/testdata/?"
link_template = '<{}per_page={}&page={}>; rel="{}"'
answer_get = self.client.get(
url_for('testdataresource', access_token=self.token.access_token,
page=2, per_page=10),
headers=[('Content-Type', 'application/json')])
data_returned = answer_get.json
links_string = answer_get.headers['Link']
# expected answers
expected_data = self.data[10:20]
first_link = link_template.format(endpoint, 10, 1, "first")
prev_link = link_template.format(endpoint, 10, 1, "prev")
next_link = link_template.format(endpoint, 10, 3, "next")
last_link = link_template.format(endpoint, 10, 3, "last")
expected_links_string = "{0},{1},{2},{3}".format(
first_link,
prev_link,
next_link,
last_link
)
self.assertEqual(data_returned, expected_data)
self.assertEqual(links_string, expected_links_string)
def test_paginate_lastpage(self):
endpoint = "/api/testdata/?"
link_template = '<{}per_page={}&page={}>; rel="{}"'
answer_get = self.client.get(
url_for('testdataresource', access_token=self.token.access_token,
page=3, per_page=10),
headers=[('Content-Type', 'application/json')])
data_returned = answer_get.json
links_string = answer_get.headers['Link']
# expected answers
expected_data = self.data[20:25]
first_link = link_template.format(endpoint, 10, 1, "first")
prev_link = link_template.format(endpoint, 10, 2, "prev")
last_link = link_template.format(endpoint, 10, 3, "last")
expected_links_string = "{0},{1},{2}".format(
first_link,
prev_link,
last_link
)
self.assertEqual(data_returned, expected_data)
self.assertEqual(links_string, expected_links_string)
def test_paginate_nonexistentpage(self):
from invenio.ext.restful import errors
answer_get = self.client.get(
url_for('testdataresource',
access_token=self.token.access_token,
page=-2, per_page=10),
headers=[('Content-Type', 'application/json')])
# Test/assert to see that the exceptions are raised correctly
expected = {}
error_msg = "Invalid page number ('{0}').".format(-2)
expected['message'] = error_msg
expected['type'] = "{0}".format(errors.InvalidPageError)
self.assertEqual(answer_get.json, expected)
def test_paginate_per_pageerror(self):
from invenio.ext.restful import errors
answer_get = self.client.get(
url_for('testdataresource',
access_token=self.token.access_token,
page=1, per_page=-5),
headers=[('Content-Type', 'application/json')])
# Test/assert to see that the exceptions are raised correctly
expected = {}
error_msg = (
"Invalid per_page argument ('{0}'). Number of items "
"per pages must be positive integer.".format(-5)
)
expected['message'] = error_msg
expected['type'] = "{0}".format(errors.InvalidPageError)
self.assertEqual(answer_get.json, expected)
class RestfulSQLAlchemyPaginationTestCase(InvenioTestCase):
def setUp(self):
from flask_restful import Resource, fields, marshal
from invenio.modules.accounts.models import User
from invenio.modules.oauth2server.models import Token
class TagRepresentation(object):
"""A representation of a tag.
This class will be only used to return a tag as JSON.
"""
marshaling_fields = dict(
id=fields.Integer,
name=fields.String,
id_user=fields.Integer
)
def __init__(self, retrieved_tag):
"""Initialization.
Declares the attributes to marshal for a tag.
:param retrieved_tag: a tag from the database
"""
#get fields from the given tag
self.id = retrieved_tag.id
self.name = retrieved_tag.name
self.id_user = retrieved_tag.id_user
def marshal(self):
"""Marshal the Tag."""
return marshal(self, self.marshaling_fields)
class TestTagsResource(Resource):
method_decorators = [
require_api_auth()
]
@require_header('Content-Type', 'application/json')
def get(self):
import json
from flask import make_response
from invenio.modules.tags.models import WtgTAG
from invenio.ext.restful.errors import(
RestfulError, InvalidPageError
)
from invenio.ext.restful import pagination
response = None
try:
endpoint = request.endpoint
args = request.args
page = int(args.get('page', 1))
per_page = int(args.get('per_page', 2))
# check values arguments and raise exceptions if any errors
if per_page < 0:
raise RestfulError(
error_msg="Invalid per_page: {}".format(per_page),
status_code=400
)
if page < 0:
raise InvalidPageError(
error_msg="Invalid page: {}".format(page),
status_code=400
)
# need to sort by id
# also assuming only one user so no need to filter
# user's id
tags_q = WtgTAG.query.order_by(WtgTAG.id)
p = pagination.RestfulSQLAlchemyPagination(
query=tags_q, page=page, per_page=per_page
)
if page > p.pages:
raise InvalidPageError(
error_msg="Invalid page: {}".format(page),
status_code=400
)
tags_to_return = map(
lambda x: TagRepresentation(x).marshal(),
p.items
)
kwargs = {}
kwargs['endpoint'] = endpoint
kwargs['args'] = request.args
link_header = p.link_header(**kwargs)
response = make_response(json.dumps(tags_to_return))
response.headers[link_header[0]] = link_header[1]
response.headers['Content-Type'] = request.headers['Content-Type']
except (RestfulError, InvalidPageError) as e:
exception = {}
exception['message'] = e.error_msg
exception['type'] = "{0}".format(type(e))
response = make_response(json.dumps(exception))
return response
# Register API resources
api = self.app.extensions['restful']
api.add_resource(
TestTagsResource,
'/api/testtags/'
)
# Create a user
self.user = User(
email='info@invenio-software.org', nickname='tester'
)
self.user.password = "tester"
db.session.add(self.user)
db.session.commit()
# create token
self.token = Token.create_personal(
'test-', self.user.id, scopes=[], is_internal=True)
def tearDown(self):
db.session.delete(self.user)
db.session.delete(self.token.client)
db.session.delete(self.token)
db.session.commit()
def test_pagination_flow(self):
from invenio.modules.tags import api as tags_api
# template of tags names
tag_name_template = "tag{}"
# endpoint
endpoint = "/api/testtags/?"
# links template
link_template = '<{}per_page={}&page={}>; rel="{}"'
# create tags
for i in range(1, 7):
tag_name = tag_name_template.format(i)
tags_api.create_tag_for_user(self.user.id, tag_name)
# request first page
answer_get = self.client.get(
url_for('testtagsresource', access_token=self.token.access_token,
page=1, per_page=2),
headers=[('Content-Type', 'application/json')])
# check to ensure correct results
tags_names_from_request = [x['name'] for x in answer_get.json]
links_string = answer_get.headers['Link']
expected_names = []
for i in range(1, 3):
expected_name = tag_name_template.format(i)
expected_names.append(expected_name)
first_link = link_template.format(endpoint, 2, 1, "first")
next_link = link_template.format(endpoint, 2, 2, "next")
last_link = link_template.format(endpoint, 2, 3, "last")
expected_links_string = "{0},{1},{2}".format(
first_link,
next_link,
last_link
)
self.assertEqual(set(tags_names_from_request), set(expected_names))
self.assertEqual(links_string, expected_links_string)
tags_names_from_request = []
expected_names = []
# request second page
answer_get = self.client.get(
url_for('testtagsresource', access_token=self.token.access_token,
page=2, per_page=2),
headers=[('Content-Type', 'application/json')])
# check to ensure correct results
tags_names_from_request = [x['name'] for x in answer_get.json]
links_string = answer_get.headers['Link']
# check if names of tags are the expected ones
expected_names = []
for i in range(3, 5):
expected_name = tag_name_template.format(i)
expected_names.append(expected_name)
first_link = link_template.format(endpoint, 2, 1, "first")
prev_link = link_template.format(endpoint, 2, 1, "prev")
next_link = link_template.format(endpoint, 2, 3, "next")
last_link = link_template.format(endpoint, 2, 3, "last")
expected_links_string = "{0},{1},{2},{3}".format(
first_link,
prev_link,
next_link,
last_link
)
self.assertEqual(set(tags_names_from_request), set(expected_names))
self.assertEqual(links_string, expected_links_string)
tags_names_from_request = []
expected_names = []
# request third(last) page
answer_get = self.client.get(
url_for('testtagsresource', access_token=self.token.access_token,
page=3, per_page=2),
headers=[('Content-Type', 'application/json')])
# check to ensure correct results
tags_names_from_request = [x['name'] for x in answer_get.json]
links_string = answer_get.headers['Link']
# check if names of tags are the expected ones
expected_names = []
for i in range(5, 7):
expected_name = tag_name_template.format(i)
expected_names.append(expected_name)
first_link = link_template.format(endpoint, 2, 1, "first")
prev_link = link_template.format(endpoint, 2, 2, "prev")
last_link = link_template.format(endpoint, 2, 3, "last")
expected_links_string = "{0},{1},{2}".format(
first_link,
prev_link,
last_link
)
self.assertEqual(set(tags_names_from_request), set(expected_names))
self.assertEqual(links_string, expected_links_string)
# delete created tags
tags_api.delete_all_tags_from_user(self.user.id)
def test_paginate_nonexistentpage(self):
from invenio.ext.restful import errors
answer_get = self.client.get(
url_for('testtagsresource',
access_token=self.token.access_token,
page=-2),
headers=[('Content-Type', 'application/json')])
# Test/assert to see that the exceptions are raised correctly
expected = {}
expected['message'] = "Invalid page: {0}".format(-2)
expected['type'] = "{0}".format(errors.InvalidPageError)
self.assertEqual(answer_get.json, expected)
def test_paginate_per_pageerror(self):
from invenio.ext.restful import errors
answer_get = self.client.get(
url_for('testtagsresource',
access_token=self.token.access_token,
per_page=-5),
headers=[('Content-Type', 'application/json')])
# Test/assert to see that the exceptions are raised correctly
expected = {}
expected['message'] = "Invalid per_page: {0}".format(-5)
expected['type'] = "{0}".format(errors.RestfulError)
self.assertEqual(answer_get.json, expected)
TEST_SUITE = make_test_suite(DecoratorsTestCase, RestfulPaginationTestCase,
RestfulSQLAlchemyPaginationTestCase)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
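# Illustrative rendering of the Link header checked in test_paginate_page2
# above (25 items, per_page=10, page=2), shown here with line breaks for
# readability; the tests compare the comma-joined value without them:
#
#   Link: </api/testdata/?per_page=10&page=1>; rel="first",
#         </api/testdata/?per_page=10&page=1>; rel="prev",
#         </api/testdata/?per_page=10&page=3>; rel="next",
#         </api/testdata/?per_page=10&page=3>; rel="last"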
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/ship/attachment/weapon/shared_aggressor_weapon_s02.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/weapon/shared_aggressor_weapon_s02.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
miragshin/ZeroNet
|
refs/heads/master
|
plugins/Sidebar/maxminddb/__init__.py
|
22
|
# pylint:disable=C0111
import os
import maxminddb.reader
try:
import maxminddb.extension
except ImportError:
maxminddb.extension = None
from maxminddb.const import (MODE_AUTO, MODE_MMAP, MODE_MMAP_EXT, MODE_FILE,
MODE_MEMORY)
from maxminddb.decoder import InvalidDatabaseError
def open_database(database, mode=MODE_AUTO):
"""Open a Maxmind DB database
Arguments:
database -- A path to a valid MaxMind DB file such as a GeoIP2
database file.
mode -- mode to open the database with. Valid modes are:
* MODE_MMAP_EXT - use the C extension with memory map.
* MODE_MMAP - read from memory map. Pure Python.
* MODE_FILE - read database as standard file. Pure Python.
* MODE_MEMORY - load database into memory. Pure Python.
* MODE_AUTO - tries MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that
order. Default mode.
"""
if (mode == MODE_AUTO and maxminddb.extension and
hasattr(maxminddb.extension, 'Reader')) or mode == MODE_MMAP_EXT:
return maxminddb.extension.Reader(database)
elif mode in (MODE_AUTO, MODE_MMAP, MODE_FILE, MODE_MEMORY):
return maxminddb.reader.Reader(database, mode)
raise ValueError('Unsupported open mode: {0}'.format(mode))
def Reader(database): # pylint: disable=invalid-name
"""This exists for backwards compatibility. Use open_database instead"""
return open_database(database)
__title__ = 'maxminddb'
__version__ = '1.2.0'
__author__ = 'Gregory Oschwald'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright 2014 Maxmind, Inc.'
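# Usage sketch (the database path is hypothetical; any MaxMind DB file such as
# a GeoLite2 country database would work):
#
#     reader = open_database('/path/to/GeoLite2-Country.mmdb', MODE_AUTO)
#     record = reader.get('8.8.8.8')   # dict of data for the address, or None
#     reader.close()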
|
perojonsson/pyxtuml
|
refs/heads/master
|
tests/utils.py
|
1
|
# encoding: utf-8
# Copyright (C) 2014-2015 John Törnblom
def expect_exception(exception):
'''
Decorator for expecting exceptions to be thrown from a test case
'''
def test_decorator(fn):
def test_decorated(self, *args, **kwargs):
self.assertRaises(exception, fn, self, *args, **kwargs)
return test_decorated
return test_decorator
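# Usage sketch (hypothetical test case, not part of this module): the decorated
# test passes only if the wrapped body raises the expected exception.
#
#     import unittest
#
#     class ExampleTestCase(unittest.TestCase):
#
#         @expect_exception(ZeroDivisionError)
#         def test_division_by_zero(self):
#             1 / 0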
|
lbeltrame/letsencrypt
|
refs/heads/master
|
letsencrypt-nginx/letsencrypt_nginx/tests/tls_sni_01_test.py
|
17
|
"""Tests for letsencrypt_nginx.tls_sni_01"""
import unittest
import shutil
import mock
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt.plugins import common_test
from letsencrypt.tests import acme_util
from letsencrypt_nginx import obj
from letsencrypt_nginx.tests import util
class TlsSniPerformTest(util.NginxTest):
"""Test the NginxTlsSni01 challenge."""
account_key = common_test.TLSSNI01Test.auth_key
achalls = [
achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.TLSSNI01(token="kNdwjwOeX0I_A8DXt9Msmg"), "pending"),
domain="www.example.com", account_key=account_key),
achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.TLSSNI01(
token="\xba\xa9\xda?<m\xaewmx\xea\xad\xadv\xf4\x02\xc9y"
"\x80\xe2_X\t\xe7\xc7\xa4\t\xca\xf7&\x945"
), "pending"),
domain="blah", account_key=account_key),
achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.TLSSNI01(
token="\x8c\x8a\xbf_-f\\cw\xee\xd6\xf8/\xa5\xe3\xfd"
"\xeb9\xf1\xf5\xb9\xefVM\xc9w\xa4u\x9c\xe1\x87\xb4"
), "pending"),
domain="www.example.org", account_key=account_key),
]
def setUp(self):
super(TlsSniPerformTest, self).setUp()
config = util.get_nginx_configurator(
self.config_path, self.config_dir, self.work_dir)
from letsencrypt_nginx import tls_sni_01
self.sni = tls_sni_01.NginxTlsSni01(config)
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.config_dir)
shutil.rmtree(self.work_dir)
@mock.patch("letsencrypt_nginx.configurator"
".NginxConfigurator.choose_vhost")
def test_perform(self, mock_choose):
self.sni.add_chall(self.achalls[1])
mock_choose.return_value = None
result = self.sni.perform()
self.assertTrue(result is None)
def test_perform0(self):
responses = self.sni.perform()
self.assertEqual([], responses)
@mock.patch("letsencrypt_nginx.configurator.NginxConfigurator.save")
def test_perform1(self, mock_save):
self.sni.add_chall(self.achalls[0])
response = self.achalls[0].response(self.account_key)
mock_setup_cert = mock.MagicMock(return_value=response)
# pylint: disable=protected-access
self.sni._setup_challenge_cert = mock_setup_cert
responses = self.sni.perform()
mock_setup_cert.assert_called_once_with(self.achalls[0])
self.assertEqual([response], responses)
self.assertEqual(mock_save.call_count, 2)
# Make sure challenge config is included in main config
http = self.sni.configurator.parser.parsed[
self.sni.configurator.parser.loc["root"]][-1]
self.assertTrue(
util.contains_at_depth(http, ['include', self.sni.challenge_conf], 1))
def test_perform2(self):
acme_responses = []
for achall in self.achalls:
self.sni.add_chall(achall)
acme_responses.append(achall.response(self.account_key))
mock_setup_cert = mock.MagicMock(side_effect=acme_responses)
# pylint: disable=protected-access
self.sni._setup_challenge_cert = mock_setup_cert
sni_responses = self.sni.perform()
self.assertEqual(mock_setup_cert.call_count, 3)
for index, achall in enumerate(self.achalls):
self.assertEqual(
mock_setup_cert.call_args_list[index], mock.call(achall))
http = self.sni.configurator.parser.parsed[
self.sni.configurator.parser.loc["root"]][-1]
self.assertTrue(['include', self.sni.challenge_conf] in http[1])
self.assertTrue(
util.contains_at_depth(http, ['server_name', 'blah'], 3))
self.assertEqual(len(sni_responses), 3)
for i in xrange(3):
self.assertEqual(sni_responses[i], acme_responses[i])
def test_mod_config(self):
self.sni.add_chall(self.achalls[0])
self.sni.add_chall(self.achalls[2])
v_addr1 = [obj.Addr("69.50.225.155", "9000", True, False),
obj.Addr("127.0.0.1", "", False, False)]
v_addr2 = [obj.Addr("myhost", "", False, True)]
ll_addr = [v_addr1, v_addr2]
self.sni._mod_config(ll_addr) # pylint: disable=protected-access
self.sni.configurator.save()
self.sni.configurator.parser.load()
http = self.sni.configurator.parser.parsed[
self.sni.configurator.parser.loc["root"]][-1]
self.assertTrue(['include', self.sni.challenge_conf] in http[1])
vhosts = self.sni.configurator.parser.get_vhosts()
vhs = [vh for vh in vhosts if vh.filep == self.sni.challenge_conf]
for vhost in vhs:
if vhost.addrs == set(v_addr1):
response = self.achalls[0].response(self.account_key)
else:
response = self.achalls[2].response(self.account_key)
self.assertEqual(vhost.addrs, set(v_addr2))
self.assertEqual(vhost.names, set([response.z_domain]))
self.assertEqual(len(vhs), 2)
def test_mod_config_fail(self):
root = self.sni.configurator.parser.loc["root"]
self.sni.configurator.parser.parsed[root] = [['include', 'foo.conf']]
# pylint: disable=protected-access
self.assertRaises(
errors.MisconfigurationError, self.sni._mod_config, [])
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
jiumem/cgt
|
refs/heads/master
|
examples/broken/mnist_torchstyle.py
|
22
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original', data_home='~/cgt/data') # XXX
print(mnist.data.shape)
print(mnist.target.shape)
np.unique(mnist.target)
#plt.imshow(mnist.data[1, :].reshape(28, 28))
#plt.show()
# do some preprocessing
X = mnist.data
y = mnist.target
X = X.astype('float64')
X = X / 255
# train-test split (as [Joachims, 2006])
# TODO can define own validation split...
n_train = 60000
X_train = X[:n_train, :]
X_test = X[n_train:, :]
y_train = y[:n_train]
y_test = y[n_train:]
# construct the network
import nn
import cgt
from opt import sgd_update
N_LAYERS = 2
hid_size = X.shape[1] # 28 * 28
out_size = 10
inps = [cgt.matrix(dtype=cgt.floatX)]
param_list = []
for k in xrange(N_LAYERS):
tmp = nn.Affine(hid_size, hid_size)#(inps[k])
param_list.extend([tmp.weight, tmp.bias])
inps.append(cgt.tanh(tmp(inps[k])))
tmp = nn.Affine(hid_size, out_size)
param_list.extend([tmp.weight, tmp.bias])
logprobs = nn.logsoftmax(tmp(inps[-1]))
#dnn = nn.Module(inps[0:1], [logprobs])
#params = dnn.get_parameters()
# XXX think should just make this part of get_parameters
theta = nn.setup_contiguous_storage(param_list)
# XXX initialize
theta[:] = np.random.uniform(-0.08, 0.08, theta.shape)
# XXX taken from other demo, move
def ind2onehot(inds, n_cls):
out = np.zeros(list(inds.shape)+[n_cls,], cgt.floatX)
for k in xrange(inds.shape[0]):
out[k, inds[k].astype('int32')] = 1
#out.flat[np.arange(inds.size)*n_cls + inds.ravel()] = 1
return out
b_size = 25
def make_loss_and_grad(net):
X_b = inps[0] #cgt.matrix(dtype=cgt.floatX)
y_onehot = cgt.matrix(dtype='i4')
outputs = [logprobs]
loss = nn.crossent(outputs[0], y_onehot) / b_size
#gradloss = cgt.grad(loss, params)
gradloss = cgt.grad(loss, param_list)
# XXX use flatcat function
grad = cgt.concatenate([x.flatten() for x in gradloss])
#grad = gradloss
return cgt.make_function([X_b, y_onehot], [loss, grad, logprobs])
f_loss_and_grad = make_loss_and_grad(None)
# train loop
# shuffle data
perm = np.random.permutation(np.arange(X_train.shape[0]))
X_train = X_train[perm, :]
y_train = y_train[perm]
class Table(object):
pass
state = Table()
state.theta = theta
state.step_size = 0.1
exploss = None
for k in xrange(X_train.shape[0] / b_size):
X_batch, y_batch = X_train[k*b_size:(k+1)*b_size, :], y_train[k*b_size:(k+1)*b_size]
loss, grad, logprobs = f_loss_and_grad(X_batch, ind2onehot(y_batch, 10))
exploss = loss if k == 0 else 0.99*exploss + 0.01*loss
print('iter %d, loss %f, exploss %f' % (k + 1, loss, exploss))
sgd_update(state, grad)
# test code
correct = 0
total = 0
print(X_test.shape)
print(y_test.shape)
for k in xrange(X_test.shape[0] / b_size):
X_batch, y_batch = X_test[k*b_size:(k+1)*b_size, :], y_test[k*b_size:(k+1)*b_size]
loss, grad, logprobs = f_loss_and_grad(X_batch, ind2onehot(y_batch, 10))
preds = logprobs.argmax(axis=1).flatten()
correct = correct + (preds == y_batch).sum()
total = total + b_size
print('%d/%d correct' % (correct, total))
|
alphagov/performanceplatform-collector
|
refs/heads/master
|
tests/performanceplatform/collector/ga/__init__.py
|
1
|
from datetime import datetime
import pytz
def dt(year, month, day, hours, minutes, seconds, tz):
_dt = datetime(year, month, day, hours, minutes, seconds)
return pytz.timezone(tz).localize(_dt)
|
boonchu/pykickstart
|
refs/heads/master
|
pykickstart/commands/selinux.py
|
9
|
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import KickstartCommand
from pykickstart.constants import SELINUX_DISABLED, SELINUX_ENFORCING, SELINUX_PERMISSIVE
from pykickstart.options import KSOptionParser
class FC3_SELinux(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.selinux = kwargs.get("selinux", None)
def __str__(self):
retval = KickstartCommand.__str__(self)
if not retval and self.selinux is None:
return ""
retval += "# SELinux configuration\n"
if self.selinux == SELINUX_DISABLED:
retval += "selinux --disabled\n"
elif self.selinux == SELINUX_ENFORCING:
retval += "selinux --enforcing\n"
elif self.selinux == SELINUX_PERMISSIVE:
retval += "selinux --permissive\n"
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--disabled", dest="selinux", action="store_const",
const=SELINUX_DISABLED)
op.add_option("--enforcing", dest="selinux", action="store_const",
const=SELINUX_ENFORCING)
op.add_option("--permissive", dest="selinux", action="store_const",
const=SELINUX_PERMISSIVE)
return op
def parse(self, args):
(opts, _extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
return self
|
rbdedu/runway
|
refs/heads/master
|
singleFileApp/.buildozer/android/platform/build/build/other_builds/python2/armeabi-v7a/python2/Doc/tools/roman.py
|
91
|
"""Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import re
#Define exceptions
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass
#Define digit mapping
romanNumeralMap = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def toRoman(n):
"""convert integer to Roman numeral"""
if not (0 < n < 5000):
raise OutOfRangeError("number out of range (must be 1..4999)")
if int(n) != n:
raise NotIntegerError("decimals can not be converted")
result = ""
for numeral, integer in romanNumeralMap:
while n >= integer:
result += numeral
n -= integer
return result
#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
^ # beginning of string
M{0,4} # thousands - 0 to 4 M's
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
# or 500-800 (D, followed by 0 to 3 C's)
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
# or 50-80 (L, followed by 0 to 3 X's)
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
# or 5-8 (V, followed by 0 to 3 I's)
$ # end of string
""" ,re.VERBOSE)
def fromRoman(s):
"""convert Roman numeral to integer"""
if not s:
raise InvalidRomanNumeralError('Input can not be blank')
if not romanNumeralPattern.search(s):
raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)
result = 0
index = 0
for numeral, integer in romanNumeralMap:
while s[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
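# Round-trip sketch (illustrative): 1994 -> "MCMXCIV" -> 1994
assert toRoman(1994) == "MCMXCIV"
assert fromRoman("MCMXCIV") == 1994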
|
SEMAFORInformatik/femagtools
|
refs/heads/master
|
femagtools/ntib.py
|
1
|
"""
femagtools.ntib
~~~~~~~~~~~~~~~
NTIB / LOS files handling
"""
import logging
import math
logger = logging.getLogger(__name__)
def create(speed, current, beta, r1=0, m=3):
"""return Ntib info"""
return ['Speed[1/min], Current[A], beta[Degr], R_STATOR[Ohm], n-p',
'Number of values: {0}'.format(
min(len(speed), len(current), len(beta)))] + \
['{0:12.1f}{1:12.3f}{2:12.3f}{3:12f}{4:12f}'.format(
60*n, math.sqrt(2.0)*i1,
b,
r1, m)
for n, i1, b in zip(speed, current, beta)]
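# Illustrative sketch (the values below are assumptions, not from the original
# source): create() expects speed in 1/s (converted to 1/min) and rms current
# (scaled by sqrt(2) to a peak value), e.g.
#
#   lines = create(speed=[50.0, 100.0], current=[10.0, 20.0],
#                  beta=[0.0, 15.0], r1=0.05)
#   # -> header lines plus one formatted row per operating point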
def toFloat(s, fac=1.0):
try:
return float(s)*fac
except ValueError:
return float('nan')
def read_los_content(content):
"""return dict of losses in LOS-file content"""
result = dict(speed=[],
torque=[],
i1=[],
beta=[],
stajo=[],
staza=[],
rotfe=[],
magnet=[],
winding=[],
total=[])
started = False
for l in content:
if not started and l.startswith('[1/min] '):
started = True
elif started:
r = l.strip().split()
if len(r) > 7:
result['speed'].append(toFloat(r[0], 1./60))
result['torque'].append(toFloat(r[1]))
result['i1'].append(toFloat(r[2]))
result['beta'].append(toFloat(r[3]))
pfe1 = toFloat(r[4])
pfe2 = 0
p = 4
if len(r) > 8:
pfe2 = toFloat(r[5])
p = 5
result['stajo'].append(pfe1)
result['staza'].append(pfe2)
result['rotfe'].append(toFloat(r[p+1]))
result['magnet'].append(toFloat(r[p+2]))
result['winding'].append(toFloat(r[p+3]))
try:
result['stafe'] = result['stajo'] + result['staza']
result['total'].append(sum([result[k][-1]
for k in ('stajo',
'staza',
'rotfe',
'magnet',
'winding')]))
except KeyError:
result['total'].append(None)
logger.info("num rows %d", len(result['total']))
return result
def read_los(filename):
"""return dict of losses in LOS-file"""
logger.info("read loss file: %s", filename)
with open(filename) as f:
return read_los_content(f.readlines())
# empty
return dict(speed=[],
torque=[],
i1=[],
beta=[],
stajo=[],
staza=[],
rotfe=[],
magnet=[],
winding=[],
total=[])
|
astropy/astropy
|
refs/heads/main
|
astropy/io/ascii/tests/test_html.py
|
7
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``HTML``
reader/writer and aims to document its functionality.
Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
to be installed.
"""
from io import StringIO
from astropy.io.ascii import html
from astropy.io.ascii import core
from astropy.table import Table
import pytest
import numpy as np
from .common import setup_function, teardown_function # noqa
from astropy.io import ascii
from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound
@pytest.mark.skipif('not HAS_BS4')
def test_soupstring():
"""
Test to make sure the class SoupString behaves properly.
"""
soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>',
'html.parser')
soup_str = html.SoupString(soup)
assert isinstance(soup_str, str)
assert isinstance(soup_str, html.SoupString)
assert soup_str == '<html><head></head><body><p>foo</p></body></html>'
assert soup_str.soup is soup
def test_listwriter():
"""
Test to make sure the class ListWriter behaves properly.
"""
lst = []
writer = html.ListWriter(lst)
for i in range(5):
writer.write(i)
for ch in 'abcde':
writer.write(ch)
assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e']
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table():
"""
Test to make sure that identify_table() returns whether the
given BeautifulSoup tag is the correct table to process.
"""
# Should return False on non-<table> tags and None
soup = BeautifulSoup('<html><body></body></html>', 'html.parser')
assert html.identify_table(soup, {}, 0) is False
assert html.identify_table(None, {}, 0) is False
soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr><tr>'
'<td>B</td></tr></table>', 'html.parser').table
assert html.identify_table(soup, {}, 2) is False
assert html.identify_table(soup, {}, 1) is True # Default index of 1
# Same tests, but with explicit parameter
assert html.identify_table(soup, {'table_id': 2}, 1) is False
assert html.identify_table(soup, {'table_id': 1}, 1) is True
# Test identification by string ID
assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False
assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True
@pytest.mark.skipif('not HAS_BS4')
def test_missing_data():
"""
Test reading a table with missing data
"""
# First with default where blank => '0'
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td></td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
# Now with a specific value '...' => missing
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td>...</td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')])
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
@pytest.mark.skipif('not HAS_BS4')
def test_rename_cols():
"""
Test reading a table and renaming cols
"""
table_in = ['<table>',
'<tr><th>A</th> <th>B</th></tr>',
'<tr><td>1</td><td>2</td></tr>',
'</table>']
# Swap column names
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'])
assert dat.colnames == ['B', 'A']
assert len(dat) == 1
# Swap column names and only include A (the renamed version)
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A'])
assert dat.colnames == ['A']
assert len(dat) == 1
assert np.all(dat['A'] == 2)
@pytest.mark.skipif('not HAS_BS4')
def test_no_names():
"""
Test reading a table with no column header
"""
table_in = ['<table>',
'<tr><td>1</td></tr>',
'<tr><td>2</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.colnames == ['col1']
assert len(dat) == 2
dat = Table.read(table_in, format='ascii.html', names=['a'])
assert dat.colnames == ['a']
assert len(dat) == 2
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table_fail():
"""
Raise an exception with an informative error message if table_id
is not found.
"""
table_in = ['<table id="foo"><tr><th>A</th></tr>',
'<tr><td>B</td></tr></table>']
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'},
guess=False)
assert err.match("ERROR: HTML table id 'bad_id' not found$")
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 3},
guess=False)
assert err.match("ERROR: HTML table number 3 not found$")
@pytest.mark.skipif('not HAS_BS4')
def test_backend_parsers():
"""
Make sure the user can specify which back-end parser to use
and that an error is raised if the parser is invalid.
"""
for parser in ('lxml', 'xml', 'html.parser', 'html5lib'):
try:
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': parser}, guess=False)
except FeatureNotFound:
if parser == 'html.parser':
raise
# otherwise ignore if the dependency isn't present
# reading should fail if the parser is invalid
with pytest.raises(FeatureNotFound):
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': 'foo'}, guess=False)
@pytest.mark.skipif('HAS_BS4')
def test_htmlinputter_no_bs4():
"""
This should raise an OptionalTableImportError if BeautifulSoup
is not installed.
"""
inputter = html.HTMLInputter()
with pytest.raises(core.OptionalTableImportError):
inputter.process_lines([])
@pytest.mark.skipif('not HAS_BS4')
def test_htmlinputter():
"""
Test to ensure that HTMLInputter correctly converts input
into a list of SoupStrings representing table elements.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
# In absence of table_id, defaults to the first table
expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>',
'<tr><td>1</td><td>a</td><td>1.05</td></tr>',
'<tr><td>2</td><td>b</td><td>2.75</td></tr>',
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Should raise an InconsistentTableError if the table is not found
inputter.html = {'table_id': 4}
with pytest.raises(core.InconsistentTableError):
inputter.get_lines(table)
# Identification by string ID
inputter.html['table_id'] = 'second'
expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>',
'<tr><td>4</td><td>d</td><td>10.5</td></tr>',
'<tr><td>5</td><td>e</td><td>27.5</td></tr>',
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Identification by integer index
inputter.html['table_id'] = 3
expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>',
'<tr><td>7</td><td>g</td><td>105.0</td></tr>',
'<tr><td>8</td><td>h</td><td>275.0</td></tr>',
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
@pytest.mark.skipif('not HAS_BS4')
def test_htmlsplitter():
"""
Test to make sure that HTMLSplitter correctly inputs lines
of type SoupString to return a generator that gives all
header and data elements.
"""
splitter = html.HTMLSplitter()
lines = [html.SoupString(BeautifulSoup('<table><tr><th>Col 1</th><th>Col 2</th></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<table><tr><td>Data 1</td><td>Data 2</td></tr></table>',
'html.parser').tr)]
expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']]
assert list(splitter(lines)) == expected_data
# Make sure the presence of a non-SoupString triggers a TypeError
lines.append('<tr><td>Data 3</td><td>Data 4</td></tr>')
with pytest.raises(TypeError):
list(splitter(lines))
# Make sure that passing an empty list triggers an error
with pytest.raises(core.InconsistentTableError):
list(splitter([]))
@pytest.mark.skipif('not HAS_BS4')
def test_htmlheader_start():
"""
Test to ensure that the start_line method of HTMLHeader
returns the first line of header data. Uses data/html.html
for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
header = html.HTMLHeader()
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>C1</th><th>C2</th><th>C3</th></tr>'
# start_line should return None if no valid header is found
lines = [html.SoupString(BeautifulSoup('<table><tr><td>Data</td></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
assert header.start_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><th>Header</th></tr>')
with pytest.raises(TypeError):
header.start_line(lines)
@pytest.mark.skipif('not HAS_BS4')
def test_htmldata():
"""
Test to ensure that the start_line and end_line methods
of HTMLData return the first and last lines of table data. Uses
data/html.html for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
data = html.HTMLData()
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>1</td><td>a</td><td>1.05</td></tr>'
# end_line returns the index of the last data element + 1
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>4</td><td>d</td><td>10.5</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>7</td><td>g</td><td>105.0</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>'
# start_line should raise an error if no table data exists
lines = [html.SoupString(BeautifulSoup('<div></div>', 'html.parser').div),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
with pytest.raises(core.InconsistentTableError):
data.start_line(lines)
# end_line should return None if no table data exists
assert data.end_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><td>Data</td></tr>')
with pytest.raises(TypeError):
data.start_line(lines)
with pytest.raises(TypeError):
data.end_line(lines)
def test_multicolumn_write():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td>a</td>
<td>a</td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td>b</td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML().write(table)[0].strip()
assert out == expected.strip()
@pytest.mark.skipif('not HAS_BLEACH')
def test_multicolumn_write_escape():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('<a></a>', '<a></a>', 'a'), ('<b></b>', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td><a></a></td>
<td><a></a></td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td><b></b></td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip()
assert out == expected.strip()
def test_write_no_multicols():
"""
Test to make sure that the HTML writer will not use
multi-dimensional columns if the multicol parameter
is False.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
<th>C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0 .. 1.0</td>
<td>a .. a</td>
</tr>
<tr>
<td>2</td>
<td>2.0 .. 2.0</td>
<td>b .. b</td>
</tr>
<tr>
<td>3</td>
<td>3.0 .. 3.0</td>
<td>c .. c</td>
</tr>
</table>
</body>
</html>
"""
assert html.HTML({'multicol': False}).write(table)[0].strip() == \
expected.strip()
@pytest.mark.skipif('not HAS_BS4')
def test_multicolumn_read():
"""
Test to make sure that the HTML reader inputs multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
Ensure that any string element within a multidimensional column
casts all elements to string prior to type conversion operations.
"""
table = Table.read('data/html2.html', format='ascii.html')
str_type = np.dtype((str, 21))
expected = Table(np.array([(['1', '2.5000000000000000001'], 3),
(['1a', '1'], 3.5)],
dtype=[('A', str_type, (2,)), ('B', '<f8')]))
assert np.all(table == expected)
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write():
"""
Test that columns can contain raw HTML which is not escaped.
"""
t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b'])
# One column contains raw HTML (string input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# One column contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']})
assert expected in out.getvalue()
# Two columns contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write_clean():
"""
Test that columns can contain raw HTML which is not escaped.
"""
import bleach # noqa
t = Table([['<script>x</script>'], ['<p>y</p>'], ['<em>y</em>']], names=['a', 'b', 'c'])
# Confirm that <script> and <p> get escaped but not <em>
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# Confirm that we can whitelist <p>
out = StringIO()
t.write(out, format='ascii.html',
htmldict={'raw_html_cols': t.colnames,
'raw_html_clean_kwargs': {'tags': bleach.ALLOWED_TAGS + ['p']}})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
def test_write_table_html_fill_values():
"""
Test that passing fill_values should replace any matching row
"""
buffer_output = StringIO()
t = Table([[1], [2]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world'),
format='html')
t_expected = Table([['Hello world'], [2]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_optional_columns():
"""
Test that passing optional column in fill_values should only replace
matching columns
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'),
format='html')
t_expected = Table([[1], ['Hello world']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'), masked=True, dtype=('i4', 'i8'))
t['a'] = np.ma.masked
ascii.write(t, buffer_output, fill_values=(ascii.masked, 'TEST'),
format='html')
t_expected = Table([['TEST'], [1]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multicolumn_table_html_fill_values():
"""
Test to make sure that the HTML writer writes multidimensional
columns with correctly replaced fill_values.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_output = StringIO()
t = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t, buffer_output, fill_values=('a', 'z'),
format='html')
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('z', 'z', 'z'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_expected = StringIO()
t_expected = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multi_column_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values for multidimensional tables
"""
buffer_output = StringIO()
t = Table([[1, 2, 3, 4], ['--', 'a', '--', 'b']], names=('a', 'b'), masked=True)
t['a'][0:2] = np.ma.masked
t['b'][0:2] = np.ma.masked
ascii.write(t, buffer_output, fill_values=[(ascii.masked, 'MASKED')],
format='html')
t_expected = Table([['MASKED', 'MASKED', 3, 4], [
'MASKED', 'MASKED', '--', 'b']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
print(buffer_expected.getvalue())
assert buffer_output.getvalue() == buffer_expected.getvalue()
@pytest.mark.skipif('not HAS_BS4')
def test_read_html_unicode():
"""
Test reading an HTML table with unicode values
"""
table_in = ['<table>',
'<tr><td>Δ</td></tr>',
'<tr><td>Δ</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert np.all(dat['col1'] == ['Δ', 'Δ'])
|
wkrzemien/DIRAC
|
refs/heads/integration
|
Core/Utilities/ClassAd/__init__.py
|
5
|
"""
DIRAC.Core.ClassAd package
"""
__RCSID__ = "$Id$"
|
yangming85/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/modeltests/custom_pk/fields.py
|
39
|
import random
import string
from django.db import models
class MyWrapper(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.value)
def __unicode__(self):
return self.value
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.value == other.value
return self.value == other
class MyAutoField(models.CharField):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 10
super(MyAutoField, self).__init__(*args, **kwargs)
def pre_save(self, instance, add):
value = getattr(instance, self.attname, None)
if not value:
value = MyWrapper(''.join(random.sample(string.lowercase, 10)))
setattr(instance, self.attname, value)
return value
def to_python(self, value):
if not value:
return
if not isinstance(value, MyWrapper):
value = MyWrapper(value)
return value
def get_db_prep_save(self, value):
if not value:
return
if isinstance(value, MyWrapper):
return unicode(value)
return value
def get_db_prep_value(self, value):
if not value:
return
if isinstance(value, MyWrapper):
return unicode(value)
return value
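# Hypothetical usage sketch (model and field names are illustrative only):
#
#   class Employee(models.Model):
#       code = MyAutoField(primary_key=True)
#       name = models.CharField(max_length=50)
#
# pre_save() fills ``code`` with a MyWrapper around 10 random lowercase letters
# on first save; to_python()/get_db_prep_value() convert between MyWrapper and
# unicode when loading from and saving to the database.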
|
holmes-app/holmes-api
|
refs/heads/master
|
holmes/validators/base.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from holmes.facters import Baser
class Validator(Baser):
def __init__(self, reviewer):
self.reviewer = reviewer
self.url_buffer = set()
@classmethod
def get_violation_definitions(cls):
raise NotImplementedError
@classmethod
def get_default_violations_values(cls, config):
return {}
def add_violation(self, key, value, points):
self.reviewer.add_violation(key, value, points)
def validate(self):
return True
def enqueue(self, url):
self.reviewer.enqueue(url)
def get_violation_pref(self, key):
return self.reviewer.get_domains_violations_prefs_by_key(key)
def test_url(self, url, response, broken_link_callback=None, moved_link_callback=None):
status = response.status_code
if status > 399:
if broken_link_callback:
broken_link_callback(url, response)
return False
if status == 302 or status == 307:
if moved_link_callback:
moved_link_callback(url, response)
return False
if response.url.rstrip('/') != url.rstrip('/'):
return False
return True
def send_url(self, url, score, response):
if self.test_url(url, response, self.broken_link_violation, self.moved_link_violation):
self.url_buffer.add((url, score))
if len(self.url_buffer) > self.config.MAX_ENQUEUE_BUFFER_LENGTH:
self.flush()
def flush(self):
if not self.url_buffer:
return
self.enqueue(self.url_buffer)
self.url_buffer = set()
def broken_link_violation(self):
text = 'broken_link_violation method needs to be implemented by {0}'
raise NotImplementedError(text.format(self.__class__.__name__))
def moved_link_violation(self):
text = 'moved_link_violation method needs to be implemented by {0}'
raise NotImplementedError(text.format(self.__class__.__name__))
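# Hypothetical subclass sketch (names, keys and attributes are illustrative,
# not part of the holmes code base): concrete validators implement
# get_violation_definitions() and validate(), and report problems through
# add_violation():
#
#   class TitleValidator(Validator):
#       @classmethod
#       def get_violation_definitions(cls):
#           return {'page.title.missing': {'title': 'Missing title',
#                                          'category': 'SEO'}}
#
#       def validate(self):
#           title = getattr(self.reviewer, 'current_title', None)  # assumed attribute
#           if not title:
#               self.add_violation(key='page.title.missing', value=None, points=50)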
|
xuxiaoxing/flasky
|
refs/heads/master
|
app/main/views.py
|
1
|
from flask import render_template, session, redirect, url_for, current_app
from . import main
from .forms import NameForm
from .. import db
from ..models import User
from ..email import send_email
@main.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
session['known'] = False
if current_app.config['FLASKY_ADMIN']:
send_email(current_app.config['FLASKY_ADMIN'], 'New User',
'mail/new_user', user=user)
else:
session['known'] = True
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('.index'))
return render_template('index.html', form=form, name=session.get('name'),
known=session.get('known', False))
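# For reference, a minimal NameForm compatible with this view could look like
# the following sketch (an assumption -- the real form is defined in .forms):
#
#   from flask_wtf import FlaskForm
#   from wtforms import StringField, SubmitField
#   from wtforms.validators import DataRequired
#
#   class NameForm(FlaskForm):
#       name = StringField('What is your name?', validators=[DataRequired()])
#       submit = SubmitField('Submit')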
|
yousafsyed/casperjs
|
refs/heads/master
|
bin/Lib/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py
|
2800
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
# NOTE: CP949 is a superset of EUC-KR, so the distribution should not
# be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
JuliusKunze/thalnet
|
refs/heads/master
|
util.py
|
1
|
import functools
from time import strftime
import tensorflow as tf
# lazy_property: caches the result, so no need for manual "if attr is not None" logic
def lazy_property(function):
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
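# Illustrative example (the class is hypothetical): the first access runs the
# function and caches the result on the instance; later accesses reuse it.
#
#   class Model:
#       @lazy_property
#       def prediction(self):
#           print('computed once')
#           return 42
#
#   m = Model()
#   m.prediction   # prints 'computed once', returns 42
#   m.prediction   # returns the cached 42, nothing printed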
def timestamp() -> str:
return strftime('%Y%m%d-%H%M%S')
# from https://gist.github.com/danijar/8663d3bbfd586bffecf6a0094cd116f2:
def doublewrap(function):
"""
A decorator decorator, allowing the decorator to be used without
parentheses if no arguments are provided. All arguments must be optional.
"""
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
def unzip(iterable):
return zip(*iterable)
def single(list):
first = list[0]
assert (len(list) == 1)
return first
|
cheesechoi/KISS
|
refs/heads/master
|
shellcode/nc.py
|
2
|
#!python
from socket import *
from pwn import *
context('i386', 'linux', 'ipv4')
HOST = ''
PORT = 31337
s = socket(AF_INET, SOCK_STREAM)
s.bind( (HOST, PORT) )
s.listen(10)
log.info('Ready to accept a client')
while 1:
conn, addr = s.accept()
print 'Connected with ' + addr[0] + ':' + str(addr[1])
#conn.send('id;uname -a;ifconfig -a;cat flag\n')
ShellWithSocket(conn)
s.close()
|
tjsavage/rototutor_djangononrel
|
refs/heads/master
|
djangotoolbox/sites/__init__.py
|
12133432
| |
frankvdp/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex_multi_apps/__init__.py
|
12133432
| |
jerryge/zulip
|
refs/heads/master
|
zerver/__init__.py
|
12133432
| |
havard024/prego
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/conf/locale/ru/__init__.py
|
12133432
| |
maxsocl/django
|
refs/heads/master
|
tests/admin_filters/__init__.py
|
12133432
| |
fabaff/ansible
|
refs/heads/devel
|
test/units/playbook/test_play.py
|
61
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.block import Block
from ansible.playbook.play import Play
from ansible.playbook.role import Role
from units.mock.loader import DictDataLoader
class TestPlay(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_play(self):
p = Play.load(dict())
self.assertEqual(str(p), '')
def test_basic_play(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
connection='local',
remote_user="root",
sudo=True,
sudo_user="testing",
))
def test_play_with_user(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
user="testing",
gather_facts=False,
))
self.assertEqual(p.remote_user, "testing")
def test_play_with_user_conflict(self):
play_data = dict(
name="test play",
hosts=['foo'],
user="testing",
remote_user="testing",
)
self.assertRaises(AnsibleParserError, Play.load, play_data)
def test_play_with_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_handlers(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
handlers=[dict(action='shell echo "hello world"')],
))
def test_play_with_pre_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
pre_tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_post_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
post_tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_roles(self):
fake_loader = DictDataLoader({
'/etc/ansible/roles/foo/tasks.yml': """
- name: role task
shell: echo "hello world"
""",
})
mock_var_manager = MagicMock()
mock_var_manager.get_vars.return_value = dict()
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
roles=['foo'],
), loader=fake_loader, variable_manager=mock_var_manager)
blocks = p.compile()
def test_play_compile(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
blocks = p.compile()
# with a single block, there will still be three
# implicit meta flush_handler blocks inserted
self.assertEqual(len(blocks), 4)
|
rvmoura96/projeto-almoxarifado
|
refs/heads/master
|
myvenv/Lib/site-packages/pylint/test/functional/syntax_error.py
|
30
|
def toto # [syntax-error]
|
MoamerEncsConcordiaCa/tensorflow
|
refs/heads/master
|
tensorflow/python/training/ftrl.py
|
10
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
See this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
"""
def __init__(self, learning_rate,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False, name="Ftrl"):
"""Construct a new FTRL optimizer.
Args:
learning_rate: A float value or a constant float `Tensor`.
learning_rate_power: A float value, must be less or equal to zero.
initial_accumulator_value: The starting value for accumulators.
Only positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
Raises:
ValueError: If one of the arguments is invalid.
"""
super(FtrlOptimizer, self).__init__(use_locking, name)
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value %f needs to be positive" %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError("learning_rate_power %f needs to be negative or zero" %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
"l1_regularization_strength %f needs to be positive or zero" %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
"l2_regularization_strength %f needs to be positive or zero" %
l2_regularization_strength)
self._learning_rate = learning_rate
self._learning_rate_power = learning_rate_power
self._initial_accumulator_value = initial_accumulator_value
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._learning_rate_tensor = None
self._learning_rate_power_tensor = None
self._l1_regularization_strength_tensor = None
self._l2_regularization_strength_tensor = None
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for v in var_list:
with ops.colocate_with(v):
val = constant_op.constant(self._initial_accumulator_value,
dtype=v.dtype, shape=v.get_shape())
self._get_or_make_slot(v, val, "accum", self._name)
self._zeros_slot(v, "linear", self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate,
name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength,
name="l1_regularization_strength")
self._l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength,
name="l2_regularization_strength")
self._learning_rate_power_tensor = ops.convert_to_tensor(
self._learning_rate_power,
name="learning_rate_power")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
return training_ops.apply_ftrl(
var, accum, linear, grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
return training_ops.resource_apply_ftrl(
var.handle, accum.handle, linear.handle, grad,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
grad.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
grad.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype.base_dtype),
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
return training_ops.sparse_apply_ftrl(
var, accum, linear, grad.values, grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
return training_ops.resource_sparse_apply_ftrl(
var.handle, accum.handle, linear.handle, grad, indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
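# Illustrative usage sketch (variable names and hyperparameter values are
# assumptions, not part of this module):
#
#   opt = FtrlOptimizer(learning_rate=0.1,
#                       l1_regularization_strength=0.001,
#                       l2_regularization_strength=0.001)
#   train_op = opt.minimize(loss)   # `loss` is any scalar Tensor built elsewhere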
|
Instagram/django
|
refs/heads/master
|
tests/regressiontests/staticfiles_tests/tests.py
|
48
|
# -*- encoding: utf-8 -*-
import codecs
import os
import posixpath
import shutil
import sys
import tempfile
from StringIO import StringIO
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage
from django.core.management import call_command
from django.test import TestCase
from django.utils.encoding import smart_unicode
from django.utils._os import rmtree_errorhandler
TEST_ROOT = os.path.normcase(os.path.dirname(__file__))
class StaticFilesTestCase(TestCase):
"""
Test case with a couple utility assertions.
"""
def setUp(self):
self.old_static_url = settings.STATIC_URL
self.old_static_root = settings.STATIC_ROOT
self.old_staticfiles_dirs = settings.STATICFILES_DIRS
self.old_staticfiles_finders = settings.STATICFILES_FINDERS
self.old_media_root = settings.MEDIA_ROOT
self.old_media_url = settings.MEDIA_URL
self.old_admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
self.old_debug = settings.DEBUG
self.old_installed_apps = settings.INSTALLED_APPS
site_media = os.path.join(TEST_ROOT, 'project', 'site_media')
settings.DEBUG = True
settings.MEDIA_ROOT = os.path.join(site_media, 'media')
settings.MEDIA_URL = '/media/'
settings.STATIC_ROOT = os.path.join(site_media, 'static')
settings.STATIC_URL = '/static/'
settings.ADMIN_MEDIA_PREFIX = '/static/admin/'
settings.STATICFILES_DIRS = (
os.path.join(TEST_ROOT, 'project', 'documents'),
)
settings.STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
settings.INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.staticfiles',
'regressiontests.staticfiles_tests',
'regressiontests.staticfiles_tests.apps.test',
'regressiontests.staticfiles_tests.apps.no_label',
]
# Clear the cached default_storage out, this is because when it first
# gets accessed (by some other test), it evaluates settings.MEDIA_ROOT,
# since we're planning on changing that we need to clear out the cache.
default_storage._wrapped = None
# To make sure SVN doesn't hang itself with the non-ASCII characters
# during checkout, we actually create one file dynamically.
self._nonascii_filepath = os.path.join(
TEST_ROOT, 'apps', 'test', 'static', 'test', u'fi\u015fier.txt')
f = codecs.open(self._nonascii_filepath, 'w', 'utf-8')
try:
f.write(u"fi\u015fier in the app dir")
finally:
f.close()
def tearDown(self):
settings.DEBUG = self.old_debug
settings.MEDIA_ROOT = self.old_media_root
settings.MEDIA_URL = self.old_media_url
settings.ADMIN_MEDIA_PREFIX = self.old_admin_media_prefix
settings.STATIC_ROOT = self.old_static_root
settings.STATIC_URL = self.old_static_url
settings.STATICFILES_DIRS = self.old_staticfiles_dirs
settings.STATICFILES_FINDERS = self.old_staticfiles_finders
settings.INSTALLED_APPS = self.old_installed_apps
if os.path.exists(self._nonascii_filepath):
os.unlink(self._nonascii_filepath)
def assertFileContains(self, filepath, text):
self.assertTrue(text in self._get_file(smart_unicode(filepath)),
u"'%s' not in '%s'" % (text, filepath))
def assertFileNotFound(self, filepath):
self.assertRaises(IOError, self._get_file, filepath)
class BuildStaticTestCase(StaticFilesTestCase):
"""
Tests shared by all file-resolving features (collectstatic,
findstatic, and static serve view).
This relies on the asserts defined in UtilityAssertsTestCase, but
is separated because some test cases need those asserts without
all these tests.
"""
def setUp(self):
super(BuildStaticTestCase, self).setUp()
self.old_root = settings.STATIC_ROOT
settings.STATIC_ROOT = tempfile.mkdtemp()
self.run_collectstatic()
def tearDown(self):
# Use our own error handler that can handle .svn dirs on Windows
shutil.rmtree(settings.STATIC_ROOT, ignore_errors=True,
onerror=rmtree_errorhandler)
settings.STATIC_ROOT = self.old_root
super(BuildStaticTestCase, self).tearDown()
def run_collectstatic(self, **kwargs):
call_command('collectstatic', interactive=False, verbosity='0',
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATIC_ROOT, filepath)
f = codecs.open(filepath, "r", "utf-8")
try:
return f.read()
finally:
f.close()
class TestDefaults(object):
"""
A few standard test cases.
"""
def test_staticfiles_dirs(self):
"""
Can find a file in a STATICFILES_DIRS directory.
"""
self.assertFileContains('test.txt', 'Can we find')
def test_staticfiles_dirs_subdir(self):
"""
Can find a file in a subdirectory of a STATICFILES_DIRS
directory.
"""
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
"""
File in STATICFILES_DIRS has priority over file in app.
"""
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
"""
Can find a file in an app static/ directory.
"""
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
def test_nonascii_filenames(self):
"""
Can find a file with non-ASCII character in an app static/ directory.
"""
self.assertFileContains(u'test/fişier.txt', u'fişier in the app dir')
def test_camelcase_filenames(self):
"""
Can find a file with capital letters.
"""
self.assertFileContains(u'test/camelCase.txt', u'camelCase')
class TestFindStatic(BuildStaticTestCase, TestDefaults):
"""
Test ``findstatic`` management command.
"""
def _get_file(self, filepath):
_stdout = sys.stdout
sys.stdout = StringIO()
try:
call_command('findstatic', filepath, all=False, verbosity='0')
sys.stdout.seek(0)
lines = [l.strip() for l in sys.stdout.readlines()]
contents = codecs.open(
smart_unicode(lines[1].strip()), "r", "utf-8").read()
finally:
sys.stdout = _stdout
return contents
def test_all_files(self):
"""
Test that findstatic returns all candidate files if run without --first.
"""
_stdout = sys.stdout
sys.stdout = StringIO()
try:
call_command('findstatic', 'test/file.txt', verbosity='0')
sys.stdout.seek(0)
lines = [l.strip() for l in sys.stdout.readlines()]
finally:
sys.stdout = _stdout
self.assertEqual(len(lines), 3) # three because there is also the "Found <file> here" line
self.assertTrue('project' in lines[1])
self.assertTrue('apps' in lines[2])
class TestBuildStatic(BuildStaticTestCase, TestDefaults):
"""
Test ``collectstatic`` management command.
"""
def test_ignore(self):
"""
Test that -i patterns are ignored.
"""
self.assertFileNotFound('test/test.ignoreme')
def test_common_ignore_patterns(self):
"""
Common ignore patterns (*~, .*, CVS) are ignored.
"""
self.assertFileNotFound('test/.hidden')
self.assertFileNotFound('test/backup~')
self.assertFileNotFound('test/CVS')
class TestBuildStaticExcludeNoDefaultIgnore(BuildStaticTestCase, TestDefaults):
"""
Test ``--exclude-dirs`` and ``--no-default-ignore`` options for
``collectstatic`` management command.
"""
def run_collectstatic(self):
super(TestBuildStaticExcludeNoDefaultIgnore, self).run_collectstatic(
use_default_ignore_patterns=False)
def test_no_common_ignore_patterns(self):
"""
With --no-default-ignore, common ignore patterns (*~, .*, CVS)
are not ignored.
"""
self.assertFileContains('test/.hidden', 'should be ignored')
self.assertFileContains('test/backup~', 'should be ignored')
self.assertFileContains('test/CVS', 'should be ignored')
class TestNoFilesCreated(object):
def test_no_files_created(self):
"""
Make sure no files were created in the destination directory.
"""
self.assertEqual(os.listdir(settings.STATIC_ROOT), [])
class TestBuildStaticDryRun(BuildStaticTestCase, TestNoFilesCreated):
"""
Test ``--dry-run`` option for ``collectstatic`` management command.
"""
def run_collectstatic(self):
super(TestBuildStaticDryRun, self).run_collectstatic(dry_run=True)
class TestBuildStaticNonLocalStorage(BuildStaticTestCase, TestNoFilesCreated):
"""
Tests for #15035
"""
def setUp(self):
self.old_staticfiles_storage = settings.STATICFILES_STORAGE
settings.STATICFILES_STORAGE = 'regressiontests.staticfiles_tests.storage.DummyStorage'
super(TestBuildStaticNonLocalStorage, self).setUp()
def tearDown(self):
super(TestBuildStaticNonLocalStorage, self).tearDown()
settings.STATICFILES_STORAGE = self.old_staticfiles_storage
if sys.platform != 'win32':
class TestBuildStaticLinks(BuildStaticTestCase, TestDefaults):
"""
Test ``--link`` option for ``collectstatic`` management command.
Note that by inheriting ``TestDefaults`` we repeat all
the standard file resolving tests here, to make sure using
``--link`` does not change the file-selection semantics.
"""
def run_collectstatic(self):
super(TestBuildStaticLinks, self).run_collectstatic(link=True)
def test_links_created(self):
"""
With ``--link``, symbolic links are created.
"""
self.assertTrue(os.path.islink(os.path.join(settings.STATIC_ROOT, 'test.txt')))
class TestServeStatic(StaticFilesTestCase):
"""
Test static asset serving view.
"""
urls = 'regressiontests.staticfiles_tests.urls.default'
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.STATIC_URL, filepath))
def assertFileContains(self, filepath, text):
self.assertContains(self._response(filepath), text)
def assertFileNotFound(self, filepath):
self.assertEqual(self._response(filepath).status_code, 404)
class TestServeDisabled(TestServeStatic):
"""
Test serving static files disabled when DEBUG is False.
"""
def setUp(self):
super(TestServeDisabled, self).setUp()
settings.DEBUG = False
def test_disabled_serving(self):
self.assertRaisesRegexp(ImproperlyConfigured, 'The staticfiles view '
'can only be used in debug mode ', self._response, 'test.txt')
class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
"""
Test static asset serving view with manually configured URLconf.
"""
pass
class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
"""
Test static asset serving view with staticfiles_urlpatterns helper.
"""
urls = 'regressiontests.staticfiles_tests.urls.helper'
class TestServeAdminMedia(TestServeStatic):
"""
Test serving media from django.contrib.admin.
"""
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.ADMIN_MEDIA_PREFIX, filepath))
def test_serve_admin_media(self):
self.assertFileContains('css/base.css', 'body')
class FinderTestCase(object):
"""
Base finder test mixin
"""
def test_find_first(self):
src, dst = self.find_first
self.assertEqual(self.finder.find(src), dst)
def test_find_all(self):
src, dst = self.find_all
self.assertEqual(self.finder.find(src, all=True), dst)
class TestFileSystemFinder(StaticFilesTestCase, FinderTestCase):
"""
Test FileSystemFinder.
"""
def setUp(self):
super(TestFileSystemFinder, self).setUp()
self.finder = finders.FileSystemFinder()
test_file_path = os.path.join(TEST_ROOT, 'project', 'documents', 'test', 'file.txt')
self.find_first = (os.path.join('test', 'file.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file.txt'), [test_file_path])
class TestAppDirectoriesFinder(StaticFilesTestCase, FinderTestCase):
"""
Test AppDirectoriesFinder.
"""
def setUp(self):
super(TestAppDirectoriesFinder, self).setUp()
self.finder = finders.AppDirectoriesFinder()
test_file_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test', 'file1.txt')
self.find_first = (os.path.join('test', 'file1.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file1.txt'), [test_file_path])
class TestDefaultStorageFinder(StaticFilesTestCase, FinderTestCase):
"""
Test DefaultStorageFinder.
"""
def setUp(self):
super(TestDefaultStorageFinder, self).setUp()
self.finder = finders.DefaultStorageFinder(
storage=storage.StaticFilesStorage(location=settings.MEDIA_ROOT))
test_file_path = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
self.find_first = ('media-file.txt', test_file_path)
self.find_all = ('media-file.txt', [test_file_path])
class TestMiscFinder(TestCase):
"""
A few misc finder tests.
"""
def test_get_finder(self):
self.assertTrue(isinstance(finders.get_finder(
'django.contrib.staticfiles.finders.FileSystemFinder'),
finders.FileSystemFinder))
def test_get_finder_bad_classname(self):
self.assertRaises(ImproperlyConfigured,
finders.get_finder, 'django.contrib.staticfiles.finders.FooBarFinder')
def test_get_finder_bad_module(self):
self.assertRaises(ImproperlyConfigured,
finders.get_finder, 'foo.bar.FooBarFinder')
class TestStaticfilesDirsType(TestCase):
"""
We can't determine if STATICFILES_DIRS is set correctly just by looking at
the type, but we can determine if it's definitely wrong.
"""
def setUp(self):
self.old_settings_dir = settings.STATICFILES_DIRS
settings.STATICFILES_DIRS = 'a string'
def tearDown(self):
settings.STATICFILES_DIRS = self.old_settings_dir
def test_non_tuple_raises_exception(self):
self.assertRaises(ImproperlyConfigured, finders.FileSystemFinder)
|
DaveBerkeley/android
|
refs/heads/master
|
MqttControls/mqtt.py
|
1
|
#!/usr/bin/python
import json
class Control:
def __init__(self, klass, *args, **kwargs):
self.d = [ klass, kwargs, ]
def json(self):
return self.d
#
#
def GridView(elements, *args, **kwargs):
cols = 0
for col in elements:
cols = max(cols, len(col))
rows = len(elements)
# re-order into a linear sequence
seq = []
for row in elements:
for i in range(cols):
if i < len(row):
seq.append(row[i] or TextLabel(""))
else:
seq.append(TextLabel(""))
c = Control("GridView", elements=seq, rows=rows, cols=cols, **kwargs)
return c.json()
def Page(name, elements, *args, **kwargs):
c = Control("Page", title=name, elements=elements, **kwargs)
return c.json()
def Button(text, topic, send, *args, **kwargs):
c = Control("Button", text=text, topic=topic, send=send, **kwargs)
return c.json()
def TextLabel(text, *args, **kwargs):
c = Control("TextLabel", text=text, **kwargs)
return c.json()
def TextView(topic, field=None, *args, **kwargs):
c = Control("TextView", topic=topic, field=field, **kwargs)
return c.json()
def CheckBox(topic, field=None):
c = Control("CheckBox", topic=topic, field=field)
return c.json()
def ProgressBar(minf, maxf, topic, field=None):
c = Control("ProgressBar", min=minf, max=maxf, topic=topic, field=field)
return c.json()
def SeekBar(minf, maxf, topic):
c = Control("SeekBar", min=minf, max=maxf, topic=topic)
return c.json()
def Bell(topic, field=None):
c = Control("Bell", topic=topic, field=field)
return c.json()
def GPS(topic):
c = Control("GPS", topic=topic)
return c.json()
def Url(topic, text="text", url="url", *args, **kwargs):
c = Control("Url", topic=topic, text=text, url=url, **kwargs)
return c.json()
def EditText(topic, field=None, *args, **kwargs):
c = Control("EditText", topic=topic, field=field, **kwargs)
return c.json()
def WebView(topic, field=None, *args, **kwargs):
c = Control("WebView", topic=topic, field=field, **kwargs)
return c.json()
#
#
water = [
[
TextLabel("x"),
TextView("home/jeenet/magnetometerdev_12", "x", post=" mGs"),
],
[
TextLabel("y"),
TextView("home/jeenet/magnetometerdev_12", "y", post=" mGs"),
],
[
TextLabel("z"),
TextView("home/jeenet/magnetometerdev_12", "z", post=" mGs"),
],
]
watermeter = [
TextLabel("Water Meter"),
GridView(water),
TextView("home/jeenet/magnetometerdev_12", "time"),
TextView("home/jeenet/magnetometerdev_12", "vcc", post=" V"),
TextView("home/jeenet/magnetometerdev_12", "temp", post=" C"),
]
#
#
temp = [
[
TextLabel("Office"),
ProgressBar(0.0, 30.0, "home/node/105", "temp"),
TextView("home/node/105", "temp", post=" C"),
],
[
TextLabel("Office - 9V"),
ProgressBar(0.0, 30.0, "home/jeenet/voltagedev_10", "temp"),
TextView("home/jeenet/voltagedev_10", "temp", post=" C"),
],
[
TextLabel("Front Room"),
ProgressBar(0.0, 30.0, "home/node/104", "temp"),
TextView("home/node/104", "temp", post=" C"),
],
[
TextLabel("Front Room (test)"),
ProgressBar(0.0, 30.0, "home/jeenet/testdev_1", "temp"),
TextView("home/jeenet/testdev_1", "temp", post=" C"),
],
[
TextLabel("Front Bedroom"),
ProgressBar(0.0, 30.0, "home/node/109", "temp"),
TextView("home/node/109", "temp", post=" C"),
],
[
TextLabel("Back Room (esp)"),
ProgressBar(0.0, 30.0, "home/node/108", "temp"),
TextView("home/node/108", "temp", post=" C"),
],
[
TextLabel("Back Room (humidity)"),
ProgressBar(0.0, 30.0, "home/jeenet/humiditydev_2", "temp"),
TextView("home/jeenet/humiditydev_2", "temp", post=" C"),
],
[
TextLabel("Porch"),
ProgressBar(0.0, 30.0, "home/jeenet/voltagedev_9", "temp"),
TextView("home/jeenet/voltagedev_9", "temp", post=" C"),
],
[
TextLabel("Car"),
ProgressBar(0.0, 30.0, "home/jeenet/voltagedev_11", "temp"),
TextView("home/jeenet/voltagedev_11", "temp", post=" C"),
],
[
TextLabel("Water Meter"),
ProgressBar(0.0, 30.0, "home/jeenet/magnetometerdev_12", "temp"),
TextView("home/jeenet/magnetometerdev_12", "temp", post=" C"),
],
[
TextLabel("Servers:"),
],
[
TextLabel("Gateway"),
ProgressBar(0.0, 50.0, "home/jeenet/gateway", "temp"),
TextView("home/jeenet/gateway", "temp", post=" C"),
],
[
TextLabel("klatu 0"),
ProgressBar(0.0, 50.0, "home/net/klatu", "temp_0"),
TextView("home/net/klatu", "temp_0", post=" C"),
],
[
TextLabel("klatu 1"),
ProgressBar(0.0, 50.0, "home/net/klatu", "temp_1"),
TextView("home/net/klatu", "temp_1", post=" C"),
],
]
temperature = [
TextLabel("Temperature"),
GridView(temp),
]
#
#
keys = [
[
Button("1", "uif/kb", "1"),
Button("2", "uif/kb", "2"),
Button("3", "uif/kb", "3"),
],
[
Button("4", "uif/kb", "4"),
Button("5", "uif/kb", "5"),
Button("6", "uif/kb", "6"),
],
[
Button("7", "uif/kb", "7"),
Button("8", "uif/kb", "8"),
Button("9", "uif/kb", "9"),
],
[
Button("*", "uif/kb", '"*"'),
Button("0", "uif/kb", "0"),
Button("#", "uif/kb", '"#"'),
],
[
None,
Button("CLR", "uif/clr", "1"),
],
]
keyboard = [
TextLabel("Keyboard Example"),
TextView("uif/result"),
GridView(keys),
]
#
#
controls = [
ProgressBar(0.0, 180.0, "uif/seek/1"),
GridView([
[
TextLabel("Car Battery"),
ProgressBar(11.0, 13.0, "home/jeenet/voltagedev_11", "voltage"),
TextView("home/jeenet/voltagedev_11", "voltage", post=" V"),
],
[
TextLabel("Car Temp."),
ProgressBar(0.0, 35.0, "home/jeenet/voltagedev_11", "temp"),
TextView("home/jeenet/voltagedev_11", "temp", post=" C"),
],
[
TextLabel("River Levels"),
],
[
TextLabel("Newlyn"),
ProgressBar(-2.5, 2.8, "rivers/level/3156", "level"),
TextView("rivers/level/3156", "level", post=" m"),
],
[
TextLabel("Devonport"),
ProgressBar(-2.5, 2.8, "rivers/level/3344", "level"),
TextView("rivers/level/3344", "level", post=" m"),
],
[
TextLabel("Richmond"),
ProgressBar(-1.0, 4.8, "rivers/level/7393", "level"),
TextView("rivers/level/7393", "level", post=" m"),
],
]),
TextLabel("Gas Meter (sector)"),
ProgressBar(63.0, 0.0, "home/gas", "sector"),
GridView([
[
TextLabel("Relay"),
Button("On", "rpc/jeenet", '{"device": "relaydev_7", "fn": "set_relay", "args":[1]}'),
Button("Off", "rpc/jeenet", '{"device": "relaydev_7", "fn": "set_relay", "args":[0]}'),
CheckBox("home/jeenet/relaydev_7", "relay"),
TextView("home/jeenet/relaydev_7", "temp", post=" C"),
]
]),
SeekBar(0.0, 180.0, "uif/seek/1"),
TextLabel("Export"),
ProgressBar(0.0, -3000.0, "home/power", "power"),
TextLabel("Import"),
ProgressBar(0.0, 3000.0, "home/power", "power"),
TextView("home/power", "power", post=" W"),
GPS("uif/gps/%I"),
Url("url/alert/dave", fontsize=20, textcolor="blue"),
TextLabel(""),
GridView([
[
Button("Power Delta", "uif/button/power_delta", "1"),
CheckBox("uif/power_delta"),
TextView("uif/power_max", post=" W"),
Bell("bell/1"),
]
], fontsize=18, textcolor="red"),
TextLabel("first"),
TextLabel("another"),
TextLabel("FIN"),
]
#
#
def make_chat(title, ident, other):
chat = [
TextLabel(title),
TextView("uif/text/" + other, fontsize=25),
EditText("uif/text/" + ident),
]
return chat
chat1 = make_chat("Chat 1", "maggie", "dave")
chat2 = make_chat("Chat 2", "dave", "maggie")
#
#
web = [
WebView("url/alert/dave", "url"),
]
#
#
player = [
TextView("home/player/status", "status", fontsize=30),
Button("Play", "home/player/control", r'"play"'),
Button("Stop", "home/player/control", r'"stop"'),
Button("Pause", "home/player/control", r'"pause"'),
Button("Debug", "home/player/control", r'"dir"'),
ProgressBar(0.0, 100.0, "home/player/status", "percent"),
]
#
#
pages = [
Page("Main Page", controls),
Page("Player", player),
Page("Web View", web),
Page("Chat 1", chat1),
Page("Chat 2", chat2),
Page("Temperature", temperature),
#Page("Water Meter", watermeter),
Page("Keyboard", keyboard),
]
#
#
print "Content-type: application/json"
print
print json.dumps(pages)
# FIN
|
twschum/mix-mind
|
refs/heads/master
|
mixmind/util.py
|
1
|
""" Miscallanious util funcitons for the mix-mind project
"""
from functools import wraps
from fractions import Fraction
from collections import OrderedDict, namedtuple
import operator
import json
import csv
import inspect
import uuid
import pendulum
from .logger import get_logger
log = get_logger(__name__)
# make passing a bunch of options around a bit cleaner
DisplayOptions = namedtuple('DisplayOptions', 'prices,stats,examples,all_ingredients,markup,prep_line,origin,info,variants')
FilterOptions = namedtuple('FilterOptions', 'search,all_,include,exclude,include_use_or,exclude_use_or,style,glass,prep,ice,tag')
PdfOptions = namedtuple('PdfOptions', 'pdf_filename,ncols,liquor_list,liquor_list_own_page,debug,align,title,tagline')
VALID_UNITS = ['oz', 'mL', 'cL']
class ResultRecipes(object):
def add_items(self, recipes):
pass
def get_items(self):
pass
class UnionResultRecipes(ResultRecipes):
def __init__(self):
self.container = OrderedDict()
def add_items(self, recipes):
for recipe in recipes:
self.container[recipe.name] = recipe
def get_items(self):
return list(self.container.values())
class IntersectionResultRecipes(ResultRecipes):
def __init__(self):
self.container = None
def add_items(self, recipes):
if self.container is None:
self.container = recipes
else:
self.container = [x for x in self.container if x in recipes]
def get_items(self):
return self.container
def filter_recipes(all_recipes, filter_options, union_results=False):
"""Filters the recipe list based on a FilterOptions bundle of parameters
:param list[Recipe] all_recipes: list of recipe object to filter
:param FilterOptions filter_options: bundle of filtering parameters
search str: search an arbitrary string against the ingredients and attributes
:param bool union_results: for each attributes searched against, combine results
with set intersection by default, or union if True
"""
result_recipes = UnionResultRecipes() if union_results else IntersectionResultRecipes()
recipes = [recipe for recipe in all_recipes if filter_options.all_ or recipe.can_make]
if filter_options.search:
include_list = [filter_options.search.lower()]
else:
include_list = [i.lower() for i in filter_options.include]
if include_list:
reduce_fn = any if filter_options.include_use_or else all
result_recipes.add_items([recipe for recipe in recipes if
reduce_fn((recipe.contains_ingredient(ingredient, include_optional=True)
for ingredient in include_list))])
if filter_options.exclude:
reduce_fn = any if filter_options.exclude_use_or else all
result_recipes.add_items([recipe for recipe in recipes if
reduce_fn((not recipe.contains_ingredient(ingredient, include_optional=False)
for ingredient in filter_options.exclude))])
for attr in 'style glass prep ice tag'.split():
result_recipes.add_items(filter_on_attribute(recipes, filter_options, attr))
result_recipes = result_recipes.get_items()
def get_names(items):
return set([i.name for i in items])
excluded = sorted(list(get_names(all_recipes) - get_names(result_recipes)))
log.debug("Excluded: {}\n".format(', '.join(excluded)))
return result_recipes, excluded
def filter_on_attribute(recipes, filter_options, attribute):
attr_value = getattr(filter_options, attribute).lower()
if filter_options.search and not attr_value:
attr_value = filter_options.search.lower()
if attr_value:
recipes = [recipe for recipe in recipes if attr_value in getattr(recipe, attribute).lower()]
return recipes
def get_uuid():
return str(uuid.uuid4())
class StatTracker(dict):
# mutable class variables
_title_width = 0
_name_width = 0
def __init__(self, attr, magnitude, str_title):
if magnitude not in ('max', 'min'):
raise ValueError('StatTracker magnitude must be "max" or "min"')
self.op = operator.lt if magnitude == 'min' else operator.gt
self.stat = '{}_{}'.format(magnitude, attr)
self.val_attr = attr
self.val = float('inf') if magnitude == 'min' else 0.0
self['title'] = str_title
if len(str_title) > StatTracker._title_width:
StatTracker._title_width = len(str_title)
def __str__(self):
return "{{title:{}}} | {{drink_name:{}}} | ${{cost:.2f}} | {{abv:>5.2f}}% ABV | {{std_drinks:.2f}} | {{kinds}}"\
.format(self._title_width+1, self._name_width+1).format(**self)
def as_html(self):
return "<tr><td> {{title:{}}} </td><td> {{drink_name:{}}} </td><td> ${{cost:.2f}} </td><td> {{abv:>5.2f}}% ABV </td><td> {{std_drinks:.2f}} </td><td style:text-align=left> {{kinds}} </td></tr>"\
.format(self._title_width+1, self._name_width+1).format(**self)
def update_stat(self, recipe):
example = getattr(recipe.stats, self.stat)
ex_val = getattr(example, self.val_attr)
if self.op(ex_val, self.val):
self.val = ex_val
self.update(example._asdict())
self['drink_name'] = recipe.name
if len(recipe.name) > StatTracker._name_width:
StatTracker._name_width = len(recipe.name)
def report_stats(recipes, as_html=False):
most_expensive = StatTracker('cost', 'max', 'Most Expensive')
most_booze = StatTracker('std_drinks', 'max', 'Most Std Drinks')
most_abv = StatTracker('abv', 'max', 'Highest Estimated ABV')
least_expensive = StatTracker('cost', 'min', 'Least Expensive')
least_booze = StatTracker('std_drinks', 'min', 'Fewest Std Drinks')
least_abv = StatTracker('abv', 'min', 'Lowest Estimated ABV')
for recipe in recipes:
if recipe.calculate_stats():
most_expensive.update_stat(recipe)
most_booze.update_stat(recipe)
most_abv.update_stat(recipe)
least_expensive.update_stat(recipe)
least_booze.update_stat(recipe)
least_abv.update_stat(recipe)
if as_html:
return "<table class=statsbloc><thead><th></th><th>Drink</th><th style='text-align: right;'>Cost</th><th style='text-align: right;'>Est ABV</th><th style='text-align: right;'>Std Drinks</th><th>Ingredients</th></thead><tbody>{}</tbody></table>".format(''.join([s.as_html()
for s in [most_expensive, most_booze, most_abv, least_expensive, least_booze, least_abv]]))
else:
return [most_expensive, most_booze, most_abv, least_expensive, least_booze, least_abv]
def load_recipe_json(recipe_files):
base_recipes = OrderedDict()
for recipe_json in recipe_files:
with open(recipe_json) as fp:
other_recipes = json.load(fp, object_pairs_hook=OrderedDict)
log.info("Recipes loaded from {}".format(recipe_json))
for item in other_recipes.values():
item.update({'source_file': recipe_json})
for name in [name for name in list(other_recipes.keys()) if name in list(base_recipes.keys())]:
log.debug("Keeping {} from {} over {}".format(name, base_recipes[name]['source_file'], other_recipes[name]['source_file']))
del other_recipes[name]
base_recipes.update(other_recipes)
return base_recipes
def default_initializer(func):
names, varargs, keywords, defaults = inspect.getargspec(func)
@wraps(func)
def wrapper(self, *args, **kwargs):
for name, arg in list(zip(names[1:], args)) + list(kwargs.items()):
setattr(self, name, arg)
func(self, *args, **kwargs)
return wrapper
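# Illustrative note (not part of the original module): default_initializer copies
# every argument that is actually passed onto `self` before the wrapped __init__
# body runs, e.g.
#
#     class Point(object):
#         @default_initializer
#         def __init__(self, x, y=0):
#             pass   # self.x and self.y are already set when called as Point(3, y=4)
#
# IngredientSpecifier below relies on this to set self.ingredient and self.kind.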
# utils to convert string values
def from_float(s):
if not s:
return 0.0
return float(s)
def from_price_float(s):
if isinstance(s, str):
return from_float(s.replace('$', ''))
return from_float(s)
def from_bool_from_num(s):
if not s:
return False
return bool(float(s))
def to_fraction(amount):
""" Get a mixed number notation
0.5 -> 1/2
1.25 -> 1 1/4
"""
fraction = Fraction.from_float(float(amount)).limit_denominator(99)
if fraction.denominator == 1:
return fraction.numerator
whole = fraction.numerator // fraction.denominator
numer = fraction.numerator % fraction.denominator
return "{}{}/{}".format(str(int(whole))+' ' if whole > 0 else '', numer, fraction.denominator)
def calculate_price(cost, markup):
return int(((cost + 1) * float(markup)) +1)
def calculate_std_drinks(abv, amount, unit):
""" Standard drink is 1.5 oz or 45 ml at 40% abv
"""
adjusted_abv = abv / 40.0
adjusted_amount = convert_units(amount, unit, 'oz') / 1.5
return adjusted_abv * adjusted_amount
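# Worked example (illustrative): a 3 oz pour at 20% ABV is
#     (20.0 / 40.0) * (3.0 / 1.5) = 1.0
# standard drinks, i.e. calculate_std_drinks(20.0, 3.0, 'oz') == 1.0.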
def calculate_abv(std_drinks, volume, unit):
if unit == 'oz':
units_per_std_drink = 1.5
elif unit == 'mL':
units_per_std_drink = 45.0
elif unit == 'cL':
units_per_std_drink = 4.5
else:
raise NotImplementedError("number of standard drinks for unit '{}' is unknown".format(unit))
abv = 40.0 * (std_drinks*units_per_std_drink / volume)
return abv
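# Worked example (illustrative): 1.5 standard drinks in a 6 oz drink gives
#     40.0 * (1.5 * 1.5 / 6.0) = 15.0
# i.e. calculate_abv(1.5, 6.0, 'oz') == 15.0 (% ABV).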
# units, yo
ML_PER_OZ = 29.5735
ML_PER_OZ_ROUNDED = 30.0
ML_PER_TSP = 4.92892
ML_PER_TSP_ROUNDED = 5.0
ML_PER_DS = 0.92
ML_PER_CL = 10.0
ML_PER_DROP = 0.12
OZ_PER_TSP = 1.0/8.0
OZ_PER_DS = 1.0/32.0
OZ_PER_DROP = 1.0/240.0
def convert_units(amount, from_unit, to_unit, rounded=False):
if from_unit == 'literal':
return amount
try:
amount = float(amount)
except TypeError: # pd series breaks this
pass  # keep the value as-is (e.g. a pandas Series)
if from_unit == to_unit:
return amount
unit_conversions = {
'ds': dash_to_volume,
'tsp': tsp_to_volume,
'mL': mL_to_volume,
'cL': cL_to_volume,
'oz': oz_to_volume,
'drop': drop_to_volume,
}
convert = unit_conversions.get(from_unit, lambda x,y,z: no_conversion(from_unit, to_unit))
return convert(amount, to_unit, rounded)
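# Illustrative conversions implied by the constants above:
#     convert_units(1.0, 'oz', 'mL')                -> 29.5735
#     convert_units(1.0, 'oz', 'mL', rounded=True)  -> 30.0
#     convert_units(10.0, 'mL', 'cL')               -> 1.0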
def no_conversion(from_unit, to_unit):
raise NotImplementedError("conversion from {} to {}".format(from_unit, to_unit))
def dash_to_volume(amount, unit, rounded=False):
mL_per_oz = ML_PER_OZ if not rounded else ML_PER_OZ_ROUNDED
if unit == 'mL':
return amount * ML_PER_DS
elif unit == 'cL':
return amount * ML_PER_DS / ML_PER_CL
elif unit == 'oz':
return amount * ML_PER_DS / mL_per_oz  # a dash is ML_PER_DS mL; convert via mL
else:
no_conversion('dash', unit)
def tsp_to_volume(amount, unit, rounded=False):
mL_per_tsp = ML_PER_TSP if not rounded else ML_PER_TSP_ROUNDED
if unit == 'oz':
return amount * OZ_PER_TSP
elif unit == 'mL':
return amount * mL_per_tsp
elif unit == 'cL':
return amount * mL_per_tsp / ML_PER_CL
else:
no_conversion('tsp', unit)
def oz_to_volume(amount, unit, rounded=False):
mL_per_oz = ML_PER_OZ if not rounded else ML_PER_OZ_ROUNDED
if unit == 'mL':
return amount * mL_per_oz
elif unit == 'cL':
return amount * mL_per_oz / ML_PER_CL
elif unit == 'tsp':
return amount / OZ_PER_TSP
elif unit == 'ds':
return amount / OZ_PER_DS
elif unit == 'drop':
return amount / OZ_PER_DROP
else:
no_conversion('oz', unit)
def mL_to_volume(amount, unit, rounded=False):
mL_per_oz = ML_PER_OZ if not rounded else ML_PER_OZ_ROUNDED
mL_per_tsp = ML_PER_TSP if not rounded else ML_PER_TSP_ROUNDED
if unit == 'oz':
return amount / mL_per_oz
elif unit == 'cL':
return amount / ML_PER_CL
elif unit == 'ds':
return amount / ML_PER_DS
elif unit == 'tsp':
return amount / mL_per_tsp
elif unit == 'drop':
return amount / ML_PER_DROP
elif unit == 'mL':
return amount
else:
no_conversion('mL', unit)
def drop_to_volume(amount, unit, rounded=False):
if unit == 'oz':
return amount * OZ_PER_DROP
elif unit == 'mL':
return amount * ML_PER_DROP
elif unit == 'cL':
return amount * ML_PER_DROP / ML_PER_CL
else:
no_conversion('drop', unit)
def cL_to_volume(amount, unit, rounded=False):
try:
return mL_to_volume(amount, unit, rounded) * ML_PER_CL
except NotImplementedError:
no_conversion('cL', unit)
class IngredientSpecifier(object):
""" Allow ingredient:kind in recipes,
e.g. "white rum:Barcadi Catra Blanca" or "aromatic bitters:Angostura"
"""
@default_initializer
def __init__(self, ingredient, kind=None):
if ingredient is None:
raise ValueError("IngredientSpecifier ingredient (type) cannot be None")
if '(' in ingredient and ')' in ingredient:
self.extra = ingredient.strip()[ingredient.find('('):]
self.ingredient = ingredient.strip()[:ingredient.find('(')].strip()
else:
self.extra = None
@classmethod
def from_string(cls, type_str):
if ':' in type_str:
t = type_str.split(':')
if len(t) == 2:
ingredient = t[0]
kind = t[1]
else:
raise ValueError("Unknown ingredient specifier: {}".format(type_str))
else:
ingredient = type_str
kind = None
return cls(ingredient, kind)
def __str__(self):
return self.kind if self.kind else "{}{}".format(self.ingredient, ' '+self.extra if self.extra else '')
def __repr__(self):
return "{}:{}".format(self.ingredient, self.kind if self.kind else '')
def to_human_diff(dt):
"""Return datetime as humanized diff from now"""
return pendulum.instance(dt).diff_for_humans() if dt else '-'
def get_ts_formatter(fmt, tz):
"""Returns callable that will format a datetime"""
return lambda dt: pendulum.instance(dt).in_timezone(tz).format(fmt) if dt else '-'
|
pk400/catering
|
refs/heads/master
|
myvenv/lib/python3.4/site-packages/django/db/models/query.py
|
33
|
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F, Date, DateTime
from django.db.models.fields import AutoField
from django.db.models.query_utils import (
Q, InvalidQuery, check_rel_lookup_compatibility, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.functional import partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable(object):
def __init__(self, queryset):
self.queryset = queryset
class ModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql()
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
if klass_info is None:
return
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set]
model_cls = deferred_class_factory(model_cls, skip)
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_objects = {} # {rel_field, {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled queryset instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
- Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
- Returns one row at time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self))
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field. Multi-table models are not supported.
"""
# So this case is fun. When you bulk insert you don't get the primary
# keys back (if it's an autoincrement), so you can't insert into the
# child tables which references this. There are two workarounds, 1)
# this could be implemented if you didn't have an autoincrement pk,
# and 2) you could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back, and then doing a single bulk
# insert into the childmost table. Some databases might allow doing
# this by using RETURNING clause for the insert query. We're punting
# on these for now because they are relatively rare cases.
assert batch_size is None or batch_size > 0
# Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
self._batched_insert(objs_without_pk, fields, batch_size)
return objs
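# Illustrative usage (standard Django API; `Entry` is a placeholder model):
#     Entry.objects.bulk_create([
#         Entry(headline='This is a test'),
#         Entry(headline='This is only a test'),
#     ])
# inserts the rows in batched INSERT statements without calling save() or
# sending pre/post save signals, as described in the docstring above.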
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
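# Illustrative usage (standard Django API; `Person` is a placeholder model):
#     obj, created = Person.objects.get_or_create(
#         first_name='John', last_name='Lennon',
#         defaults={'birthday': date(1940, 10, 9)},
#     )
# `created` is True only when no matching row existed and one was inserted.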
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
obj = self.get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in six.iteritems(defaults):
setattr(obj, k, v)
with transaction.atomic(using=self.db, savepoint=False):
obj.save(using=self.db)
return obj, False
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model,
params=params, translations=translations,
using=using)
def _values(self, *fields):
clone = self._clone()
clone._fields = fields
query = clone.query
query.select_related = False
query.clear_deferred_loading()
query.clear_select_fields()
if query.group_by is True:
query.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
query.set_group_by()
query.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not query._extra and not query._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
query.default_cols = False
for f in fields:
if f in query.extra_select:
extra_names.append(f)
elif f in query.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
query.set_extra_mask(extra_names)
query.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
query.values_select = field_names
query.add_fields(field_names, True)
return clone
def values(self, *fields):
clone = self._values(*fields)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self._values(*fields)
clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
return clone
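# Illustrative usage (standard Django API; `Entry` is a placeholder model):
#     Entry.objects.values_list('id', 'headline')  # yields tuples like (1, 'First entry')
#     Entry.objects.values_list('id', flat=True)   # yields bare values like 1, 2, 3
# flat=True is only valid with a single field, as enforced above.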
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Date(field_name, kind),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=DateTime(field_name, kind, tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
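# Illustrative usage (standard Django API; `Entry` is a placeholder model):
#     Entry.objects.datetimes('pub_date', 'month')
# returns one datetime per distinct month appearing in pub_date, truncated to
# the start of the month (timezone-aware when USE_TZ is enabled).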
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
A helper method for bulk_create() that inserts the objects one batch at a
time, slicing each batch off the front of the list until none remain.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
for batch in [objs[i:i + batch_size]
for i in range(0, len(objs), batch_size)]:
self.model._base_manager._insert(batch, fields=fields,
using=self.db)
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare(self):
if self._fields is not None:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
if self._fields is not None:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
clone = self._clone()
else:
clone = self.values('pk')
if clone._db is None or connection == connections[clone._db]:
return clone.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
isn't equivalent for checking if all objects are present in results,
for example qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts, field):
"""
Check that using this queryset as the rhs value for a lookup is
allowed. The opts are the options of the relation's target we are
querying against. For example in .filter(author__in=Author.objects.all())
the opts would be Author's (from the author field) and self.model would
be Author.objects.all() queryset's .model (Author also). The field is
the related field on the lhs side.
"""
# We trust that users of values() know what they are doing.
if self._fields is not None:
return True
return check_rel_lookup_compatibility(self.model, opts, field)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
class Prefetch(object):
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_through(self, level):
return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
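# Illustrative usage (standard Django API; `Question`/`Choice` are placeholder models):
#     Question.objects.prefetch_related(
#         Prefetch('choice_set',
#                  queryset=Choice.objects.filter(votes__gt=0),
#                  to_attr='popular_choices'))
# runs one extra query for the filtered choices and stores the result on each
# Question instance as `popular_choices` instead of the default related cache.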
def normalize_prefetch_lookups(lookups, prefix=None):
"""
Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(result_cache, related_lookups):
"""
Helper function for prefetch_related functionality
Populates prefetched objects caches for a list of results
from a QuerySet
"""
if len(result_cache) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some book keeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = result_cache
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except AttributeError:
# Must be in a QuerySet subclass that is not returning
# Model instances, either in Django or 3rd
# party. prefetch_related() doesn't make sense, so quit
# now.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects
Runs prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', [])
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
# Cache in the QuerySet.all().
qs = getattr(obj, to_attr).all()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator(object):
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def get_deferred_cls(self, klass_info, init_list):
model_cls = klass_info['model']
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [
f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set
]
model_cls = deferred_class_factory(model_cls, skip)
return model_cls
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
|
alexei-matveev/ase-local
|
refs/heads/master
|
doc/tutorials/constraints/diffusion.py
|
7
|
# creates: diffusion-path.png
import os
from ase.io import read, write
if 1:
execfile('diffusion4.py')
images = [read('mep%d.traj' % i) for i in range(5)]
a = images[0] + images[1] + images[2] + images[3] + images[4]
del a.constraints
a *= (2, 1, 1)
a.set_cell(images[0].get_cell())
write('diffusion-path.pov', a, show_unit_cell=2, rotation='-90x',
transparent=False, display=False, run_povray=True)
|
kashif/chainer
|
refs/heads/master
|
chainer/links/model/classifier.py
|
1
|
from chainer.functions.evaluation import accuracy
from chainer.functions.loss import softmax_cross_entropy
from chainer import link
from chainer import reporter
class Classifier(link.Chain):
"""A simple classifier model.
This is an example of a chain that wraps another chain. It computes the
loss and accuracy based on a given input/label pair.
Args:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
label_key (int or str): Key to specify label variable from arguments.
When it is ``int``, a variable in positional arguments is used.
And when it is ``str``, a variable in keyword arguments is used.
Attributes:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
y (~chainer.Variable): Prediction for the last minibatch.
loss (~chainer.Variable): Loss value for the last minibatch.
accuracy (~chainer.Variable): Accuracy for the last minibatch.
compute_accuracy (bool): If ``True``, compute accuracy on the forward
computation. The default value is ``True``.
"""
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy,
label_key=-1):
if not (isinstance(label_key, (int, str))):
raise TypeError('label_key must be int or str, but is %s' %
type(label_key))
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
self.label_key = label_key
with self.init_scope():
self.predictor = predictor
def __call__(self, *args, **kwargs):
"""Computes the loss value for an input and label pair.
It also computes accuracy and stores it to the attribute.
Args:
args (list of ~chainer.Variable): Input minibatch.
kwargs (dict of ~chainer.Variable): Input minibatch.
When ``label_key`` is ``int``, the corresponding element in ``args``
is treated as the ground truth labels. When it is ``str``, the
element in ``kwargs`` is used.
All elements of ``args`` and ``kwargs`` except the ground truth
labels are features.
It feeds the features to the predictor and compares the result
with the ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
if isinstance(self.label_key, int):
if not (-len(args) <= self.label_key < len(args)):
msg = 'Label key %d is out of bounds' % self.label_key
raise ValueError(msg)
t = args[self.label_key]
if self.label_key == -1:
args = args[:-1]
else:
args = args[:self.label_key] + args[self.label_key + 1:]
elif isinstance(self.label_key, str):
if self.label_key not in kwargs:
msg = 'Label key "%s" is not found' % self.label_key
raise ValueError(msg)
t = kwargs[self.label_key]
del kwargs[self.label_key]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*args, **kwargs)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
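# Illustrative usage (editor's sketch, not part of the original module): the
# predictor `MLP` below is hypothetical. Classifier wraps any predictor link
# and returns a scalar loss suitable for an optimizer/updater:
#
#     model = Classifier(MLP(n_units=100, n_out=10))
#     loss = model(x, t)   # x: feature minibatch, t: integer labels
#     # 'loss' (and 'accuracy' when compute_accuracy is True) are reported
#     # to the current chainer reporter as a side effect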
|
t794104/ansible
|
refs/heads/devel
|
test/units/modules/storage/netapp/test_na_ontap_security_key_manager.py
|
43
|
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_security_key_manager \
import NetAppOntapSecurityKeyManager as key_manager_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.type = kind
self.data = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'key_manager':
xml = self.build_port_info(self.data)
self.xml_out = xml
return xml
@staticmethod
def build_port_info(key_manager_details):
''' build xml data for key-manager-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'key-manager-info': {
'key-manager-ip-address': '0.0.0.0',
'key-manager-server-status': 'available',
'key-manager-tcp-port': '5696',
'node-name': 'test_node'
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_key_manager = {
'node_name': 'test_node',
'tcp_port': 5696,
'ip_address': '0.0.0.0',
'server_status': 'available'
}
def mock_args(self):
return {
'node': self.mock_key_manager['node_name'],
'tcp_port': self.mock_key_manager['tcp_port'],
'ip_address': self.mock_key_manager['ip_address'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!',
'https': 'False'
}
def get_key_manager_mock_object(self, kind=None):
"""
Helper method to return an na_ontap_security_key_manager object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_security_key_manager object
"""
obj = key_manager_module()
obj.asup_log_for_cserver = Mock(return_value=None)
obj.cluster = Mock()
obj.cluster.invoke_successfully = Mock()
if kind is None:
obj.cluster = MockONTAPConnection()
else:
obj.cluster = MockONTAPConnection(kind=kind, data=self.mock_key_manager)
return obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
key_manager_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_key_manager(self):
''' Test if get_key_manager() returns None for non-existent key manager '''
set_module_args(self.mock_args())
result = self.get_key_manager_mock_object().get_key_manager()
assert result is None
def test_get_existing_key_manager(self):
''' Test if get_key_manager() returns details for existing key manager '''
set_module_args(self.mock_args())
result = self.get_key_manager_mock_object('key_manager').get_key_manager()
assert result['ip_address'] == self.mock_key_manager['ip_address']
@patch('ansible.modules.storage.netapp.na_ontap_security_key_manager.NetAppOntapSecurityKeyManager.get_key_manager')
def test_successfully_add_key_manager(self, get_key_manager):
''' Test successfully add key manager'''
data = self.mock_args()
data['state'] = 'present'
set_module_args(data)
get_key_manager.side_effect = [
None
]
obj = self.get_key_manager_mock_object('key_manager')
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
assert exc.value.args[0]['changed']
def test_successfully_delete_key_manager(self):
''' Test successfully delete key manager'''
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
obj = self.get_key_manager_mock_object('key_manager')
with pytest.raises(AnsibleExitJson) as exc:
obj.apply()
assert exc.value.args[0]['changed']
|
lthurlow/Network-Grapher
|
refs/heads/master
|
proj/external/matplotlib-1.2.1/lib/mpl_examples/animation/old_animation/animation_blit_fltk.py
|
3
|
from __future__ import print_function
import sys
import fltk
import matplotlib
matplotlib.use('FltkAgg')
import pylab as p
import numpy as npy
import time
# save the clean slate background -- everything but the animated line
# is drawn and saved in the pixel buffer background
class animator:
def __init__(self,ax):
self.ax=ax
self.canvas=ax.figure.canvas
self.canvas.mpl_connect('draw_event',self.clear)
self.cnt=0
self.background=None
# for profiling
self.tstart = time.time()
def clear(self,event):
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def update(self,ptr):
# restore the clean slate background
if self.background is None:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.canvas.restore_region(self.background)
# update the data
line.set_ydata(npy.sin(x+self.cnt/10.0))
# just draw the animated artist
self.ax.draw_artist(line)
# just redraw the axes rectangle
self.canvas.blit(ax.bbox)
self.cnt+=1
if self.cnt==1000:
# print the timing info and quit
print('FPS:' , 1000/(time.time()-self.tstart))
sys.exit()
return True
ax = p.subplot(111)
p.subplots_adjust(left=0.3, bottom=0.3) # check for flipy bugs
p.grid() # to ensure proper background restore
# create the initial line
x = npy.arange(0,2*npy.pi,0.01)
line, = p.plot(x, npy.sin(x), animated=True)
p.draw()
anim=animator(ax)
fltk.Fl.add_idle(anim.update)
fltk.Fl.run()
|
daanwierstra/pybrain
|
refs/heads/master
|
docs/tutorials/fnn.py
|
1
|
############################################################################
# PyBrain Tutorial "Classification with Feed-Forward Neural Networks"
#
# Author: Martin Felder, felder@in.tum.de
############################################################################
""" This tutorial walks you through the process of setting up a dataset
for classification, and training a network on it while visualizing the results
online.
First we need to import the necessary components from PyBrain."""
from pybrain.datasets import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
""" Furthermore, pylab is needed for the graphical output. """
from pylab import ion, ioff, figure, draw, contourf, clf, show, hold, plot
from scipy import diag, arange, meshgrid, where
from numpy.random import multivariate_normal
""" To have a nice dataset for visualization, we produce a set of
points in 2D belonging to three different classes. You could also
read in your data from a file, e.g. using pylab.load(). """
means = [(-1,0),(2,4),(3,1)]
cov = [diag([1,1]), diag([0.5,1.2]), diag([1.5,0.7])]
alldata = ClassificationDataSet(2, 1, nb_classes=3)
for n in xrange(400):
for klass in range(3):
input = multivariate_normal(means[klass],cov[klass])
alldata.addSample(input, [klass])
""" Randomly split the dataset into 75% training and 25% test data sets. Of course, we
could also have created two different datasets to begin with."""
tstdata, trndata = alldata.splitWithProportion( 0.25 )
""" For neural network classification, it is highly advisable to encode classes
with one output neuron per class. Note that this operation duplicates the original
targets and stores them in an (integer) field named 'class'."""
trndata._convertToOneOfMany( )
tstdata._convertToOneOfMany( )
""" Test our dataset by printing a little information about it. """
print "Number of training patterns: ", len(trndata)
print "Input and output dimensions: ", trndata.indim, trndata.outdim
print "First sample (input, target, class):"
print trndata['input'][0], trndata['target'][0], trndata['class'][0]
""" Now build a feed-forward network with 5 hidden units. We use the a convenience
function for this. The input and output
layer size must match the dataset's input and target dimension. You could add
additional hidden layers by inserting more numbers giving the desired layer sizes.
The output layer uses a softmax function because we are doing classification.
There are more options to explore here, e.g. try changing the hidden layer transfer
function to linear instead of (the default) sigmoid.
.. seealso:: Description of :func:`buildNetwork` for more info on options,
and the Network tutorial :ref:`netmodcon` for info on how to build
your own non-standard networks.
"""
fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )
""" Set up a trainer that basically takes the network and training dataset as input.
Currently the backpropagation and RPROP learning algorithms are implemented. See their
description for possible parameters. If you don't want to deal with this, just use RPROP
with default parameters. """
trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
#trainer = RPropMinusTrainer( fnn, dataset=trndata, verbose=True)
""" Now generate a square grid of data points and put it into a dataset,
which we can then classify to obtain a nice contour field for visualization.
Therefore the target values for this data set can be ignored."""
ticks = arange(-3.,6.,0.2)
X, Y = meshgrid(ticks, ticks)
# need column vectors in dataset, not arrays
griddata = ClassificationDataSet(2,1, nb_classes=3)
for i in xrange(X.size):
griddata.addSample([X.ravel()[i],Y.ravel()[i]], [0])
griddata._convertToOneOfMany() # this is still needed to make the fnn feel comfy
""" Start the training iterations. """
for i in range(20):
""" Train the network for some epochs. Usually you would set something like 5 here,
but for visualization purposes we do this one epoch at a time."""
trainer.trainEpochs( 1 )
""" Evaluate the network on the training and test data. There are several ways to do this - check
out the :mod:`pybrain.tools.validation` module, for instance. Here we let the trainer do the test. """
trnresult = percentError( trainer.testOnClassData(),
trndata['class'] )
tstresult = percentError( trainer.testOnClassData(
dataset=tstdata ), tstdata['class'] )
print "epoch: %4d" % trainer.totalepochs, \
" train error: %5.2f%%" % trnresult, \
" test error: %5.2f%%" % tstresult
""" Run our grid data through the FNN, get the most likely class
and shape it into a square array again. """
out = fnn.activateOnDataset(griddata)
out = out.argmax(axis=1) # the highest output activation gives the class
out = out.reshape(X.shape)
""" Now plot the test data and the underlying grid as a filled contour. """
figure(1)
ioff() # interactive graphics off
clf() # clear the plot
hold(True) # overplot on
for c in [0,1,2]:
here, _ = where(tstdata['class']==c)
plot(tstdata['input'][here,0],tstdata['input'][here,1],'o')
if out.max()!=out.min(): # safety check against flat field
contourf(X, Y, out) # plot the contour
ion() # interactive graphics on
draw() # update the plot
""" Finally, keep showing the plot until user kills it. """
ioff()
show()
|
DMLoy/ECommerceBasic
|
refs/heads/master
|
lib/python2.7/site-packages/django/utils/http.py
|
27
|
from __future__ import unicode_literals
import calendar
import datetime
import re
import sys
try:
from urllib import parse as urllib_parse
except ImportError: # Python 2
import urllib as urllib_parse
import urlparse
urllib_parse.urlparse = urlparse.urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
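# Illustrative round trip (editor's sketch; the values shown are what these
# helpers are expected to produce, not verified output from this file):
#
#     >>> urlquote('Paris & Orléans')
#     'Paris%20%26%20Orl%C3%A9ans'
#     >>> urlunquote('Paris%20%26%20Orl%C3%A9ans')
#     'Paris & Orléans'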
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib_parse.urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
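# Illustrative example (editor's sketch): plain dicts and MultiValueDicts are
# both accepted, and non-ASCII values are UTF-8 encoded before quoting:
#
#     >>> urlencode({'q': 'café', 'page': 2})
#     'q=caf%C3%A9&page=2'   # pair order may differ for a plain dict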
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
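# Illustrative outputs (editor's sketch) for the Unix epoch:
#
#     >>> http_date(0)
#     'Thu, 01 Jan 1970 00:00:00 GMT'
#     >>> cookie_date(0)
#     'Thu, 01-Jan-1970 00:00:00 GMT'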
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if not six.PY3 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if not six.PY3:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
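# Illustrative round trip (editor's sketch):
#
#     >>> int_to_base36(1337)
#     '115'
#     >>> base36_to_int('115')
#     1337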
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes, escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host).
Always returns ``False`` on an empty url.
"""
if not url:
return False
netloc = urllib_parse.urlparse(url)[1]
return not netloc or netloc == host
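# Illustrative examples (editor's sketch; host names are hypothetical):
#
#     >>> same_origin('https://example.com/a', 'https://example.com/b')
#     True
#     >>> is_safe_url('/next/', host='example.com')
#     True
#     >>> is_safe_url('http://evil.example/', host='example.com')
#     False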
|
jumpstarter-io/neutron
|
refs/heads/master
|
neutron/tests/unit/cisco/l3/__init__.py
|
12133432
| |
adam-iris/mailman
|
refs/heads/master
|
src/mailman/archiving/docs/__init__.py
|
12133432
| |
rdelhommer/BlocklyPrototypeEB
|
refs/heads/master
|
i18n/dedup_json.py
|
204
|
#!/usr/bin/python
# Consolidates duplicate key-value pairs in a JSON file.
# If the same key is used with different values, no warning is given,
# and there is no guarantee about which key-value pair will be output.
# There is also no guarantee as to the order of the key-value pairs
# output.
#
# Copyright 2013 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import json
from common import InputError
def main():
"""Parses arguments and iterates over files.
Raises:
IOError: An I/O error occurred with an input or output file.
InputError: Input JSON could not be parsed.
"""
# Set up argument parser.
parser = argparse.ArgumentParser(
description='Removes duplicate key-value pairs from JSON files.')
parser.add_argument('--suffix', default='',
help='optional suffix for output files; '
'if empty, files will be changed in place')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
# Iterate over files.
for filename in args.files:
# Read in json using Python libraries. This eliminates duplicates.
print('Processing ' + filename + '...')
try:
with codecs.open(filename, 'r', 'utf-8') as infile:
j = json.load(infile)
except ValueError, e:
print('Error reading ' + filename)
raise InputError(filename, str(e))  # pass the current filename, not the 'file' builtin
# Build up output strings as an array to make output of delimiters easier.
output = []
for key in j:
if key != '@metadata':
output.append('\t"' + key + '": "' +
j[key].replace('\n', '\\n') + '"')
# Output results.
with codecs.open(filename + args.suffix, 'w', 'utf-8') as outfile:
outfile.write('{\n')
outfile.write(',\n'.join(output))
outfile.write('\n}\n')
if __name__ == '__main__':
main()
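# Example invocation (editor's sketch; file names are hypothetical):
#
#     python dedup_json.py --suffix .deduped en.json
#
# writes the de-duplicated JSON to en.json.deduped; omit --suffix to rewrite
# en.json in place.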
|
UManPychron/pychron
|
refs/heads/develop
|
pychron/entry/irradiated_position.py
|
2
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, Str, Float, Bool, Int, Property, CStr
from traitsui.tabular_adapter import TabularAdapter
from pychron.pychron_constants import PLUSMINUS
# ============= standard library imports ========================
# ============= local library imports ==========================
class BaseIrradiatedPosition(HasTraits):
identifier = Str
material = Str
sample = Str
grainsize = Str
hole = Int
alt_hole = Int
project = Str
principal_investigator = Str
level = Str
irradiation = Str
igsn = Str
j = Float(0)
j_err = Float(0)
pred_j = Float
pred_j_err = Float
x = Float
y = Float
residual = Property(depends_on='j,pred_j')
use = Bool
save = Bool
def __init__(self, pos=None, *args, **kw):
super(BaseIrradiatedPosition, self).__init__(*args, **kw)
if pos is not None:
self.x, self.y = pos
def _get_residual(self):
pe = 0
if self.pred_j:
try:
pe = abs(self.j - self.pred_j) / self.j * 100
except ZeroDivisionError:
pe = 0
return pe
class IrradiatedPosition(BaseIrradiatedPosition):
size = Str
weight = CStr
note = Str
analyzed = Bool
nanalyses = Int
packet = Str
class BaseIrradiatedPositionAdapter(TabularAdapter):
columns = [
('Hole', 'hole'),
('Alt. Hole', 'alt_hole'),
('Labnumber', 'labnumber'),
('Sample', 'sample'),
('Project', 'project'),
('J', 'j'),
(u'{}J'.format(PLUSMINUS), 'j_err'),
('Note', 'note')]
hole_width = Int(45)
class IrradiatedPositionAdapter(TabularAdapter):
columns = [
('', 'analyzed'),
('Hole', 'hole'),
('Packet', 'packet'),
('Identifier', 'identifier'),
('Sample', 'sample'),
('IGSN', 'igsn'),
('PI', 'principal_investigator'),
('Project', 'project'),
('Material', 'material'),
('Grainsize', 'grainsize'),
# ('Size', 'size'),
('Weight', 'weight'),
('J', 'j'),
(u'{}J'.format(PLUSMINUS), 'j_err'),
('Note', 'note')]
igsn_width = Int(70)
identifier_width = Int(80)
hole_width = Int(50)
packet_width = Int(100)
sample_width = Int(100)
project_width = Int(150)
material_width = Int(100)
grainsize_width = Int(70)
size_width = Int(50)
weight_width = Int(50)
j_width = Int(75)
j_err_width = Int(75)
analyzed_text = Property
j_text = Property
j_err_text = Property
font = 'arial 10'
# hole_can_edit = False
# def _get_hole_width(self):
# return 35
def get_tooltip(self, obj, trait, row, column):
name = self.column_map[column]
if name == 'analyzed':
item = getattr(obj, trait)[row]
return 'N Analyses: {}'.format(item.nanalyses)
def _get_analyzed_text(self):
return 'X' if self.item.analyzed else ''
def _set_analyzed_text(self):
pass
def _set_j_text(self, t):
self.item.j = float(t)
def _set_j_err_text(self, t):
self.item.j_err = t
def _get_j_text(self):
return '{:0.6E}'.format(self.item.j)
def _get_j_err_text(self):
return '{:0.6E}'.format(self.item.j_err)
def get_bg_color(self, obj, trait, row, column):
item = getattr(obj, trait)[row]
if item.analyzed:
return '#B0C4DE'
# ============= EOF =============================================
|
163gal/Time-Line
|
refs/heads/master
|
libs/wx/tools/Editra/src/syntax/_groovy.py
|
3
|
###############################################################################
# Name: groovy.py #
# Purpose: Define Groovy syntax for highlighting and other features #
# Author: Omar Gomez <omar.gomez@gmail.com> #
# Copyright: (c) 2009 Omar Gomez <omar.gomez@gmail.com> #
# License: wxWindows License #
###############################################################################
"""
FILE: groovy.py
AUTHOR: Omar Gomez
@summary: Lexer configuration module for Groovy (based on the Java one).
"""
__author__ = "Omar Gomez <omar.gomez@gmail.com>"
__svnid__ = "$Id: _groovy.py 63834 2010-04-03 06:04:33Z CJP $"
__revision__ = "$Revision: 63834 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
from _cpp import AutoIndenter
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
MAIN_KEYWORDS = (0,
"""
as assert Boolean Byte Character Class Double Float Integer Long Number Object
Short String property void abstract assert boolean break byte case catch char
class const continue default do double else extends false final finally float
for goto if implements import instanceof in int interface long native new null
package private protected public return short static strictfp super switch
synchronized this throw throws transient true try void volatile while def
"""
)
SECONDARY_KEYWORDS= (1,
"""
abs accept allProperties and any append asImmutable asSynchronized asWritable
center collect compareTo contains count decodeBase64 div dump each eachByte
eachFile eachFileRecurse eachLine eachMatch eachProperty eachPropertyName
eachWithIndex encodeBase64 every execute filterLine find findAll findIndexOf
flatten getErr getIn getOut getText inject inspect intersect intdiv invokeMethod
isCase join leftShift max min minus mod multiply negate newInputStream
newOutputStream newPrintWriter newReader newWriter next or padLeft padRight
plus pop previous print println readBytes readLine readLines reverse
reverseEach rightShift rightShiftUnsigned round size sort splitEachLine step
subMap times toDouble toFloat toInteger tokenize toList toLong toURL
transformChar transformLine upto use waitForOrKill withInputStream
withOutputStream withPrintWriter withReader withStream withStreams withWriter
withWriterAppend write writeLine
"""
)
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (stc.STC_C_DEFAULT, 'default_style'),
(stc.STC_C_COMMENT, 'comment_style'),
(stc.STC_C_COMMENTDOC, 'comment_style'),
(stc.STC_C_COMMENTDOCKEYWORD, 'dockey_style'),
(stc.STC_C_COMMENTDOCKEYWORDERROR, 'error_style'),
(stc.STC_C_COMMENTLINE, 'comment_style'),
(stc.STC_C_COMMENTLINEDOC, 'comment_style'),
(stc.STC_C_CHARACTER, 'char_style'),
(stc.STC_C_GLOBALCLASS, 'global_style'),
(stc.STC_C_IDENTIFIER, 'default_style'),
(stc.STC_C_NUMBER, 'number_style'),
(stc.STC_C_OPERATOR, 'operator_style'),
(stc.STC_C_PREPROCESSOR, 'pre_style'),
(stc.STC_C_REGEX, 'pre_style'),
(stc.STC_C_STRING, 'string_style'),
(stc.STC_C_STRINGEOL, 'stringeol_style'),
(stc.STC_C_UUID, 'pre_style'),
(stc.STC_C_VERBATIM, 'number2_style'),
(stc.STC_C_WORD, 'keyword_style'),
(stc.STC_C_WORD2, 'keyword2_style') ]
#---- Extra Properties ----#
FOLD = ("fold", "1")
FOLD_PRE = ("styling.within.preprocessor", "0")
FOLD_COM = ("fold.comment", "1")
FOLD_COMP = ("fold.compact", "1")
FOLD_ELSE = ("fold.at.else", "0")
#------------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Groovy"""
def __init__(self, langid):
syndata.SyntaxDataBase.__init__(self, langid)
# Setup
self.SetLexer(stc.STC_LEX_CPP)
self.RegisterFeature(synglob.FEATURE_AUTOINDENT, AutoIndenter)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [MAIN_KEYWORDS, SECONDARY_KEYWORDS]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD, FOLD_PRE]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [ u'//' ]
|
plotly/plotly.py
|
refs/heads/master
|
packages/python/plotly/plotly/validators/parcoords/line/colorbar/tickfont/_family.py
|
1
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="parcoords.line.colorbar.tickfont",
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs
)
|
bjoshua/ansible
|
refs/heads/devel
|
samples/lookup_pipe.py
|
255
|
- hosts: localhost
gather_facts: no
tasks:
- debug: msg="the date is {{ lookup('pipe', 'date') }}"
|
ga7g08/sympy
|
refs/heads/master
|
sympy/series/tests/test_limitseq.py
|
62
|
from sympy import symbols, oo, Sum, harmonic, Add, S, binomial, factorial
from sympy.series.limitseq import limit_seq
from sympy.series.limitseq import difference_delta as dd
from sympy.utilities.pytest import raises, XFAIL
n, m, k = symbols('n m k', integer=True)
def test_difference_delta():
e = n*(n + 1)
e2 = e * k
assert dd(e) == 2*n + 2
assert dd(e2, n, 2) == k*(4*n + 6)
raises(ValueError, lambda: dd(e2))
raises(ValueError, lambda: dd(e2, n, oo))
def test_difference_delta__Sum():
e = Sum(1/k, (k, 1, n))
assert dd(e, n) == 1/(n + 1)
assert dd(e, n, 5) == Add(*[1/(i + n + 1) for i in range(5)])
e = Sum(1/k, (k, 1, 3*n))
assert dd(e, n) == Add(*[1/(i + 3*n + 1) for i in range(3)])
e = n * Sum(1/k, (k, 1, n))
assert dd(e, n) == 1 + Sum(1/k, (k, 1, n))
e = Sum(1/k, (k, 1, n), (m, 1, n))
assert dd(e, n) == harmonic(n)
def test_difference_delta__Add():
e = n + n*(n + 1)
assert dd(e, n) == 2*n + 3
assert dd(e, n, 2) == 4*n + 8
e = n + Sum(1/k, (k, 1, n))
assert dd(e, n) == 1 + 1/(n + 1)
assert dd(e, n, 5) == 5 + Add(*[1/(i + n + 1) for i in range(5)])
def test_difference_delta__Pow():
e = 4**n
assert dd(e, n) == 3*4**n
assert dd(e, n, 2) == 15*4**n
e = 4**(2*n)
assert dd(e, n) == 15*4**(2*n)
assert dd(e, n, 2) == 255*4**(2*n)
e = n**4
assert dd(e, n) == (n + 1)**4 - n**4
e = n**n
assert dd(e, n) == (n + 1)**(n + 1) - n**n
def test_limit_seq():
e = binomial(2*n, n) / Sum(binomial(2*k, k), (k, 1, n))
assert limit_seq(e) == S(3) / 4
assert limit_seq(e, m) == e
e = (5*n**3 + 3*n**2 + 4) / (3*n**3 + 4*n - 5)
assert limit_seq(e, n) == S(5) / 3
e = (harmonic(n) * Sum(harmonic(k), (k, 1, n))) / (n * harmonic(2*n)**2)
assert limit_seq(e, n) == 1
e = Sum(k**2 * Sum(2**m/m, (m, 1, k)), (k, 1, n)) / (2**n*n)
assert limit_seq(e, n) == 4
e = (Sum(binomial(3*k, k) * binomial(5*k, k), (k, 1, n)) /
(binomial(3*n, n) * binomial(5*n, n)))
assert limit_seq(e, n) == S(84375) / 83351
e = Sum(harmonic(k)**2/k, (k, 1, 2*n)) / harmonic(n)**3
assert limit_seq(e, n) == S(1) / 3
raises(ValueError, lambda: limit_seq(e * m))
@XFAIL
def test_limit_seq_fail():
# improve Summation algorithm or add ad-hoc criteria
e = (harmonic(n)**3 * Sum(1/harmonic(k), (k, 1, n)) /
(n * Sum(harmonic(k)/k, (k, 1, n))))
assert limit_seq(e, n) == 2
# No unique dominant term
e = (Sum(2**k * binomial(2*k, k) / k**2, (k, 1, n)) /
(Sum(2**k/k*2, (k, 1, n)) * Sum(binomial(2*k, k), (k, 1, n))))
assert limit_seq(e, n) == S(3) / 7
# Simplifications of summations needs to be improved.
e = n**3*Sum(2**k/k**2, (k, 1, n))**2 / (2**n * Sum(2**k/k, (k, 1, n)))
assert limit_seq(e, n) == 2
e = (harmonic(n) * Sum(2**k/k, (k, 1, n)) /
(n * Sum(2**k*harmonic(k)/k**2, (k, 1, n))))
assert limit_seq(e, n) == 1
e = (Sum(2**k*factorial(k) / k**2, (k, 1, 2*n)) /
(Sum(4**k/k**2, (k, 1, n)) * Sum(factorial(k), (k, 1, 2*n))))
assert limit_seq(e, n) == S(3) / 16
|
jriguera/csv2xmlgen
|
refs/heads/master
|
lib/toolbox/__init__.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py: defines this directory as the 'toolbox' package.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright © 2008 Jose Riguera Lopez <jriguera@gmail.com>
#
"""
Miscellaneous utility functions ...
Some of these functions are from "Dive Into Python", a free Python book for
experienced programmers, others from the Internet ...
Thanks to:
http://diveintopython.org/ Mark Pilgrim <f8dy@diveintopython.org>
"""
__author__ = "Jose Riguera Lopez <jriguera@gmail.com>"
__version__ = "0.3.0"
__date__ = "April 2010"
__license__ = "GPL (v3 or later)"
__copyright__ ="(c) Jose Riguera"
__package_name__ = "toolbox"
__package_revision__ = '0'
__package_version__ = '0.3.0'
__package_released__ = "October 2008"
__package_author__ = "Jose Riguera Lopez <jriguera@gmail.com>"
__package_license__ = "GPL (v2 or later)"
__package_copyright__ ="(c) Jose Riguera, October 2008"
__all__ = ["iosys", "exceptions", "string", "i18n"]
from iosys import *
from exceptions import *
from string import *
from i18n import *
# EOF
|
basicthinker/ThyNVM
|
refs/heads/master
|
src/mem/cache/tags/Tags.py
|
39
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Prakash Ramrakhyani
from m5.params import *
from m5.proxy import *
from ClockedObject import ClockedObject
class BaseTags(ClockedObject):
type = 'BaseTags'
abstract = True
cxx_header = "mem/cache/tags/base.hh"
# Get the size from the parent (cache)
size = Param.MemorySize(Parent.size, "capacity in bytes")
# Get the block size from the parent (system)
block_size = Param.Int(Parent.cache_line_size, "block size in bytes")
# Get the hit latency from the parent (cache)
hit_latency = Param.Cycles(Parent.hit_latency,
"The hit latency for this cache")
class BaseSetAssoc(BaseTags):
type = 'BaseSetAssoc'
abstract = True
cxx_header = "mem/cache/tags/base_set_assoc.hh"
assoc = Param.Int(Parent.assoc, "associativity")
sequential_access = Param.Bool(Parent.sequential_access,
"Whether to access tags and data sequentially")
class LRU(BaseSetAssoc):
type = 'LRU'
cxx_class = 'LRU'
cxx_header = "mem/cache/tags/lru.hh"
class RandomRepl(BaseSetAssoc):
type = 'RandomRepl'
cxx_class = 'RandomRepl'
cxx_header = "mem/cache/tags/random_repl.hh"
class FALRU(BaseTags):
type = 'FALRU'
cxx_class = 'FALRU'
cxx_header = "mem/cache/tags/fa_lru.hh"
|
pbanaszkiewicz/amy
|
refs/heads/develop
|
amy/workshops/apps.py
|
1
|
from django.apps import AppConfig
from django.db.models.signals import m2m_changed
def trainingrequest_m2m_changed(sender, **kwargs):
"""Signal receiver for TrainingRequest m2m_changed signal.
The purpose of this receiver is to react to changes in `TrainingRequest.domains`
and `TrainingRequest.previous_involvement` and recalculate the request's
automatic score, which depends on these M2M fields.
Originally the calculation took place in the model's `save` method, but
it was being called before the M2M fields changed."""
# react only on "post_add"/"post_remove", forward (not reverse) action
action = kwargs.get("action", "")
forward = not kwargs.get("reverse", True)
instance = kwargs.get("instance", None)
using = kwargs.get("using")
# There's a catch - we can alter the relation from a different side, ie.
# from KnowledgeDomain.trainingrequest_set, but it's harder to recalculate
# because we'd have to make N recalculations. Therefore we only allow
# `forward` direction.
if instance and forward and action in ["post_add", "post_remove"]:
# recalculation happens in `save()` method
instance.save(using=using)
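# Illustrative trigger (editor's sketch): given an existing request, a forward
# M2M change such as
#
#     request.domains.add(domain)
#
# fires m2m_changed with action='post_add' and reverse=False, so the receiver
# above calls request.save() and the automatic score is recalculated.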
class WorkshopsConfig(AppConfig):
name = "workshops"
label = "workshops"
verbose_name = "Workshops"
def ready(self):
# connect m2m_changed signal for TrainingRequest.domains to calculate
# score_auto
TrainingRequest = self.get_model("TrainingRequest")
m2m_changed.connect(
trainingrequest_m2m_changed,
sender=TrainingRequest.domains.through,
)
m2m_changed.connect(
trainingrequest_m2m_changed,
sender=TrainingRequest.previous_involvement.through,
)
from workshops import receivers # noqa
|
sylvestre/treeherder
|
refs/heads/master
|
treeherder/etl/management/commands/ingest_push.py
|
3
|
from cProfile import Profile
from optparse import make_option
from django.conf import settings
from django.core.management.base import (BaseCommand,
CommandError)
from treeherder.etl.buildapi import (Builds4hJobsProcess,
PendingJobsProcess,
RunningJobsProcess)
from treeherder.etl.pushlog import HgPushlogProcess
from treeherder.model.derived import RefDataManager
class Command(BaseCommand):
"""Management command to ingest data from a single push."""
help = "Ingests a single push into treeherder"
args = '<project> <changeset>'
option_list = BaseCommand.option_list + (
make_option('--profile-file',
action='store',
dest='profile_file',
default=None,
help='Profile command and write result to profile file'),
make_option('--filter-job-group',
action='store',
dest='filter_job_group',
default=None,
help="Only process jobs in specified group symbol "
"(e.g. 'T')")
)
def _handle(self, *args, **options):
if len(args) != 2:
raise CommandError("Need to specify (only) branch and changeset")
(project, changeset) = args
# get reference to repo
rdm = RefDataManager()
repos = filter(lambda x: x['name'] == project,
rdm.get_all_repository_info())
if not repos:
raise CommandError("No project found named '%s'" % project)
repo = repos[0]
# make sure all tasks are run synchronously / immediately
settings.CELERY_ALWAYS_EAGER = True
# get hg pushlog
pushlog_url = '%s/json-pushes/?full=1&version=2' % repo['url']
# ingest this particular revision for this project
process = HgPushlogProcess()
# Use the actual push SHA, in case the changeset specified was a tag
# or branch name (eg tip). HgPushlogProcess returns the full SHA.
push_sha = process.run(pushlog_url, project, changeset=changeset)[:12]
Builds4hJobsProcess().run(filter_to_project=project,
filter_to_revision=push_sha,
filter_to_job_group=options['filter_job_group'])
PendingJobsProcess().run(filter_to_project=project,
filter_to_revision=push_sha,
filter_to_job_group=options['filter_job_group'])
RunningJobsProcess().run(filter_to_project=project,
filter_to_revision=push_sha,
filter_to_job_group=options['filter_job_group'])
def handle(self, *args, **options):
if options['profile_file']:
profiler = Profile()
profiler.runcall(self._handle, *args, **options)
profiler.dump_stats(options['profile_file'])
else:
self._handle(*args, **options)
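# Illustrative invocations (project and changeset values are hypothetical):
#
#   ./manage.py ingest_push mozilla-central 3a1b2c4d5e6f
#   ./manage.py ingest_push mozilla-central 3a1b2c4d5e6f --profile-file ingest.prof
#   ./manage.py ingest_push mozilla-central 3a1b2c4d5e6f --filter-job-group T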
|
alrifqi/django
|
refs/heads/master
|
tests/app_loading/not_installed/models.py
|
161
|
from django.db import models
class NotInstalledModel(models.Model):
class Meta:
app_label = 'not_installed'
class RelatedModel(models.Model):
class Meta:
app_label = 'not_installed'
not_installed = models.ForeignKey(NotInstalledModel)
class M2MRelatedModel(models.Model):
class Meta:
app_label = 'not_installed'
not_installed = models.ManyToManyField(NotInstalledModel)
|
mjudsp/Tsallis
|
refs/heads/master
|
sklearn/metrics/tests/__init__.py
|
12133432
| |
prarthitm/edxplatform
|
refs/heads/master
|
openedx/core/djangoapps/crawlers/tests.py
|
12133432
| |
arturtamborski/arturtamborskipl
|
refs/heads/master
|
blog/templatetags/__init__.py
|
12133432
| |
mbauskar/omnitech-erpnext
|
refs/heads/master
|
erpnext/accounts/print_format/pos_invoice/__init__.py
|
12133432
| |
gangadhar-kadam/sms-erpnext
|
refs/heads/master
|
setup/doctype/currency/test_currency.py
|
6
|
# pre loaded
test_records = []
|
wang16/swtoolkit
|
refs/heads/master
|
site_scons/site_tools/component_bits.py
|
27
|
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Environment bit support for software construction toolkit.
This module is automatically included by the component_setup tool.
"""
import __builtin__
import types
import SCons
_bit_descriptions = {}
_bits_with_options = set()
_bit_exclusive_groups = {}
#------------------------------------------------------------------------------
def _CheckDeclared(bits):
"""Checks each of the bits to make sure it's been declared.
Args:
bits: List of bits to check.
Raises:
ValueError: A bit has not been declared.
"""
for bit in bits:
if bit not in _bit_descriptions:
raise ValueError('Bit "%s" used before DeclareBit()' %
bit)
def _CheckExclusive(already_set, proposed):
"""Checks if setting proposed bits would violate any exclusive groups.
Args:
already_set: List of bits already set.
proposed: List of bits attempting to be set.
Raises:
ValueError: A proposed bit belongs to an exclusive group which already has
a bit set.
"""
# Remove any already-set bits from proposed (note this makes a copy of
# proposed so we don't alter the passed list).
proposed = [bit for bit in proposed if bit not in already_set]
for group_name, group_bits in _bit_exclusive_groups.items():
set_match = group_bits.intersection(already_set)
proposed_match = group_bits.intersection(proposed)
if set_match and proposed_match:
raise ValueError('Unable to set bit "%s" because it belongs to the same '
'exclusive group "%s" as already-set bit "%s"' % (
proposed_match.pop(), group_name, set_match.pop()))
#------------------------------------------------------------------------------
def DeclareBit(bit_name, desc, exclusive_groups=None):
"""Declares and describes the bit.
Args:
bit_name: Name of the bit being described.
desc: Description of bit.
exclusive_groups: Bit groups which this bit belongs to. At most one bit
may be set in each exclusive group. May be a string, list of string,
or None.
Raises:
ValueError: The bit has already been defined with a different description,
or the description is empty.
Adds a description for the bit in the global dictionary of bit names. All
bits should be described before being used in Bit()/AllBits()/AnyBits().
"""
if not desc:
raise ValueError('Must supply a description for bit "%s"' % bit_name)
existing_desc = _bit_descriptions.get(bit_name)
if existing_desc and desc != existing_desc:
raise ValueError('Cannot describe bit "%s" as "%s" because it has already'
'been described as "%s".' %
(bit_name, desc, existing_desc))
_bit_descriptions[bit_name] = desc
# Add bit to its exclusive groups
if exclusive_groups:
if type(exclusive_groups) == types.StringType:
exclusive_groups = [exclusive_groups]
for g in exclusive_groups:
if g not in _bit_exclusive_groups:
_bit_exclusive_groups[g] = set()
_bit_exclusive_groups[g].add(bit_name)
#------------------------------------------------------------------------------
def Bit(env, bit_name):
"""Checks if the environment has the bit.
Args:
env: Environment to check.
bit_name: Name of the bit to check.
Returns:
True if the bit is present in the environment.
"""
_CheckDeclared([bit_name])
return bit_name in env['_BITS']
#------------------------------------------------------------------------------
def AllBits(env, *args):
"""Checks if the environment has all the bits.
Args:
env: Environment to check.
args: List of bit names to check.
Returns:
True if every bit listed is present in the environment.
"""
_CheckDeclared(args)
return set(args).issubset(env['_BITS'])
#------------------------------------------------------------------------------
def AnyBits(env, *args):
"""Checks if the environment has at least one of the bits.
Args:
env: Environment to check.
args: List of bit names to check.
Returns:
True if at least one bit listed is present in the environment.
"""
_CheckDeclared(args)
return set(args).intersection(env['_BITS'])
#------------------------------------------------------------------------------
def SetBits(env, *args):
"""Sets the bits in the environment.
Args:
env: Environment to check.
args: List of bit names to set.
"""
_CheckDeclared(args)
_CheckExclusive(env['_BITS'], args)
env['_BITS'] = env['_BITS'].union(args)
#------------------------------------------------------------------------------
def ClearBits(env, *args):
"""Clears the bits in the environment.
Args:
env: Environment to check.
args: List of bit names to clear (remove).
"""
_CheckDeclared(args)
env['_BITS'] = env['_BITS'].difference(args)
#------------------------------------------------------------------------------
def SetBitFromOption(env, bit_name, default):
"""Sets the bit in the environment from a command line option.
Args:
env: Environment to check.
bit_name: Name of the bit to set from a command line option.
default: Default value for bit if command line option is not present.
"""
_CheckDeclared([bit_name])
# Add the command line option, if not already present
if bit_name not in _bits_with_options:
_bits_with_options.add(bit_name)
SCons.Script.AddOption('--' + bit_name,
dest=bit_name,
action='store_true',
help='set bit:' + _bit_descriptions[bit_name])
SCons.Script.AddOption('--no-' + bit_name,
dest=bit_name,
action='store_false',
help='clear bit:' + _bit_descriptions[bit_name])
bit_set = env.GetOption(bit_name)
if bit_set is None:
# Not specified on command line, so use default
bit_set = default
if bit_set:
env['_BITS'].add(bit_name)
elif bit_name in env['_BITS']:
env['_BITS'].remove(bit_name)
#------------------------------------------------------------------------------
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
# Add methods to builtin
__builtin__.DeclareBit = DeclareBit
# Add methods to environment
env.AddMethod(AllBits)
env.AddMethod(AnyBits)
env.AddMethod(Bit)
env.AddMethod(ClearBits)
env.AddMethod(SetBitFromOption)
env.AddMethod(SetBits)
env['_BITS'] = set()
# Declare bits for common target platforms
DeclareBit('linux', 'Target platform is linux.',
exclusive_groups=('target_platform'))
DeclareBit('mac', 'Target platform is mac.',
exclusive_groups=('target_platform'))
DeclareBit('windows', 'Target platform is windows.',
exclusive_groups=('target_platform'))
# Declare bits for common host platforms
DeclareBit('host_linux', 'Host platform is linux.',
exclusive_groups=('host_platform'))
DeclareBit('host_mac', 'Host platform is mac.',
exclusive_groups=('host_platform'))
DeclareBit('host_windows', 'Host platform is windows.',
exclusive_groups=('host_platform'))
# Declare other common bits from target_ tools
DeclareBit('debug', 'Build is debug, not optimized.')
DeclareBit('posix', 'Target platform is posix.')
# Set the appropriate host platform bit
host_platform_to_bit = {
'MAC': 'host_mac',
'LINUX': 'host_linux',
'WINDOWS': 'host_windows',
}
if HOST_PLATFORM in host_platform_to_bit:
env.SetBits(host_platform_to_bit[HOST_PLATFORM])
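# Illustrative SConscript usage sketch (the bit name and flags below are made
# up; DeclareBit is injected into __builtin__ and the other calls are added to
# the environment by generate() above):
#
#   DeclareBit('use_foo', 'Enable the hypothetical foo feature.')
#   env.SetBitFromOption('use_foo', False)     # adds --use_foo / --no-use_foo
#   if env.Bit('use_foo'):
#       env.Append(CPPDEFINES=['USE_FOO'])
#   if env.AllBits('windows', 'debug'):
#       env.Append(CCFLAGS=['/Od'])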
|
homeworkprod/byceps
|
refs/heads/master
|
byceps/services/ticketing/event_service.py
|
1
|
"""
byceps.services.ticketing.event_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from typing import List
from ...database import db
from .models.ticket_event import TicketEvent, TicketEventData
from .transfer.models import TicketID
def create_event(event_type: str, ticket_id: TicketID, data: TicketEventData
) -> None:
"""Create a ticket event."""
event = build_event(event_type, ticket_id, data)
db.session.add(event)
db.session.commit()
def build_event(event_type: str, ticket_id: TicketID, data: TicketEventData
) -> TicketEvent:
"""Assemble, but not persist, a ticket event."""
now = datetime.utcnow()
return TicketEvent(now, event_type, ticket_id, data)
def get_events_for_ticket(ticket_id: TicketID) -> List[TicketEvent]:
"""Return the events for that ticket."""
return TicketEvent.query \
.filter_by(ticket_id=ticket_id) \
.order_by(TicketEvent.occurred_at) \
.all()
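# Illustrative usage sketch (the event type and data values are made up):
#
#   create_event('checked_in', ticket_id, {'initiator_id': str(admin_id)})
#   events = get_events_for_ticket(ticket_id)   # ordered by occurred_at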
|
karimbahgat/Pipy
|
refs/heads/master
|
pipy/pip/commands/search.py
|
343
|
from __future__ import absolute_import
import logging
import sys
import textwrap
from pip.basecommand import Command, SUCCESS
from pip.compat import OrderedDict
from pip.download import PipXmlrpcTransport
from pip.models import PyPI
from pip.utils import get_terminal_size
from pip.utils.logging import indent_log
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor import pkg_resources
from pip._vendor.six.moves import xmlrpc_client
logger = logging.getLogger(__name__)
class SearchCommand(Command):
"""Search for PyPI packages whose name or summary contains <query>."""
name = 'search'
usage = """
%prog [options] <query>"""
summary = 'Search PyPI for packages.'
def __init__(self, *args, **kw):
super(SearchCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-i', '--index',
dest='index',
metavar='URL',
default=PyPI.pypi_url,
help='Base URL of Python Package Index (default %default)')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
pypi_hits = self.search(query, options)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, options):
index_url = options.index
with self._build_session(options) as session:
transport = PipXmlrpcTransport(index_url, session)
pypi = xmlrpc_client.ServerProxy(index_url, transport)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = OrderedDict()
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
if name not in packages.keys():
packages[name] = {
'name': name,
'summary': summary,
'versions': [version],
}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
return list(packages.values())
def print_results(hits, name_column_width=None, terminal_width=None):
if not hits:
return
if name_column_width is None:
name_column_width = max([
len(hit['name']) + len(hit.get('versions', ['-'])[-1])
for hit in hits
]) + 4
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
version = hit.get('versions', ['-'])[-1]
if terminal_width is not None:
target_width = terminal_width - name_column_width - 5
if target_width > 10:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, target_width)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%-*s - %s' % (name_column_width,
'%s (%s)' % (name, version), summary)
try:
logger.info(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
with indent_log():
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.info('INSTALLED: %s (latest)', dist.version)
else:
logger.info('INSTALLED: %s', dist.version)
logger.info('LATEST: %s', latest)
except UnicodeEncodeError:
pass
def highest_version(versions):
return max(versions, key=parse_version)
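# Illustrative example of transform_hits() (the hit data is made up):
#
#   hits = [
#       {'name': 'foo', 'summary': 'old summary', 'version': '1.0'},
#       {'name': 'foo', 'summary': 'new summary', 'version': '2.0'},
#   ]
#   transform_hits(hits)
#   # -> [{'name': 'foo', 'summary': 'new summary', 'versions': ['1.0', '2.0']}]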
|
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
tools/grit/grit/node/custom/filename.py
|
62
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A CustomType for filenames.'''
from grit import clique
from grit import lazy_re
class WindowsFilename(clique.CustomType):
'''Validates that messages can be used as Windows filenames, and strips
illegal characters out of translations.
'''
BANNED = lazy_re.compile('\+|:|\/|\\\\|\*|\?|\"|\<|\>|\|')
def Validate(self, message):
return not self.BANNED.search(message.GetPresentableContent())
def ValidateAndModify(self, lang, translation):
is_ok = self.Validate(translation)
self.ModifyEachTextPart(lang, translation)
return is_ok
def ModifyTextPart(self, lang, text):
return self.BANNED.sub(' ', text)
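# Illustrative behaviour sketch (the example string is made up): BANNED matches
# the characters  + : / \ * ? " < > |  so ModifyTextPart() would rewrite a
# translation such as 'my:file?.txt' to 'my file .txt'.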
|
shams169/pythonProject
|
refs/heads/master
|
ContactsDir/env/lib/python3.6/site-packages/setuptools/site-patch.py
|
356
|
def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys, 'path_importer_cache', {})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
for item in stdpath:
if item == mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site', [item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site', stream, path, descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
known_paths = dict([(makepath(item)[1], 1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys, '__egginsert', 0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np == nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__ == 'site':
__boot()
del __boot
|
axaxs/PackageKit-0.8.17
|
refs/heads/master
|
lib/python/packagekit/progress.py
|
5
|
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright (C) 2008
# Richard Hughes <richard@hughsie.com>
class PackagekitProgress:
'''
Progress class that controls the total progress of a transaction.
The transaction is divided into n milestones. The class contains
the current step (milestone n -> n+1) and the percentage of the whole transaction
Usage:
from packagekit import PackagekitProgress
steps = [10, 30, 50, 70] # Milestones in %
progress = PackagekitProgress()
progress.set_steps(steps)
for milestone in range(len(steps)):
# do the action in this step
for i in range(100):
# do some action
print "progress : %s " % progress.percent
progress.step() # step to next milestone
'''
#TODO: Add support for elapsed/remaining time
def __init__(self):
self.percent = 0
self.steps = []
self.current_step = 0
def set_steps(self, steps):
'''
Set the steps for the whole transaction
@param steps: list of int representing the percentage of each step in the transaction
'''
self.reset()
self.steps = steps
self.current_step = 0
def reset(self):
self.percent = 0
self.steps = []
self.current_step = 0
def step(self):
'''
Step to the next step in the transaction
'''
if self.current_step < len(self.steps)-1:
self.current_step += 1
self.percent = self.steps[self.current_step]
else:
self.percent = 100
def _update_percent(self):
'''
Increment percentage based on current step
'''
if self.current_step == 0:
startpct = 0
else:
startpct = self.steps[self.current_step-1]
if self.current_step < len(self.steps)-1:
endpct = self.steps[self.current_step+1]
else:
endpct = 100
incr = endpct - startpct
self.percent = startpct + incr
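# Illustrative usage sketch (mirrors the docstring above; milestone values are
# made up):
#
#   progress = PackagekitProgress()
#   progress.set_steps([10, 30, 50, 70])
#   for _ in range(4):
#       progress.step()        # percent advances along the milestones
#   # progress.percent is now 100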
|
enolfc/oauthenticator
|
refs/heads/master
|
oauthenticator/tests/test_google.py
|
1
|
from unittest.mock import Mock
from pytest import fixture, mark, raises
from tornado.web import Application, HTTPError
from ..google import GoogleOAuthenticator, GoogleOAuthHandler
from .mocks import setup_oauth_mock
def user_model(email):
"""Return a user model"""
return {
'email': email,
'hd': email.split('@')[1],
}
import re
@fixture
def google_client(client):
setup_oauth_mock(client,
host=['accounts.google.com', 'www.googleapis.com'],
access_token_path=re.compile('^(/o/oauth2/token|/oauth2/v4/token)$'),
user_path='/oauth2/v1/userinfo',
)
original_handler_for_user = client.handler_for_user
# testing Google is harder because it invokes methods inherited from tornado
# classes
def handler_for_user(user):
mock_handler = original_handler_for_user(user)
mock_handler.request.connection = Mock()
real_handler = GoogleOAuthHandler(
application=Application(hub=mock_handler.hub),
request=mock_handler.request,
)
return real_handler
client.handler_for_user = handler_for_user
return client
@mark.gen_test
def test_google(google_client):
authenticator = GoogleOAuthenticator()
handler = google_client.handler_for_user(user_model('fake@email.com'))
user_info = yield authenticator.authenticate(handler)
assert sorted(user_info) == ['auth_state', 'name']
name = user_info['name']
assert name == 'fake@email.com'
auth_state = user_info['auth_state']
assert 'access_token' in auth_state
assert 'google_user' in auth_state
@mark.gen_test
def test_hosted_domain(google_client):
authenticator = GoogleOAuthenticator(hosted_domain='email.com')
handler = google_client.handler_for_user(user_model('fake@email.com'))#, authenticator)
user_info = yield authenticator.authenticate(handler)
name = user_info['name']
assert name == 'fake'
handler = google_client.handler_for_user(user_model('notallowed@notemail.com'))
with raises(HTTPError) as exc:
name = yield authenticator.authenticate(handler)
assert exc.value.status_code == 403
|
SeanCameronConklin/aima-python
|
refs/heads/master
|
submissions/Porter/vaccuum.py
|
18
|
import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
"Randomly choose one of the actions from the vacuum environment."
p = ag.RandomAgentProgram(['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp'])
return ag.Agent(p)
def TableDrivenVacuumAgent():
"[Figure 2.3]"
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
p = ag.TableDrivenAgentProgram(table)
return ag.Agent(p)
def ReflexVacuumAgent():
"A reflex agent for the two-state vacuum environment. [Figure 2.8]"
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
def ModelBasedVacuumAgent() -> object:
"An agent that keeps track of what locations are clean or dirty."
model = {loc_A: None, loc_B: None}
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super(TrivialVacuumEnvironment, self).__init__()
choice = random.randint(0, 3)
if choice % 2: # 1 or 3
self.add_thing(Dirt(), loc_A)
if choice > 1: # 2 or 3
self.add_thing(Dirt(), loc_B)
def percept(self, agent):
"Returns the agent's location, and the location status (Dirty/Clean)."
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
return (agent.location, status)
#
# def execute_action(self, agent, action):
# """Change agent's location and/or location's status; track performance.
# Score 10 for each dirt cleaned; -1 for each move."""
# if action == 'Right':
# agent.location = loc_B
# agent.performance -= 1
# elif action == 'Left':
# agent.location = loc_A
# agent.performance -= 1
# elif action == 'Suck':
# if self.status[agent.location] == 'Dirty':
# agent.performance += 10
# self.status[agent.location] = 'Clean'
#
def add_agent(self, a):
"Agents start in either location at random."
super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vaccuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
c.mapImageNames({
#ag.Wall: '../images/wall.jpg',
ag.Wall: 'submissions/Porter/dog.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
|
weimingtom/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/src/gdata/tlslite/X509CertChain.py
|
238
|
"""Class representing an X.509 certificate chain."""
from utils import cryptomath
class X509CertChain:
"""This class represents a chain of X.509 certificates.
@type x509List: list
@ivar x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
def __init__(self, x509List=None):
"""Create a new X509CertChain.
@type x509List: list
@param x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
if x509List:
self.x509List = x509List
else:
self.x509List = []
def getNumCerts(self):
"""Get the number of certificates in this chain.
@rtype: int
"""
return len(self.x509List)
def getEndEntityPublicKey(self):
"""Get the public key from the end-entity certificate.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].publicKey
def getFingerprint(self):
"""Get the hex-encoded fingerprint of the end-entity certificate.
@rtype: str
@return: A hex-encoded fingerprint.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getFingerprint()
def getCommonName(self):
"""Get the Subject's Common Name from the end-entity certificate.
The cryptlib_py module must be installed in order to use this
function.
@rtype: str or None
@return: The CN component of the certificate's subject DN, if
present.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getCommonName()
def validate(self, x509TrustList):
"""Check the validity of the certificate chain.
This checks that every certificate in the chain validates with
the subsequent one, until some certificate validates with (or
is identical to) one of the passed-in root certificates.
The cryptlib_py module must be installed in order to use this
function.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
certificate chain must extend to one of these certificates to
be considered valid.
"""
import cryptlib_py
c1 = None
c2 = None
lastC = None
rootC = None
try:
rootFingerprints = [c.getFingerprint() for c in x509TrustList]
#Check that every certificate in the chain validates with the
#next one
for cert1, cert2 in zip(self.x509List, self.x509List[1:]):
#If we come upon a root certificate, we're done.
if cert1.getFingerprint() in rootFingerprints:
return True
c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
try:
cryptlib_py.cryptCheckCert(c1, c2)
except:
return False
cryptlib_py.cryptDestroyCert(c1)
c1 = None
cryptlib_py.cryptDestroyCert(c2)
c2 = None
#If the last certificate is one of the root certificates, we're
#done.
if self.x509List[-1].getFingerprint() in rootFingerprints:
return True
#Otherwise, find a root certificate that the last certificate
#chains to, and validate them.
lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(),
cryptlib_py.CRYPT_UNUSED)
for rootCert in x509TrustList:
rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
if self._checkChaining(lastC, rootC):
try:
cryptlib_py.cryptCheckCert(lastC, rootC)
return True
except:
return False
return False
finally:
if not (c1 is None):
cryptlib_py.cryptDestroyCert(c1)
if not (c2 is None):
cryptlib_py.cryptDestroyCert(c2)
if not (lastC is None):
cryptlib_py.cryptDestroyCert(lastC)
if not (rootC is None):
cryptlib_py.cryptDestroyCert(rootC)
def _checkChaining(self, lastC, rootC):
import cryptlib_py
import array
def compareNames(name):
try:
length = cryptlib_py.cryptGetAttributeString(lastC, name, None)
lastName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(lastC, name, lastName)
lastName = lastName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
lastName = None
try:
length = cryptlib_py.cryptGetAttributeString(rootC, name, None)
rootName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(rootC, name, rootName)
rootName = rootName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
rootName = None
return lastName == rootName
cryptlib_py.cryptSetAttribute(lastC,
cryptlib_py.CRYPT_CERTINFO_ISSUERNAME,
cryptlib_py.CRYPT_UNUSED)
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME):
return False
return True
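# Illustrative usage sketch (variable names are made up; validate() also
# requires the optional cryptlib_py module):
#
#   chain = X509CertChain([end_entity_cert, intermediate_cert])
#   chain.getNumCerts()            # -> 2
#   chain.getFingerprint()         # fingerprint of end_entity_cert
#   chain.validate([root_cert])    # True if the chain reaches root_cert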
|
jonhadfield/linkchecker
|
refs/heads/master
|
linkcheck/plugins/viruscheck.py
|
9
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Check page content for virus infection with clamav.
"""
import os
import socket
from . import _ContentPlugin
from .. import log, LOG_PLUGIN
from ..socketutil import create_socket
class VirusCheck(_ContentPlugin):
"""Checks the page content for virus infections with clamav.
A local clamav daemon must be installed."""
def __init__(self, config):
"""Initialize clamav configuration."""
super(VirusCheck, self).__init__(config)
# XXX read config
self.clamav_conf = get_clamav_conf(canonical_clamav_conf())
if not self.clamav_conf:
log.warn(LOG_PLUGIN, "clamav daemon not found for VirusCheck plugin")
def applies_to(self, url_data):
"""Check for clamav and extern."""
return self.clamav_conf and not url_data.extern[0]
def check(self, url_data):
"""Try to ask GeoIP database for country info."""
data = url_data.get_content()
infected, errors = scan(data, self.clamav_conf)
if infected or errors:
for msg in infected:
url_data.add_warning(u"Virus scan infection: %s" % msg)
for msg in errors:
url_data.add_warning(u"Virus scan error: %s" % msg)
else:
url_data.add_info("No viruses in data found.")
@classmethod
def read_config(cls, configparser):
"""Read configuration file options."""
config = dict()
section = cls.__name__
option = "clamavconf"
if configparser.has_option(section, option):
value = configparser.get(section, option)
else:
value = None
config[option] = value
return config
class ClamavError (Exception):
"""Raised on clamav errors."""
pass
class ClamdScanner (object):
"""Virus scanner using a clamd daemon process."""
def __init__ (self, clamav_conf):
"""Initialize clamd daemon process sockets."""
self.infected = []
self.errors = []
self.sock, self.host = clamav_conf.new_connection()
self.sock_rcvbuf = \
self.sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
self.wsock = self.new_scansock()
def new_scansock (self):
"""Return a connected socket for sending scan data to it."""
port = None
try:
self.sock.sendall("STREAM")
port = None
for dummy in range(60):
data = self.sock.recv(self.sock_rcvbuf)
i = data.find("PORT")
if i != -1:
port = int(data[i+5:])
break
except socket.error:
self.sock.close()
raise
if port is None:
raise ClamavError(_("clamd is not ready for stream scanning"))
sockinfo = get_sockinfo(self.host, port=port)
wsock = create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
wsock.connect(sockinfo[0][4])
except socket.error:
wsock.close()
raise
return wsock
def scan (self, data):
"""Scan given data for viruses."""
self.wsock.sendall(data)
def close (self):
"""Get results and close clamd daemon sockets."""
self.wsock.close()
data = self.sock.recv(self.sock_rcvbuf)
while data:
if "FOUND\n" in data:
self.infected.append(data)
if "ERROR\n" in data:
self.errors.append(data)
data = self.sock.recv(self.sock_rcvbuf)
self.sock.close()
def canonical_clamav_conf ():
"""Default clamav configs for various platforms."""
if os.name == 'posix':
clamavconf = "/etc/clamav/clamd.conf"
elif os.name == 'nt':
clamavconf = r"c:\clamav-devel\etc\clamd.conf"
else:
clamavconf = "clamd.conf"
return clamavconf
def get_clamav_conf(filename):
"""Initialize clamav configuration."""
if os.path.isfile(filename):
return ClamavConfig(filename)
log.warn(LOG_PLUGIN, "No ClamAV config file found at %r.", filename)
def get_sockinfo (host, port=None):
"""Return socket.getaddrinfo for given host and port."""
family, socktype = socket.AF_INET, socket.SOCK_STREAM
return socket.getaddrinfo(host, port, family, socktype)
class ClamavConfig (dict):
"""Clamav configuration wrapper, with clamd connection method."""
def __init__ (self, filename):
"""Parse clamav configuration file."""
super(ClamavConfig, self).__init__()
self.parseconf(filename)
if self.get('ScannerDaemonOutputFormat'):
raise ClamavError(_("ScannerDaemonOutputFormat must be disabled"))
if self.get('TCPSocket') and self.get('LocalSocket'):
raise ClamavError(_("only one of TCPSocket and LocalSocket must be enabled"))
def parseconf (self, filename):
"""Parse clamav configuration from given file."""
with open(filename) as fd:
# yet another config format, sigh
for line in fd:
line = line.strip()
if not line or line.startswith("#"):
# ignore empty lines and comments
continue
split = line.split(None, 1)
if len(split) == 1:
self[split[0]] = True
else:
self[split[0]] = split[1]
def new_connection (self):
"""Connect to clamd for stream scanning.
@return: tuple (connected socket, host)
"""
if self.get('LocalSocket'):
host = 'localhost'
sock = self.create_local_socket()
elif self.get('TCPSocket'):
host = self.get('TCPAddr', 'localhost')
sock = self.create_tcp_socket(host)
else:
raise ClamavError(_("one of TCPSocket or LocalSocket must be enabled"))
return sock, host
def create_local_socket (self):
"""Create local socket, connect to it and return socket object."""
sock = create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
addr = self['LocalSocket']
try:
sock.connect(addr)
except socket.error:
sock.close()
raise
return sock
def create_tcp_socket (self, host):
"""Create tcp socket, connect to it and return socket object."""
port = int(self['TCPSocket'])
sockinfo = get_sockinfo(host, port=port)
sock = create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(sockinfo[0][4])
except socket.error:
sock.close()
raise
return sock
def scan (data, clamconf):
"""Scan data for viruses.
@return (infection msgs, errors)
@rtype ([], [])
"""
try:
scanner = ClamdScanner(clamconf)
except socket.error:
errmsg = _("Could not connect to ClamAV daemon.")
return ([], [errmsg])
try:
scanner.scan(data)
finally:
scanner.close()
return scanner.infected, scanner.errors
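# Illustrative usage sketch (the file name is made up; requires a running
# clamd daemon and a readable clamd.conf):
#
#   conf = get_clamav_conf(canonical_clamav_conf())
#   if conf:
#       infected, errors = scan(open("download.bin", "rb").read(), conf)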
|
JenSte/pyqtgraph
|
refs/heads/develop
|
examples/parallelize.py
|
22
|
# -*- coding: utf-8 -*-
import initExample ## Add path to library (just for examples; you do not need this)
import time
import numpy as np
import pyqtgraph.multiprocess as mp
import pyqtgraph as pg
from pyqtgraph.python2_3 import xrange
print( "\n=================\nParallelize")
## Do a simple task:
## for x in range(N):
## sum([x*i for i in range(M)])
##
## We'll do this three times
## - once without Parallelize
## - once with Parallelize, but forced to use a single worker
## - once with Parallelize automatically determining how many workers to use
##
tasks = range(10)
results = [None] * len(tasks)
results2 = results[:]
results3 = results[:]
size = 2000000
pg.mkQApp()
### Purely serial processing
start = time.time()
with pg.ProgressDialog('processing serially..', maximum=len(tasks)) as dlg:
for i, x in enumerate(tasks):
tot = 0
for j in xrange(size):
tot += j * x
results[i] = tot
dlg += 1
if dlg.wasCanceled():
raise Exception('processing canceled')
print( "Serial time: %0.2f" % (time.time() - start))
### Use parallelize, but force a single worker
### (this simulates the behavior seen on windows, which lacks os.fork)
start = time.time()
with mp.Parallelize(enumerate(tasks), results=results2, workers=1, progressDialog='processing serially (using Parallelizer)..') as tasker:
for i, x in tasker:
tot = 0
for j in xrange(size):
tot += j * x
tasker.results[i] = tot
print( "\nParallel time, 1 worker: %0.2f" % (time.time() - start))
print( "Results match serial: %s" % str(results2 == results))
### Use parallelize with multiple workers
start = time.time()
with mp.Parallelize(enumerate(tasks), results=results3, progressDialog='processing in parallel..') as tasker:
for i, x in tasker:
tot = 0
for j in xrange(size):
tot += j * x
tasker.results[i] = tot
print( "\nParallel time, %d workers: %0.2f" % (mp.Parallelize.suggestedWorkerCount(), time.time() - start))
print( "Results match serial: %s" % str(results3 == results))
|
matrixise/odoo
|
refs/heads/8.0
|
addons/delivery/delivery.py
|
72
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
class delivery_carrier(osv.osv):
_name = "delivery.carrier"
_description = "Carrier"
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
if context is None:
context = {}
order_id = context.get('order_id',False)
if not order_id:
res = super(delivery_carrier, self).name_get(cr, uid, ids, context=context)
else:
order = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
currency = order.pricelist_id.currency_id.name or ''
res = [(r['id'], r['name']+' ('+(str(r['price']))+' '+currency+')') for r in self.read(cr, uid, ids, ['name', 'price'], context)]
return res
def get_price(self, cr, uid, ids, field_name, arg=None, context=None):
res={}
if context is None:
context = {}
sale_obj=self.pool.get('sale.order')
grid_obj=self.pool.get('delivery.grid')
for carrier in self.browse(cr, uid, ids, context=context):
order_id=context.get('order_id',False)
price=False
available = False
if order_id:
order = sale_obj.browse(cr, uid, order_id, context=context)
carrier_grid=self.grid_get(cr,uid,[carrier.id],order.partner_shipping_id.id,context)
if carrier_grid:
try:
price=grid_obj.get_price(cr, uid, carrier_grid, order, time.strftime('%Y-%m-%d'), context)
available = True
except osv.except_osv, e:
# no suitable delivery method found, probably configuration error
_logger.error("Carrier %s: %s\n%s" % (carrier.name, e.name, e.value))
price = 0.0
else:
price = 0.0
res[carrier.id] = {
'price': price,
'available': available
}
return res
_columns = {
'name': fields.char('Delivery Method', required=True),
'partner_id': fields.many2one('res.partner', 'Transport Company', required=True, help="The partner that is doing the delivery service."),
'product_id': fields.many2one('product.product', 'Delivery Product', required=True),
'grids_id': fields.one2many('delivery.grid', 'carrier_id', 'Delivery Grids'),
'available' : fields.function(get_price, string='Available',type='boolean', multi='price',
help="Is the carrier method possible with the current order."),
'price' : fields.function(get_price, string='Price', multi='price'),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery carrier without removing it."),
'normal_price': fields.float('Normal Price', help="Keep empty if the pricing depends on the advanced pricing per destination"),
'free_if_more_than': fields.boolean('Free If Order Total Amount Is More Than', help="If the order is more expensive than a certain amount, the customer can benefit from a free shipping"),
'amount': fields.float('Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency"),
'use_detailed_pricelist': fields.boolean('Advanced Pricing per Destination', help="Check this box if you want to manage delivery prices that depends on the destination, the weight, the total of the order, etc."),
'pricelist_ids': fields.one2many('delivery.grid', 'carrier_id', 'Advanced Pricing'),
}
_defaults = {
'active': 1,
'free_if_more_than': False,
}
def grid_get(self, cr, uid, ids, contact_id, context=None):
contact = self.pool.get('res.partner').browse(cr, uid, contact_id, context=context)
for carrier in self.browse(cr, uid, ids, context=context):
for grid in carrier.grids_id:
get_id = lambda x: x.id
country_ids = map(get_id, grid.country_ids)
state_ids = map(get_id, grid.state_ids)
if country_ids and not contact.country_id.id in country_ids:
continue
if state_ids and not contact.state_id.id in state_ids:
continue
if grid.zip_from and (contact.zip or '') < grid.zip_from:
continue
if grid.zip_to and (contact.zip or '') > grid.zip_to:
continue
return grid.id
return False
def create_grid_lines(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
grid_line_pool = self.pool.get('delivery.grid.line')
grid_pool = self.pool.get('delivery.grid')
for record in self.browse(cr, uid, ids, context=context):
# if using advanced pricing per destination: do not change
if record.use_detailed_pricelist:
continue
# not using advanced pricing per destination: override grid
grid_id = grid_pool.search(cr, uid, [('carrier_id', '=', record.id)], context=context)
if grid_id and not (record.normal_price or record.free_if_more_than):
grid_pool.unlink(cr, uid, grid_id, context=context)
# Check that float, else 0.0 is False
if not (isinstance(record.normal_price,float) or record.free_if_more_than):
continue
if not grid_id:
grid_data = {
'name': record.name,
'carrier_id': record.id,
'sequence': 10,
}
grid_id = [grid_pool.create(cr, uid, grid_data, context=context)]
lines = grid_line_pool.search(cr, uid, [('grid_id','in',grid_id)], context=context)
if lines:
grid_line_pool.unlink(cr, uid, lines, context=context)
#create the grid lines
if record.free_if_more_than:
line_data = {
'grid_id': grid_id and grid_id[0],
'name': _('Free if more than %.2f') % record.amount,
'type': 'price',
'operator': '>=',
'max_value': record.amount,
'standard_price': 0.0,
'list_price': 0.0,
}
grid_line_pool.create(cr, uid, line_data, context=context)
if isinstance(record.normal_price,float):
line_data = {
'grid_id': grid_id and grid_id[0],
'name': _('Default price'),
'type': 'price',
'operator': '>=',
'max_value': 0.0,
'standard_price': record.normal_price,
'list_price': record.normal_price,
}
grid_line_pool.create(cr, uid, line_data, context=context)
return True
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int,long)):
ids = [ids]
res = super(delivery_carrier, self).write(cr, uid, ids, vals, context=context)
self.create_grid_lines(cr, uid, ids, vals, context=context)
return res
def create(self, cr, uid, vals, context=None):
res_id = super(delivery_carrier, self).create(cr, uid, vals, context=context)
self.create_grid_lines(cr, uid, [res_id], vals, context=context)
return res_id
class delivery_grid(osv.osv):
_name = "delivery.grid"
_description = "Delivery Grid"
_columns = {
'name': fields.char('Grid Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of delivery grid."),
'carrier_id': fields.many2one('delivery.carrier', 'Carrier', required=True, ondelete='cascade'),
'country_ids': fields.many2many('res.country', 'delivery_grid_country_rel', 'grid_id', 'country_id', 'Countries'),
'state_ids': fields.many2many('res.country.state', 'delivery_grid_state_rel', 'grid_id', 'state_id', 'States'),
'zip_from': fields.char('Start Zip', size=12),
'zip_to': fields.char('To Zip', size=12),
'line_ids': fields.one2many('delivery.grid.line', 'grid_id', 'Grid Line', copy=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery grid without removing it."),
}
_defaults = {
'active': lambda *a: 1,
'sequence': lambda *a: 1,
}
_order = 'sequence'
def get_price(self, cr, uid, id, order, dt, context=None):
total = 0
weight = 0
volume = 0
quantity = 0
product_uom_obj = self.pool.get('product.uom')
for line in order.order_line:
if not line.product_id or line.is_delivery:
continue
q = product_uom_obj._compute_qty(cr, uid, line.product_uom.id, line.product_uom_qty, line.product_id.uom_id.id)
weight += (line.product_id.weight or 0.0) * q
volume += (line.product_id.volume or 0.0) * q
quantity += q
total = order.amount_total or 0.0
return self.get_price_from_picking(cr, uid, id, total,weight, volume, quantity, context=context)
def get_price_from_picking(self, cr, uid, id, total, weight, volume, quantity, context=None):
grid = self.browse(cr, uid, id, context=context)
price = 0.0
ok = False
price_dict = {'price': total, 'volume':volume, 'weight': weight, 'wv':volume*weight, 'quantity': quantity}
for line in grid.line_ids:
test = eval(line.type+line.operator+str(line.max_value), price_dict)
if test:
if line.price_type=='variable':
price = line.list_price * price_dict[line.variable_factor]
else:
price = line.list_price
ok = True
break
if not ok:
raise osv.except_osv(_("Unable to fetch delivery method!"), _("Selected product in the delivery method doesn't fulfill any of the delivery grid(s) criteria."))
return price
class delivery_grid_line(osv.osv):
_name = "delivery.grid.line"
_description = "Delivery Grid Line"
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when calculating delivery grid."),
'grid_id': fields.many2one('delivery.grid', 'Grid',required=True, ondelete='cascade'),
'type': fields.selection([('weight','Weight'),('volume','Volume'),\
('wv','Weight * Volume'), ('price','Price'), ('quantity','Quantity')],\
'Variable', required=True),
'operator': fields.selection([('==','='),('<=','<='),('<','<'),('>=','>='),('>','>')], 'Operator', required=True),
'max_value': fields.float('Maximum Value', required=True),
'price_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Price Type', required=True),
'variable_factor': fields.selection([('weight','Weight'),('volume','Volume'),('wv','Weight * Volume'), ('price','Price'), ('quantity','Quantity')], 'Variable Factor', required=True),
'list_price': fields.float('Sale Price', digits_compute= dp.get_precision('Product Price'), required=True),
'standard_price': fields.float('Cost Price', digits_compute= dp.get_precision('Product Price'), required=True),
}
_defaults = {
'sequence': lambda *args: 10,
'type': lambda *args: 'weight',
'operator': lambda *args: '<=',
'price_type': lambda *args: 'fixed',
'variable_factor': lambda *args: 'weight',
}
_order = 'sequence, list_price'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
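# Illustrative note on grid line evaluation (the values are made up): for a
# line with type='weight', operator='<=', max_value=5.0,
# get_price_from_picking() builds the expression "weight<=5.0" and evaluates
# it against {'price': ..., 'weight': ..., 'volume': ..., 'wv': ...,
# 'quantity': ...}; the first matching line sets the price, either the fixed
# list_price or list_price multiplied by the chosen variable_factor.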
|
alshedivat/tensorflow
|
refs/heads/master
|
tensorflow/contrib/factorization/python/ops/gmm_ops.py
|
13
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian mixture models Operations."""
# TODO(xavigonzalvo): Factor out covariance matrix operations to make
# code reusable for different types (e.g. diag).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
# Machine epsilon.
MEPS = np.finfo(float).eps
FULL_COVARIANCE = 'full'
DIAG_COVARIANCE = 'diag'
def _covariance(x, diag):
"""Defines the covariance operation of a matrix.
Args:
x: a matrix Tensor. Dimension 0 should contain the number of examples.
diag: if True, it computes the diagonal covariance.
Returns:
A Tensor representing the covariance of x. In the case of
diagonal matrix just the diagonal is returned.
"""
num_points = math_ops.to_float(array_ops.shape(x)[0])
x -= math_ops.reduce_mean(x, 0, keepdims=True)
if diag:
cov = math_ops.reduce_sum(
math_ops.square(x), 0, keepdims=True) / (num_points - 1)
else:
cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
return cov
def _init_clusters_random(data, num_clusters, random_seed):
"""Does random initialization of clusters.
Args:
data: a list of Tensors with a matrix of data, each row is an example.
num_clusters: an integer with the number of clusters.
random_seed: Seed for PRNG used to initialize seeds.
Returns:
A Tensor with num_clusters random rows of data.
"""
assert isinstance(data, list)
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
with ops.control_dependencies(
[check_ops.assert_less_equal(num_clusters, num_data)]):
indices = random_ops.random_uniform(
[num_clusters],
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=random_seed,
dtype=dtypes.int64)
indices %= math_ops.cast(num_data, dtypes.int64)
clusters_init = embedding_lookup(data, indices, partition_strategy='div')
return clusters_init
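# Illustrative shape sketch for the two helpers above (the sizes are made up):
# with data = [x] where x has shape [100, 3], _covariance(x, diag=False)
# returns a [3, 3] matrix, _covariance(x, diag=True) returns a [1, 3] row, and
# _init_clusters_random(data, num_clusters=4, random_seed=0) returns 4
# randomly chosen rows of x, i.e. a tensor of shape [4, 3].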
class GmmAlgorithm(object):
"""Tensorflow Gaussian mixture model clustering class."""
CLUSTERS_WEIGHT = 'alphas'
CLUSTERS_VARIABLE = 'clusters'
CLUSTERS_COVS_VARIABLE = 'clusters_covs'
def __init__(self,
data,
num_classes,
initial_means=None,
params='wmc',
covariance_type=FULL_COVARIANCE,
random_seed=0):
"""Constructor.
Args:
data: a list of Tensors with data, each row is a new example.
num_classes: number of clusters.
initial_means: a Tensor with a matrix of means. If None, means are
computed by sampling randomly.
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covariances.
covariance_type: one of "full", "diag".
      random_seed: Seed for the PRNG used to initialize the cluster centers.
Raises:
Exception if covariance type is unknown.
"""
self._params = params
self._random_seed = random_seed
self._covariance_type = covariance_type
if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]:
raise Exception( # pylint: disable=g-doc-exception
'programmer error: Invalid covariance type: %s' %
self._covariance_type)
# Create sharded variables for multiple shards. The following
# lists are indexed by shard.
# Probability per example in a class.
num_shards = len(data)
self._probs = [None] * num_shards
# Prior probability.
self._prior_probs = [None] * num_shards
# Membership weights w_{ik} where "i" is the i-th example and "k"
# is the k-th mixture.
self._w = [None] * num_shards
# Number of examples in a class.
self._points_in_k = [None] * num_shards
first_shard = data[0]
self._dimensions = array_ops.shape(first_shard)[1]
self._num_classes = num_classes
# Small value to guarantee that covariances are invertible.
self._min_var = array_ops.diag(
array_ops.ones(array_ops.stack([self._dimensions]))) * 1e-3
self._create_variables()
self._initialize_variables(data, initial_means)
# Operations of partial statistics for the computation of the means.
self._w_mul_x = []
# Operations of partial statistics for the computation of the covariances.
self._w_mul_x2 = []
self._define_graph(data)
def _create_variables(self):
"""Initializes GMM algorithm."""
init_value = array_ops.constant([], dtype=dtypes.float32)
self._means = variables.VariableV1(init_value,
name=self.CLUSTERS_VARIABLE,
validate_shape=False)
self._covs = variables.VariableV1(
init_value, name=self.CLUSTERS_COVS_VARIABLE, validate_shape=False)
# Mixture weights, representing the probability that a randomly
# selected unobservable data (in EM terms) was generated by component k.
self._alpha = variable_scope.variable(
array_ops.tile([1.0 / self._num_classes], [self._num_classes]),
name=self.CLUSTERS_WEIGHT,
validate_shape=False)
self._cluster_centers_initialized = variables.VariableV1(False,
dtype=dtypes.bool,
name='initialized')
def _initialize_variables(self, data, initial_means=None):
"""Initializes variables.
Args:
data: a list of Tensors with data, each row is a new example.
initial_means: a Tensor with a matrix of means.
"""
first_shard = data[0]
# Initialize means: num_classes X 1 X dimensions.
if initial_means is not None:
means = array_ops.expand_dims(initial_means, 1)
else:
# Sample data randomly
means = array_ops.expand_dims(
_init_clusters_random(data, self._num_classes, self._random_seed), 1)
# Initialize covariances.
if self._covariance_type == FULL_COVARIANCE:
cov = _covariance(first_shard, False) + self._min_var
# A matrix per class, num_classes X dimensions X dimensions
covs = array_ops.tile(
array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
elif self._covariance_type == DIAG_COVARIANCE:
cov = _covariance(first_shard, True) + self._min_var
# A diagonal per row, num_classes X dimensions.
covs = array_ops.tile(
array_ops.expand_dims(array_ops.diag_part(cov), 0),
[self._num_classes, 1])
with ops.colocate_with(self._cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[means, covs],
array_ops.identity(self._cluster_centers_initialized))
self._init_ops = []
with ops.colocate_with(self._means):
init_means = state_ops.assign(self._means, means, validate_shape=False)
init_means = control_flow_ops.with_dependencies(
[init_means],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_means).op)
with ops.colocate_with(self._covs):
init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
init_covs = control_flow_ops.with_dependencies(
[init_covs],
state_ops.assign(self._cluster_centers_initialized, True))
self._init_ops.append(control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: init_covs).op)
def init_ops(self):
"""Returns the initialization operation."""
return control_flow_ops.group(*self._init_ops)
def training_ops(self):
"""Returns the training operation."""
return control_flow_ops.group(*self._train_ops)
def is_initialized(self):
"""Returns a boolean operation for initialized variables."""
return self._cluster_centers_initialized
def alphas(self):
return self._alpha
def clusters(self):
"""Returns the clusters with dimensions num_classes X 1 X num_dimensions."""
return self._means
def covariances(self):
"""Returns the covariances matrices."""
return self._covs
def assignments(self):
"""Returns a list of Tensors with the matrix of assignments per shard."""
ret = []
for w in self._w:
ret.append(math_ops.argmax(w, 1))
return ret
def scores(self):
"""Returns the per-sample likelihood fo the data.
Returns:
Log probabilities of each data point.
"""
return self._scores
def log_likelihood_op(self):
"""Returns the log-likelihood operation."""
return self._log_likelihood_op
def _define_graph(self, data):
"""Define graph for a single iteration.
Args:
data: a list of Tensors defining the training data.
"""
for shard_id, shard in enumerate(data):
self._num_examples = array_ops.shape(shard)[0]
shard = array_ops.expand_dims(shard, 0)
self._define_log_prob_operation(shard_id, shard)
self._define_prior_log_prob_operation(shard_id)
self._define_expectation_operation(shard_id)
self._define_partial_maximization_operation(shard_id, shard)
self._define_maximization_operation(len(data))
self._define_loglikelihood_operation()
self._define_score_samples()
def _define_full_covariance_probs(self, shard_id, shard):
"""Defines the full covariance probabilities per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
diff = shard - self._means
cholesky = linalg_ops.cholesky(self._covs + self._min_var)
log_det_covs = 2.0 * math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
x_mu_cov = math_ops.square(
linalg_ops.matrix_triangular_solve(
cholesky, array_ops.transpose(
diff, perm=[0, 2, 1]), lower=True))
diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
* math_ops.log(2 * np.pi) + log_det_covs)
def _define_diag_covariance_probs(self, shard_id, shard):
"""Defines the diagonal covariance probabilities per example in a class.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
    Updates a matrix with dimension num_examples X num_classes.
"""
# num_classes X 1
# TODO(xavigonzalvo): look into alternatives to log for
# reparametrization of variance parameters.
det_expanded = math_ops.reduce_sum(
math_ops.log(self._covs + 1e-3), 1, keepdims=True)
diff = shard - self._means
x2 = math_ops.square(diff)
cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
# num_classes X num_examples
x2_cov = math_ops.matmul(x2, cov_expanded)
x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
self._probs[shard_id] = -0.5 * (
math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
array_ops.transpose(det_expanded) + x2_cov)
def _define_log_prob_operation(self, shard_id, shard):
"""Probability per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
# TODO(xavigonzalvo): Use the pdf defined in
# third_party/tensorflow/contrib/distributions/python/ops/gaussian.py
if self._covariance_type == FULL_COVARIANCE:
self._define_full_covariance_probs(shard_id, shard)
elif self._covariance_type == DIAG_COVARIANCE:
self._define_diag_covariance_probs(shard_id, shard)
self._probs[shard_id] += math_ops.log(self._alpha)
def _define_prior_log_prob_operation(self, shard_id):
"""Computes the prior probability of all samples.
Updates a vector where each item is the prior probability of an
input example.
Args:
shard_id: id of current shard_id.
"""
self._prior_probs[shard_id] = math_ops.reduce_logsumexp(
self._probs[shard_id], axis=1, keepdims=True)
def _define_expectation_operation(self, shard_id):
# Shape broadcasting.
probs = array_ops.expand_dims(self._probs[shard_id], 0)
# Membership weights are computed as:
    # $$w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}
    #            {\sum_{m=1}^{K}\alpha_m f(\mathbf{y_i}|\mathbf{\theta}_m)}$$
# where "i" is the i-th example, "k" is the k-th mixture, theta are
# the model parameters and y_i the observations.
# These are defined for each shard.
self._w[shard_id] = array_ops.reshape(
math_ops.exp(probs - self._prior_probs[shard_id]),
array_ops.stack([self._num_examples, self._num_classes]))
def _define_partial_maximization_operation(self, shard_id, shard):
"""Computes the partial statistics of the means and covariances.
Args:
shard_id: current shard id.
shard: current data shard, 1 X num_examples X dimensions.
"""
    # Soft assignment of each data point to each of the clusters.
self._points_in_k[shard_id] = math_ops.reduce_sum(
self._w[shard_id], 0, keepdims=True)
# Partial means.
w_mul_x = array_ops.expand_dims(
math_ops.matmul(
self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
1)
self._w_mul_x.append(w_mul_x)
# Partial covariances.
x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
x_trans = array_ops.transpose(x, perm=[0, 2, 1])
x_mul_w = array_ops.concat([
array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
for k in range(self._num_classes)
], 0)
self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
def _define_maximization_operation(self, num_batches):
"""Maximization operations."""
# TODO(xavigonzalvo): some of these operations could be moved to C++.
# Compute the effective number of data points assigned to component k.
with ops.control_dependencies(self._w):
points_in_k = array_ops.squeeze(
math_ops.add_n(self._points_in_k), axis=[0])
# Update alpha.
if 'w' in self._params:
final_points_in_k = points_in_k / num_batches
num_examples = math_ops.to_float(math_ops.reduce_sum(final_points_in_k))
self._alpha_op = self._alpha.assign(final_points_in_k /
(num_examples + MEPS))
else:
self._alpha_op = control_flow_ops.no_op()
self._train_ops = [self._alpha_op]
# Update means.
points_in_k_expanded = array_ops.reshape(points_in_k,
[self._num_classes, 1, 1])
if 'm' in self._params:
self._means_op = self._means.assign(
math_ops.div(
math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
else:
self._means_op = control_flow_ops.no_op()
# means are (num_classes x 1 x dims)
# Update covariances.
with ops.control_dependencies([self._means_op]):
b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
new_covs = []
for k in range(self._num_classes):
mean = self._means.value()[k, :, :]
square_mean = math_ops.matmul(mean, mean, transpose_a=True)
new_cov = b[k, :, :] - square_mean + self._min_var
if self._covariance_type == FULL_COVARIANCE:
new_covs.append(array_ops.expand_dims(new_cov, 0))
elif self._covariance_type == DIAG_COVARIANCE:
new_covs.append(
array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
new_covs = array_ops.concat(new_covs, 0)
if 'c' in self._params:
      # Train operations don't need to take care of the means
      # because the covariances already depend on them.
with ops.control_dependencies([self._means_op, new_covs]):
self._train_ops.append(
state_ops.assign(
self._covs, new_covs, validate_shape=False))
def _define_loglikelihood_operation(self):
"""Defines the total log-likelihood of current iteration."""
op = []
for prior_probs in self._prior_probs:
op.append(math_ops.reduce_logsumexp(prior_probs))
self._log_likelihood_op = math_ops.reduce_logsumexp(op)
def _define_score_samples(self):
"""Defines the likelihood of each data sample."""
op = []
for shard_id, prior_probs in enumerate(self._prior_probs):
op.append(prior_probs + math_ops.log(self._w[shard_id]))
self._scores = array_ops.squeeze(
math_ops.reduce_logsumexp(op, axis=2, keepdims=True), axis=0)
def gmm(inp,
initial_clusters,
num_clusters,
random_seed,
covariance_type=FULL_COVARIANCE,
params='wmc'):
"""Creates the graph for Gaussian mixture model (GMM) clustering.
Args:
inp: An input tensor or list of input tensors
initial_clusters: Specifies the clusters used during
initialization. Can be a tensor or numpy array, or a function
that generates the clusters. Can also be "random" to specify
      that clusters should be chosen randomly from input data. Note: the
      type is kept flexible to stay consistent with skflow.
num_clusters: number of clusters.
random_seed: Python integer. Seed for PRNG used to initialize centers.
covariance_type: one of "diag", "full".
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covars.
  Returns:
    Note: tuple of lists returned to be consistent with skflow
    A tuple consisting of:
    loss: the log-likelihood operation of the model.
    scores: the per-sample log-likelihoods of the data.
    assignments: A list with a vector per input shard. Each element in the
      vector corresponds to an input row in 'inp' and specifies the cluster id
      corresponding to the input.
    training_op: an op that runs an iteration of training.
    init_op: an op that runs the initialization.
    is_initialized: a boolean op reporting whether the cluster centers have
      been initialized.
"""
initial_means = None
if initial_clusters != 'random' and not isinstance(initial_clusters,
ops.Tensor):
initial_means = constant_op.constant(initial_clusters, dtype=dtypes.float32)
# Implementation of GMM.
inp = inp if isinstance(inp, list) else [inp]
gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params,
covariance_type, random_seed)
assignments = gmm_tool.assignments()
scores = gmm_tool.scores()
loss = gmm_tool.log_likelihood_op()
return (loss, scores, [assignments], gmm_tool.training_ops(),
gmm_tool.init_ops(), gmm_tool.is_initialized())
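# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It illustrates one way
# the `gmm` graph-construction function above could be driven from a TF1-style
# session loop; the data shape, cluster count and iteration count below are
# hypothetical placeholder values.
if __name__ == '__main__':
  import tensorflow as tf
  samples = np.random.rand(500, 2).astype(np.float32)
  (loss, scores, assignments, training_op, init_op,
   is_initialized) = gmm(constant_op.constant(samples),
                         initial_clusters='random',
                         num_clusters=3,
                         random_seed=42)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(init_op)
    for _ in range(20):  # a few EM iterations
      sess.run(training_op)
    print('log-likelihood:', sess.run(loss))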
|
xavierwu/scikit-learn
|
refs/heads/master
|
sklearn/cluster/tests/test_birch.py
|
342
|
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the leaf subclusters have a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
|
rest-of/the-deck
|
refs/heads/master
|
lambda/lib/python2.7/site-packages/docutils/transforms/parts.py
|
187
|
# $Id: parts.py 6073 2009-08-06 12:21:10Z milde $
# Authors: David Goodger <goodger@python.org>; Ueli Schlaepfer; Dmitry Jemerov
# Copyright: This module has been placed in the public domain.
"""
Transforms related to document parts.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class SectNum(Transform):
"""
Automatically assigns numbers to the titles of document sections.
It is possible to limit the maximum section level for which the numbers
are added. For those sections that are auto-numbered, the "autonum"
attribute is set, informing the contents table generator that a different
form of the TOC should be used.
"""
default_priority = 710
"""Should be applied before `Contents`."""
def apply(self):
self.maxdepth = self.startnode.details.get('depth', None)
self.startvalue = self.startnode.details.get('start', 1)
self.prefix = self.startnode.details.get('prefix', '')
self.suffix = self.startnode.details.get('suffix', '')
self.startnode.parent.remove(self.startnode)
if self.document.settings.sectnum_xform:
if self.maxdepth is None:
self.maxdepth = sys.maxint
self.update_section_numbers(self.document)
else: # store details for eventual section numbering by the writer
self.document.settings.sectnum_depth = self.maxdepth
self.document.settings.sectnum_start = self.startvalue
self.document.settings.sectnum_prefix = self.prefix
self.document.settings.sectnum_suffix = self.suffix
def update_section_numbers(self, node, prefix=(), depth=0):
depth += 1
if prefix:
sectnum = 1
else:
sectnum = self.startvalue
for child in node:
if isinstance(child, nodes.section):
numbers = prefix + (str(sectnum),)
title = child[0]
                # Use no-break spaces (u'\u00a0') for spacing:
generated = nodes.generated(
'', (self.prefix + '.'.join(numbers) + self.suffix
+ u'\u00a0' * 3),
classes=['sectnum'])
title.insert(0, generated)
title['auto'] = 1
if depth < self.maxdepth:
self.update_section_numbers(child, numbers, depth)
sectnum += 1
class Contents(Transform):
"""
This transform generates a table of contents from the entire document tree
or from a single branch. It locates "section" elements and builds them
into a nested bullet list, which is placed within a "topic" created by the
contents directive. A title is either explicitly specified, taken from
the appropriate language module, or omitted (local table of contents).
The depth may be specified. Two-way references between the table of
contents and section titles are generated (requires Writer support).
This transform requires a startnode, which contains generation
options and provides the location for the generated table of contents (the
startnode is replaced by the table of contents "topic").
"""
default_priority = 720
def apply(self):
try: # let the writer (or output software) build the contents list?
toc_by_writer = self.document.settings.use_latex_toc
except AttributeError:
toc_by_writer = False
details = self.startnode.details
if 'local' in details:
startnode = self.startnode.parent.parent
while not (isinstance(startnode, nodes.section)
or isinstance(startnode, nodes.document)):
# find the ToC root: a direct ancestor of startnode
startnode = startnode.parent
else:
startnode = self.document
self.toc_id = self.startnode.parent['ids'][0]
if 'backlinks' in details:
self.backlinks = details['backlinks']
else:
self.backlinks = self.document.settings.toc_backlinks
if toc_by_writer:
# move customization settings to the parent node
self.startnode.parent.attributes.update(details)
self.startnode.parent.remove(self.startnode)
else:
contents = self.build_contents(startnode)
if len(contents):
self.startnode.replace_self(contents)
else:
self.startnode.parent.parent.remove(self.startnode.parent)
def build_contents(self, node, level=0):
level += 1
sections = [sect for sect in node if isinstance(sect, nodes.section)]
entries = []
autonum = 0
depth = self.startnode.details.get('depth', sys.maxint)
for section in sections:
title = section[0]
auto = title.get('auto') # May be set by SectNum.
entrytext = self.copy_and_filter(title)
reference = nodes.reference('', '', refid=section['ids'][0],
*entrytext)
ref_id = self.document.set_id(reference)
entry = nodes.paragraph('', '', reference)
item = nodes.list_item('', entry)
if ( self.backlinks in ('entry', 'top')
and title.next_node(nodes.reference) is None):
if self.backlinks == 'entry':
title['refid'] = ref_id
elif self.backlinks == 'top':
title['refid'] = self.toc_id
if level < depth:
subsects = self.build_contents(section, level)
item += subsects
entries.append(item)
if entries:
contents = nodes.bullet_list('', *entries)
if auto:
contents['classes'].append('auto-toc')
return contents
else:
return []
def copy_and_filter(self, node):
"""Return a copy of a title, with references, images, etc. removed."""
visitor = ContentsFilter(self.document)
node.walkabout(visitor)
return visitor.get_entry_text()
class ContentsFilter(nodes.TreeCopyVisitor):
def get_entry_text(self):
return self.get_tree_copy().children
def visit_citation_reference(self, node):
raise nodes.SkipNode
def visit_footnote_reference(self, node):
raise nodes.SkipNode
def visit_image(self, node):
if node.hasattr('alt'):
self.parent.append(nodes.Text(node['alt']))
raise nodes.SkipNode
def ignore_node_but_process_children(self, node):
raise nodes.SkipDeparture
visit_interpreted = ignore_node_but_process_children
visit_problematic = ignore_node_but_process_children
visit_reference = ignore_node_but_process_children
visit_target = ignore_node_but_process_children
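# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The two transforms
# above are normally triggered by the standard "sectnum" and "contents"
# reStructuredText directives; the snippet below renders a small document
# that exercises both.
if __name__ == '__main__':
    from docutils.core import publish_string
    source = """\
.. sectnum::
   :depth: 2

.. contents:: Table of Contents
   :depth: 2
   :backlinks: entry

First Section
=============

Some text.

A Subsection
------------

More text.
"""
    print(publish_string(source, writer_name='pseudoxml'))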
|
amolkahat/pandas
|
refs/heads/master
|
pandas/tests/generic/test_series.py
|
4
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from operator import methodcaller
import pytest
import numpy as np
import pandas as pd
from distutils.version import LooseVersion
from pandas import Series, date_range, MultiIndex
from pandas.compat import range
from pandas.util.testing import (assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from .test_generic import Generic
try:
import xarray
_XARRAY_INSTALLED = True
except ImportError:
_XARRAY_INSTALLED = False
class TestSeries(Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setup_method(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_set_axis_name(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
funcs = ['rename_axis', '_set_axis_name']
name = 'foo'
for func in funcs:
result = methodcaller(func, name)(s)
assert s.index.name is None
assert result.index.name == name
def test_set_axis_name_mi(self):
s = Series([11, 21, 31], index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]],
names=['l1', 'l2'])
)
funcs = ['rename_axis', '_set_axis_name']
for func in funcs:
result = methodcaller(func, ['L1', 'L2'])(s)
assert s.index.name is None
assert s.index.names == ['l1', 'l2']
assert result.index.name is None
            assert result.index.names == ['L1', 'L2']
def test_set_axis_name_raises(self):
s = pd.Series([1])
with pytest.raises(ValueError):
s._set_axis_name(name='a', axis=1)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
assert s.bool()
s = Series([False])
assert not s.bool()
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
pytest.raises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
pytest.raises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
pytest.raises(ValueError, lambda: bool(s))
pytest.raises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
pytest.raises(ValueError, lambda: bool(s))
pytest.raises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
assert result.filename == 'foo+bar'
assert result.name is None
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and
LooseVersion(xarray.__version__) <
LooseVersion('0.10.0'),
reason='xarray >= 0.10.0 required')
@pytest.mark.parametrize(
"index",
['FloatIndex', 'IntIndex',
'StringIndex', 'UnicodeIndex',
'DateIndex', 'PeriodIndex',
'TimedeltaIndex', 'CategoricalIndex'])
def test_to_xarray_index_types(self, index):
from xarray import DataArray
index = getattr(tm, 'make{}'.format(index))
s = Series(range(6), index=index(6))
s.index.name = 'foo'
result = s.to_xarray()
repr(result)
assert len(result) == 6
assert len(result.coords) == 1
assert_almost_equal(list(result.coords.keys()), ['foo'])
assert isinstance(result, DataArray)
# idempotency
assert_series_equal(result.to_series(), s,
check_index_type=False,
check_categorical=True)
@td.skip_if_no('xarray', min_version='0.7.0')
def test_to_xarray(self):
from xarray import DataArray
s = Series([])
s.index.name = 'foo'
result = s.to_xarray()
assert len(result) == 0
assert len(result.coords) == 1
assert_almost_equal(list(result.coords.keys()), ['foo'])
assert isinstance(result, DataArray)
s = Series(range(6))
s.index.name = 'foo'
s.index = pd.MultiIndex.from_product([['a', 'b'], range(3)],
names=['one', 'two'])
result = s.to_xarray()
assert len(result) == 2
assert_almost_equal(list(result.coords.keys()), ['one', 'two'])
assert isinstance(result, DataArray)
assert_series_equal(result.to_series(), s)
def test_valid_deprecated(self):
# GH18800
with tm.assert_produces_warning(FutureWarning):
pd.Series([]).valid()
@pytest.mark.parametrize("s", [
Series([np.arange(5)]),
pd.date_range('1/1/2011', periods=24, freq='H'),
pd.Series(range(5), index=pd.date_range("2017", periods=5))
])
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, s, shift_size):
# GH22397
assert s.shift(shift_size) is not s
@pytest.mark.parametrize("move_by_freq", [
pd.Timedelta('1D'),
pd.Timedelta('1M'),
])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH22397
s = pd.Series(range(5), index=pd.date_range("2017", periods=5))
assert s.shift(freq=move_by_freq) is not s
|
LLNL/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/py-chardet/package.py
|
3
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyChardet(PythonPackage):
"""Universal encoding detector for Python 2 and 3"""
homepage = "https://github.com/chardet/chardet"
url = "https://pypi.io/packages/source/c/chardet/chardet-3.0.4.tar.gz"
version('3.0.4', sha256='84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae')
version('3.0.2', sha256='4f7832e7c583348a9eddd927ee8514b3bf717c061f57b21dbe7697211454d9bb')
version('2.3.0', sha256='e53e38b3a4afe6d1132de62b7400a4ac363452dc5dfcf8d88e8e0cce663c68aa')
depends_on('py-setuptools', type='build')
depends_on('py-pytest-runner', type='build')
depends_on('py-pytest', type='test')
depends_on('py-hypothesis', type='test')
|
eHealthAfrica/ureport
|
refs/heads/develop
|
ureport/news/migrations/0001_initial.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('categories', '0002_auto_20140820_1415'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('orgs', '0005_orgbackground'),
]
operations = [
migrations.CreateModel(
name='NewsItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('title', models.CharField(help_text='The title for this item', max_length=255)),
('description', models.TextField(help_text='A short summary description for this item', null=True, blank=True)),
('link', models.CharField(help_text=b'A link that should be associated with this item', max_length=255)),
('category', models.ForeignKey(help_text='The category this item belongs to', to='categories.Category')),
('created_by', models.ForeignKey(help_text=b'The user which originally created this item', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(help_text=b'The user which last modified this item', to=settings.AUTH_USER_MODEL)),
('org', models.ForeignKey(help_text=b'The organization this item belongs to', to='orgs.Org')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
('title', models.CharField(help_text='The title for this Video', max_length=255)),
('description', models.TextField(help_text='A short summary description for this video', null=True, blank=True)),
('video_id', models.CharField(help_text=b'The id of the YouTube video that should be linked to this item', max_length=255)),
('category', models.ForeignKey(help_text='The category this item belongs to', to='categories.Category')),
('created_by', models.ForeignKey(help_text=b'The user which originally created this item', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(help_text=b'The user which last modified this item', to=settings.AUTH_USER_MODEL)),
('org', models.ForeignKey(help_text=b'The organization this video belongs to', to='orgs.Org')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
neilLasrado/frappe
|
refs/heads/develop
|
frappe/core/report/permitted_documents_for_user/__init__.py
|
12133432
| |
yuewko/neutron
|
refs/heads/master
|
neutron/plugins/cisco/service_plugins/__init__.py
|
12133432
| |
openstack-ja/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tables.py
|
3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse # noqa
from django.utils.http import urlencode # noqa
from django.utils import safestring
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
import tables as volume_tables
LOG = logging.getLogger(__name__)
class DeleteVolumeSnapshot(tables.DeleteAction):
data_type_singular = _("Volume Snapshot")
data_type_plural = _("Volume Snapshots")
action_past = _("Scheduled deletion of")
def delete(self, request, obj_id):
api.cinder.volume_snapshot_delete(request, obj_id)
class CreateVolumeFromSnapshot(tables.LinkAction):
name = "create_from_snapshot"
verbose_name = _("Create Volume")
url = "horizon:project:volumes:create"
classes = ("ajax-modal", "btn-camera")
def get_link_url(self, datum):
base_url = reverse(self.url)
params = urlencode({"snapshot_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
def allowed(self, request, volume=None):
return volume.status == "available" if volume else False
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, snapshot_id):
snapshot = cinder.volume_snapshot_get(request, snapshot_id)
return snapshot
class SnapshotVolumeNameColumn(tables.Column):
def get_raw_data(self, snapshot):
request = self.table.request
volume_name = api.cinder.volume_get(request,
snapshot.volume_id).display_name
return safestring.mark_safe(volume_name)
def get_link_url(self, snapshot):
volume_id = api.cinder.volume_get(self.table.request,
snapshot.volume_id).id
return reverse(self.link, args=(volume_id,))
class VolumeSnapshotsTable(volume_tables.VolumesTableBase):
name = tables.Column("display_name",
verbose_name=_("Name"),
link="horizon:project:images_and_snapshots:detail")
volume_name = SnapshotVolumeNameColumn("display_name",
verbose_name=_("Volume Name"),
link="horizon:project:volumes:detail")
class Meta:
name = "volume_snapshots"
verbose_name = _("Volume Snapshots")
table_actions = (DeleteVolumeSnapshot,)
row_actions = (CreateVolumeFromSnapshot, DeleteVolumeSnapshot)
row_class = UpdateRow
status_columns = ("status",)
permissions = ['openstack.services.volume']
|
jchuahtacc/WikiNetworking
|
refs/heads/master
|
wikinetworking/clickinfo.py
|
1
|
import matplotlib
import mpld3
## An mpld3 plugin that allows nodes to be clicked to open a URL
# Source: @link <http://stackoverflow.com/a/28838652/814354>
class ClickInfo(mpld3.plugins.PluginBase):
"""mpld3 Plugin for getting info on click
Comes from:
http://stackoverflow.com/a/28838652/814354
"""
JAVASCRIPT = """
mpld3.register_plugin("clickinfo", ClickInfo);
ClickInfo.prototype = Object.create(mpld3.Plugin.prototype);
ClickInfo.prototype.constructor = ClickInfo;
ClickInfo.prototype.requiredProps = ["id", "urls"];
function ClickInfo(fig, props){
mpld3.Plugin.call(this, fig, props);
};
ClickInfo.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var urls = this.props.urls;
var elems = obj.elements();
elems.on("mousedown",
function(d, i){
window.open(urls[i], '_blank');
});
}
"""
## A constructor for this plugin
# @param points A list of matplotlib objects
# @param urls A corresponding list of URLs
def __init__(self, points, urls):
self.points = points
self.urls = urls
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "clickinfo",
"id": mpld3.utils.get_id(points, suffix),
"urls": urls}
|
f95johansson/marvin-transfer
|
refs/heads/master
|
marvin/appdirs.py
|
1
|
# -*- coding: utf-8 -*-
"""Utilities for determining platform-specific config dirs.
This is a modified version of the original appdirs 1.4.0 module, focusing only
on the config directory. It retains a lot of the original code.
See original repository <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
#
# MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
    Typical user config directories are:
Mac OS X: ~/.config/<AppName>
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/.config')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
#---- internal support stuff (for windows)
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
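if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module); the app name and
    # author below are hypothetical placeholders.
    print(user_config_dir("MyApp", appauthor="MyCompany"))
    # e.g. "~/.config/MyApp" on Linux/Mac OS X, an AppData path on Windows.
    print(user_config_dir("MyApp", appauthor="MyCompany", version="1.0",
                          roaming=True))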
|
Back2Basics/validictory
|
refs/heads/master
|
setup.py
|
9
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from validictory import __version__
DESCRIPTION = "general purpose python data validator"
LONG_DESCRIPTION = open('README.rst').read()
setup(name='validictory',
version=__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='James Turk',
author_email='jturk@sunlightfoundation.com',
url='http://github.com/sunlightlabs/validictory',
license="MIT License",
platforms=["any"],
packages=find_packages(),
test_suite="validictory.tests",
)
|
igorg1312/googlepythonsskeleton
|
refs/heads/master
|
lib/markupsafe/_constants.py
|
1535
|
# -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
    HTML entity definitions used by the Markup string implementation.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
HTML_ENTITIES = {
'AElig': 198,
'Aacute': 193,
'Acirc': 194,
'Agrave': 192,
'Alpha': 913,
'Aring': 197,
'Atilde': 195,
'Auml': 196,
'Beta': 914,
'Ccedil': 199,
'Chi': 935,
'Dagger': 8225,
'Delta': 916,
'ETH': 208,
'Eacute': 201,
'Ecirc': 202,
'Egrave': 200,
'Epsilon': 917,
'Eta': 919,
'Euml': 203,
'Gamma': 915,
'Iacute': 205,
'Icirc': 206,
'Igrave': 204,
'Iota': 921,
'Iuml': 207,
'Kappa': 922,
'Lambda': 923,
'Mu': 924,
'Ntilde': 209,
'Nu': 925,
'OElig': 338,
'Oacute': 211,
'Ocirc': 212,
'Ograve': 210,
'Omega': 937,
'Omicron': 927,
'Oslash': 216,
'Otilde': 213,
'Ouml': 214,
'Phi': 934,
'Pi': 928,
'Prime': 8243,
'Psi': 936,
'Rho': 929,
'Scaron': 352,
'Sigma': 931,
'THORN': 222,
'Tau': 932,
'Theta': 920,
'Uacute': 218,
'Ucirc': 219,
'Ugrave': 217,
'Upsilon': 933,
'Uuml': 220,
'Xi': 926,
'Yacute': 221,
'Yuml': 376,
'Zeta': 918,
'aacute': 225,
'acirc': 226,
'acute': 180,
'aelig': 230,
'agrave': 224,
'alefsym': 8501,
'alpha': 945,
'amp': 38,
'and': 8743,
'ang': 8736,
'apos': 39,
'aring': 229,
'asymp': 8776,
'atilde': 227,
'auml': 228,
'bdquo': 8222,
'beta': 946,
'brvbar': 166,
'bull': 8226,
'cap': 8745,
'ccedil': 231,
'cedil': 184,
'cent': 162,
'chi': 967,
'circ': 710,
'clubs': 9827,
'cong': 8773,
'copy': 169,
'crarr': 8629,
'cup': 8746,
'curren': 164,
'dArr': 8659,
'dagger': 8224,
'darr': 8595,
'deg': 176,
'delta': 948,
'diams': 9830,
'divide': 247,
'eacute': 233,
'ecirc': 234,
'egrave': 232,
'empty': 8709,
'emsp': 8195,
'ensp': 8194,
'epsilon': 949,
'equiv': 8801,
'eta': 951,
'eth': 240,
'euml': 235,
'euro': 8364,
'exist': 8707,
'fnof': 402,
'forall': 8704,
'frac12': 189,
'frac14': 188,
'frac34': 190,
'frasl': 8260,
'gamma': 947,
'ge': 8805,
'gt': 62,
'hArr': 8660,
'harr': 8596,
'hearts': 9829,
'hellip': 8230,
'iacute': 237,
'icirc': 238,
'iexcl': 161,
'igrave': 236,
'image': 8465,
'infin': 8734,
'int': 8747,
'iota': 953,
'iquest': 191,
'isin': 8712,
'iuml': 239,
'kappa': 954,
'lArr': 8656,
'lambda': 955,
'lang': 9001,
'laquo': 171,
'larr': 8592,
'lceil': 8968,
'ldquo': 8220,
'le': 8804,
'lfloor': 8970,
'lowast': 8727,
'loz': 9674,
'lrm': 8206,
'lsaquo': 8249,
'lsquo': 8216,
'lt': 60,
'macr': 175,
'mdash': 8212,
'micro': 181,
'middot': 183,
'minus': 8722,
'mu': 956,
'nabla': 8711,
'nbsp': 160,
'ndash': 8211,
'ne': 8800,
'ni': 8715,
'not': 172,
'notin': 8713,
'nsub': 8836,
'ntilde': 241,
'nu': 957,
'oacute': 243,
'ocirc': 244,
'oelig': 339,
'ograve': 242,
'oline': 8254,
'omega': 969,
'omicron': 959,
'oplus': 8853,
'or': 8744,
'ordf': 170,
'ordm': 186,
'oslash': 248,
'otilde': 245,
'otimes': 8855,
'ouml': 246,
'para': 182,
'part': 8706,
'permil': 8240,
'perp': 8869,
'phi': 966,
'pi': 960,
'piv': 982,
'plusmn': 177,
'pound': 163,
'prime': 8242,
'prod': 8719,
'prop': 8733,
'psi': 968,
'quot': 34,
'rArr': 8658,
'radic': 8730,
'rang': 9002,
'raquo': 187,
'rarr': 8594,
'rceil': 8969,
'rdquo': 8221,
'real': 8476,
'reg': 174,
'rfloor': 8971,
'rho': 961,
'rlm': 8207,
'rsaquo': 8250,
'rsquo': 8217,
'sbquo': 8218,
'scaron': 353,
'sdot': 8901,
'sect': 167,
'shy': 173,
'sigma': 963,
'sigmaf': 962,
'sim': 8764,
'spades': 9824,
'sub': 8834,
'sube': 8838,
'sum': 8721,
'sup': 8835,
'sup1': 185,
'sup2': 178,
'sup3': 179,
'supe': 8839,
'szlig': 223,
'tau': 964,
'there4': 8756,
'theta': 952,
'thetasym': 977,
'thinsp': 8201,
'thorn': 254,
'tilde': 732,
'times': 215,
'trade': 8482,
'uArr': 8657,
'uacute': 250,
'uarr': 8593,
'ucirc': 251,
'ugrave': 249,
'uml': 168,
'upsih': 978,
'upsilon': 965,
'uuml': 252,
'weierp': 8472,
'xi': 958,
'yacute': 253,
'yen': 165,
'yuml': 255,
'zeta': 950,
'zwj': 8205,
'zwnj': 8204
}
|
akosyakov/intellij-community
|
refs/heads/master
|
python/testData/intentions/beforeReturnTypeInPy3Annotation1.py
|
83
|
def my_func(p1=1):
return p1
d = my<caret>_func(1)
|
popazerty/dvbapp-gui2
|
refs/heads/master
|
lib/python/Plugins/SystemPlugins/AtemioDeviceManager/HddSetup.py
|
1
|
from enigma import *
from Plugins.Plugin import PluginDescriptor
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.GUIComponent import GUIComponent
from Components.HTMLComponent import HTMLComponent
from Tools.Directories import fileExists, crawlDirectory
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.Button import Button
from Components.Label import Label
from ExtrasList import ExtrasList
from Screens.MessageBox import MessageBox
from HddPartitions import HddPartitions
from HddInfo import HddInfo
from Disks import Disks
from ExtraMessageBox import ExtraMessageBox
from ExtraActionBox import ExtraActionBox
from MountPoints import MountPoints
import os
import sys
#from __init__ import _, loadPluginSkin
def DiskEntry(model, size, removable):
res = [(model, size, removable)]
if removable:
picture = '/usr/lib/enigma2/python/Plugins/SystemPlugins/AtemioDeviceManager/icons/diskusb.png'
else:
picture = '/usr/lib/enigma2/python/Plugins/SystemPlugins/AtemioDeviceManager/icons/disk.png'
if fileExists(picture):
res.append(MultiContentEntryPixmapAlphaTest(pos=(5, 0), size=(48, 48), png=loadPNG(picture)))
res.append(MultiContentEntryText(pos=(65, 10), size=(360, 38), font=0, text=model))
res.append(MultiContentEntryText(pos=(435, 10), size=(125, 38), font=0, text=size))
return res
class HddSetup(Screen):
skin = """
<screen name="HddSetup" position="center,center" size="560,430" title="Hard Drive Setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget name="key_red" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget name="key_green" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="key_yellow" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="key_blue" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="menu" position="20,45" size="520,380" scrollbarMode="showOnDemand" itemHeight="50" transparent="1" />
</screen>"""
def __init__(self, session, args = 0):
self.session = session
Screen.__init__(self, session)
self.disks = list()
self.mdisks = Disks()
for disk in self.mdisks.disks:
capacity = '%d MB' % (disk[1] / 1048576)
self.disks.append(DiskEntry(disk[3], capacity, disk[2]))
self['menu'] = ExtrasList(self.disks)
self['key_red'] = Button(_('Partitions'))
self['key_green'] = Button('Info')
self['key_yellow'] = Button(_('Initialize'))
self['key_blue'] = Button(_('Exit'))
self['actions'] = ActionMap(['OkCancelActions', 'ColorActions'], {'blue': self.quit,
'yellow': self.yellow,
'green': self.green,
'red': self.red,
'cancel': self.quit}, -2)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_('Atemio Device Manager'))
def mkfs(self):
self.formatted += 1
return self.mdisks.mkfs(self.mdisks.disks[self.sindex][0], self.formatted)
def refresh(self):
self.disks = list()
self.mdisks = Disks()
for disk in self.mdisks.disks:
capacity = '%d MB' % (disk[1] / 1048576)
self.disks.append(DiskEntry(disk[3], capacity, disk[2]))
self['menu'].setList(self.disks)
def checkDefault(self):
mp = MountPoints()
mp.read()
if not mp.exist('/hdd'):
mp.add(self.mdisks.disks[self.sindex][0], 1, '/hdd')
mp.write()
mp.mount(self.mdisks.disks[self.sindex][0], 1, '/hdd')
os.system('/bin/mkdir /hdd/movie')
os.system('/bin/mkdir /hdd/music')
os.system('/bin/mkdir /hdd/picture')
def format(self, result):
if result != 0:
self.session.open(MessageBox, _('Cannot format partition %d' % self.formatted), MessageBox.TYPE_ERROR)
if self.result == 0:
if self.formatted > 0:
self.checkDefault()
self.refresh()
return
elif self.result > 0 and self.result < 3:
if self.formatted > 1:
self.checkDefault()
self.refresh()
return
elif self.result == 3:
if self.formatted > 2:
self.checkDefault()
self.refresh()
return
elif self.result == 4:
if self.formatted > 3:
self.checkDefault()
self.refresh()
return
self.session.openWithCallback(self.format, ExtraActionBox, _('Formatting partition %d') % (self.formatted + 1), 'Initialize disk', self.mkfs)
def fdiskEnded(self, result):
if result == 0:
self.format(0)
elif result == -1:
self.session.open(MessageBox, _('Cannot unmount device.\nA recording in progress, timeshift or some external tools (like samba, swapfile and nfsd) may cause this problem.\nPlease stop these actions/applications and try again'), MessageBox.TYPE_ERROR)
else:
self.session.open(MessageBox, _('Partitioning failed!'), MessageBox.TYPE_ERROR)
def fdisk(self):
return self.mdisks.fdisk(self.mdisks.disks[self.sindex][0], self.mdisks.disks[self.sindex][1], self.result)
def initialize(self, result):
if result != 5:
self.result = result
self.formatted = 0
mp = MountPoints()
mp.read()
mp.deleteDisk(self.mdisks.disks[self.sindex][0])
mp.write()
self.session.openWithCallback(self.fdiskEnded, ExtraActionBox, _('Partitioning...'), _('Initialize disk'), self.fdisk)
def yellow(self):
if len(self.mdisks.disks) > 0:
self.sindex = self['menu'].getSelectedIndex()
self.session.openWithCallback(self.initialize, ExtraMessageBox, _('Please select your preferred configuration.'), _('HDD Partitioner'), [[_('One partition'), 'partitionmanager.png'],
[_('Two partitions (50% - 50%)'), 'partitionmanager.png'],
[_('Two partitions (75% - 25%)'), 'partitionmanager.png'],
[_('Three partitions (33% - 33% - 33%)'), 'partitionmanager.png'],
[_('Four partitions (25% - 25% - 25% - 25%)'), 'partitionmanager.png'],
[_('Cancel'), 'cancel.png']], 1, 5)
def green(self):
if len(self.mdisks.disks) > 0:
self.sindex = self['menu'].getSelectedIndex()
self.session.open(HddInfo, self.mdisks.disks[self.sindex][0])
def red(self):
if len(self.mdisks.disks) > 0:
self.sindex = self['menu'].getSelectedIndex()
self.session.open(HddPartitions, self.mdisks.disks[self.sindex])
def quit(self):
self.close()
|
fenginx/django
|
refs/heads/master
|
tests/postgres_tests/test_citext.py
|
89
|
"""
The citext PostgreSQL extension supports indexing of case-insensitive text
strings and thus eliminates the need for operations such as iexact and other
modifiers to enforce use of an index.
"""
from django.db import IntegrityError
from django.test.utils import modify_settings
from . import PostgreSQLTestCase
from .models import CITestModel
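# The tests below assume CITestModel is defined in .models roughly as in this
# sketch (the actual definition may differ); CICharField, CIEmailField and
# CITextField come from django.contrib.postgres.fields:
#
# class CITestModel(PostgreSQLModel):
#     name = CICharField(primary_key=True, max_length=255)
#     email = CIEmailField()
#     description = CITextField()
#     array_field = ArrayField(CICharField(max_length=255), null=True)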
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
class CITextTestCase(PostgreSQLTestCase):
case_sensitive_lookups = ('contains', 'startswith', 'endswith', 'regex')
@classmethod
def setUpTestData(cls):
cls.john = CITestModel.objects.create(
name='JoHn',
email='joHn@johN.com',
description='Average Joe named JoHn',
array_field=['JoE', 'jOhn'],
)
def test_equal_lowercase(self):
"""
citext removes the need for iexact as the index is case-insensitive.
"""
self.assertEqual(CITestModel.objects.filter(name=self.john.name.lower()).count(), 1)
self.assertEqual(CITestModel.objects.filter(email=self.john.email.lower()).count(), 1)
self.assertEqual(CITestModel.objects.filter(description=self.john.description.lower()).count(), 1)
def test_fail_citext_primary_key(self):
"""
Creating an entry for a citext field used as a primary key which
clashes with an existing value isn't allowed.
"""
with self.assertRaises(IntegrityError):
CITestModel.objects.create(name='John')
def test_array_field(self):
instance = CITestModel.objects.get()
self.assertEqual(instance.array_field, self.john.array_field)
self.assertTrue(CITestModel.objects.filter(array_field__contains=['joe']).exists())
def test_lookups_name_char(self):
for lookup in self.case_sensitive_lookups:
with self.subTest(lookup=lookup):
query = {'name__{}'.format(lookup): 'john'}
self.assertSequenceEqual(CITestModel.objects.filter(**query), [self.john])
def test_lookups_description_text(self):
for lookup, string in zip(self.case_sensitive_lookups, ('average', 'average joe', 'john', 'Joe.named')):
with self.subTest(lookup=lookup, string=string):
query = {'description__{}'.format(lookup): string}
self.assertSequenceEqual(CITestModel.objects.filter(**query), [self.john])
def test_lookups_email(self):
for lookup, string in zip(self.case_sensitive_lookups, ('john', 'john', 'john.com', 'john.com')):
with self.subTest(lookup=lookup, string=string):
query = {'email__{}'.format(lookup): string}
self.assertSequenceEqual(CITestModel.objects.filter(**query), [self.john])
|
markflorisson/datashape
|
refs/heads/master
|
datashape/coretypes.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
This defines the DataShape type system, with unified
shape and data type.
"""
import ctypes
import datetime
import operator
import numpy as np
from .py2help import _inttypes, _strtypes, unicode
# Classes of unit types.
DIMENSION = 1
MEASURE = 2
class Type(type):
_registry = {}
def __new__(meta, name, bases, dct):
cls = type(name, bases, dct)
# Don't register abstract classes
if not dct.get('abstract'):
Type._registry[name] = cls
return cls
@classmethod
def register(cls, name, type):
# Don't clobber existing types.
if name in cls._registry:
raise TypeError('There is another type registered with name %s'
% name)
cls._registry[name] = type
@classmethod
def lookup_type(cls, name):
return cls._registry[name]
class Mono(object):
"""
Monotypes are unqualified types with zero free type parameters.
Each type must be reconstructable using its parameters:
type(datashape_type)(*type.parameters)
"""
composite = False
__metaclass__ = Type
def __init__(self, *params):
self.parameters = params
@property
def shape(self):
return ()
def __len__(self):
return 1
def __getitem__(self, key):
lst = [self]
return lst[key]
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return '%s(%s)' % (type(self).__name__,
", ".join(map(repr, self.parameters)))
# Form for searching signature in meta-method Dispatch Table
def sigform(self):
return self
# Monotypes are their own measure
@property
def measure(self):
return self
def subarray(self, leading):
"""Returns a data shape object of the subarray with 'leading'
dimensions removed. In the case of a measure such as CType,
'leading' must be 0, and self is returned.
"""
if leading >= 1:
raise IndexError(('Not enough dimensions in data shape '
'to remove %d leading dimensions.') % leading)
else:
return self
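# Example of the reconstruction invariant documented in Mono (a sketch using
# the Fixed dimension type defined later in this module):
# f = Fixed(5)
# type(f)(*f.parameters) == f   # True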
class Unit(Mono):
"""
Unit type that does not need to be reconstructed.
"""
class Ellipsis(Mono):
"""
Ellipsis (...). Used to indicate a variable number of dimensions.
E.g.:
..., float32 # float32 array w/ any number of dimensions
A..., float32 # float32 array w/ any number of dimensions,
# associated with type variable A
"""
def __init__(self, typevar=None):
self.parameters = (typevar,)
@property
def typevar(self):
return self.parameters[0]
def __str__(self):
if self.typevar:
return str(self.typevar) + '...'
return '...'
def __repr__(self):
return 'Ellipsis("%s")' % (str(self),)
def __hash__(self):
return hash('...')
class Null(Unit):
"""
The null datashape.
"""
def __str__(self):
return expr_string('null', None)
class IntegerConstant(Unit):
"""
An integer which is a parameter to a type constructor. It is itself a
degenerate type constructor taking 0 parameters.
::
1, int32 # 1 is Fixed
Range(1,5) # 1 is IntegerConstant
"""
cls = None
def __init__(self, i):
assert isinstance(i, _inttypes)
self.parameters = (i,)
self.val = i
def __str__(self):
return str(self.val)
def __eq__(self, other):
if isinstance(other, _inttypes):
return self.val == other
elif isinstance(other, IntegerConstant):
return self.val == other.val
else:
raise TypeError("Cannot compare type %s to type %s" % (type(self), type(other)))
def __hash__(self):
return hash(self.val)
class StringConstant(Unit):
"""
Strings at the level of the constructor.
::
string(3, "utf-8") # "utf-8" is StringConstant
"""
def __init__(self, i):
assert isinstance(i, _strtypes)
self.parameters = (i,)
self.val = i
def __str__(self):
return repr(self.val)
def __eq__(self, other):
if isinstance(other, _strtypes):
return self.val == other
elif isinstance(other, StringConstant):
return self.val == other.val
else:
raise TypeError("Cannot compare type %s to type %s" % (type(self), type(other)))
def __hash__(self):
return hash(self.val)
class Bytes(Unit):
""" Bytes type """
cls = MEASURE
def __str__(self):
return 'bytes'
def __eq__(self, other):
return isinstance(other, Bytes)
_canonical_string_encodings = {
u'A' : u'A',
u'ascii' : u'A',
u'U8' : u'U8',
u'utf-8' : u'U8',
u'utf_8' : u'U8',
u'utf8' : u'U8',
u'U16' : u'U16',
u'utf-16' : u'U16',
u'utf_16' : u'U16',
u'utf16' : u'U16',
u'U32' : u'U32',
u'utf-32' : u'U32',
u'utf_32' : u'U32',
u'utf32' : u'U32'
}
class String(Unit):
""" String container """
cls = MEASURE
def __init__(self, fixlen=None, encoding=None):
# TODO: Do this constructor better...
if fixlen is None and encoding is None:
# String()
self.fixlen = None
self.encoding = u'U8'
elif isinstance(fixlen, _inttypes + (IntegerConstant,)) and \
encoding is None:
# String(fixlen)
if isinstance(fixlen, IntegerConstant):
self.fixlen = fixlen.val
else:
self.fixlen = fixlen
self.encoding = u'U8'
elif isinstance(fixlen, _strtypes + (StringConstant,)) and \
encoding is None:
# String('encoding')
self.fixlen = None
if isinstance(fixlen, StringConstant):
self.encoding = fixlen.val
else:
self.encoding = unicode(fixlen)
elif isinstance(fixlen, _inttypes + (IntegerConstant,)) and \
isinstance(encoding, _strtypes + (StringConstant,)):
# String(fixlen, 'encoding')
if isinstance(fixlen, IntegerConstant):
self.fixlen = fixlen.val
else:
self.fixlen = fixlen
if isinstance(encoding, StringConstant):
self.encoding = encoding.val
else:
self.encoding = unicode(encoding)
else:
raise ValueError(('Unexpected types to String constructor '
'(%s, %s)') % (type(fixlen), type(encoding)))
# Validate the encoding
if not self.encoding in _canonical_string_encodings:
raise ValueError('Unsupported string encoding %s' %
repr(self.encoding))
# Put it in a canonical form
self.encoding = _canonical_string_encodings[self.encoding]
def __str__(self):
if self.fixlen is None and self.encoding == 'U8':
return 'string'
elif self.fixlen is not None and self.encoding == 'U8':
return 'string(%i)' % self.fixlen
elif self.fixlen is None and self.encoding != 'U8':
return 'string(%s)' % repr(self.encoding).strip('u')
else:
return 'string(%i, %s)' % \
(self.fixlen, repr(self.encoding).strip('u'))
def __repr__(self):
return ''.join(["ctype(\"", str(self).encode('unicode_escape').decode('ascii'), "\")"])
def __eq__(self, other):
if type(other) is String:
return self.fixlen == other.fixlen and \
self.encoding == other.encoding
else:
return False
def __hash__(self):
return hash((self.fixlen, self.encoding))
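# Constructor forms accepted above (a sketch; encodings are canonicalised):
# String()              -> variable length, encoding 'U8'
# String(25)            -> fixed length 25, encoding 'U8'
# String('ascii')       -> variable length, canonical encoding 'A'
# String(25, 'utf-16')  -> fixed length 25, canonical encoding 'U16'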
class DataShape(Mono):
"""The Datashape class, implementation for generic composite
datashape objects"""
__metaclass__ = Type
composite = False
def __init__(self, *parameters, **kwds):
if len(parameters) > 0:
self.parameters = tuple(parameters)
if getattr(self.parameters[-1], 'cls', MEASURE) != MEASURE:
raise TypeError(('Only a measure can appear on the'
' last position of a datashape, not %s') %
repr(self.parameters[-1]))
for dim in self.parameters[:-1]:
if getattr(dim, 'cls', DIMENSION) != DIMENSION:
raise TypeError(('Only dimensions can appear before the'
' last position of a datashape, not %s') %
repr(dim))
else:
raise ValueError(('the data shape should be constructed from 2 or'
' more parameters, only got %s') % (len(parameters)))
self.composite = True
name = kwds.get('name')
if name:
self.name = name
self.__metaclass__._registry[name] = self
else:
self.name = None
###
# TODO: Why are low-level concepts like strides and alignment on
# TODO: the datashape?
###
def __len__(self):
return len(self.parameters)
def __getitem__(self, index):
return self.parameters[index]
def __str__(self):
if self.name:
res = self.name
else:
res = (', '.join(map(str, self.parameters)))
return res
def __eq__(self, other):
if isinstance(other, DataShape):
return self.parameters == other.parameters
elif isinstance(other, Mono):
return False
else:
raise TypeError(('Cannot compare non-datashape '
'type %s to datashape') % type(other))
def __hash__(self):
return hash(tuple(a for a in self))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return ''.join(["dshape(\"",
str(self).encode('unicode_escape').decode('ascii'),
"\")"])
@property
def shape(self):
return self.parameters[:-1]
@property
def measure(self):
return self.parameters[-1]
def sigform(self):
"""Return a data shape object with Fixed dimensions replaced
by TypeVar dimensions.
"""
newparams = [TypeVar('i%s'%n) for n in range(len(self.parameters)-1)]
newparams.append(self.parameters[-1])
return DataShape(*newparams)
def subarray(self, leading):
"""Returns a data shape object of the subarray with 'leading'
dimensions removed.
"""
if leading >= len(self.parameters):
raise IndexError(('Not enough dimensions in data shape '
'to remove %d leading dimensions.') % leading)
elif leading in [len(self.parameters) - 1, -1]:
return self.parameters[-1]
else:
return DataShape(*self.parameters[leading:])
class Enum(DataShape):
def __init__(self, name, *elts):
self.parameters = (name,) + elts
self.name = name
self.elts = elts
def __str__(self):
if self.name:
return 'Enum(%s, %s)' % (self.name, ','.join(map(str, self.elts)))
else:
return 'Enum(%s)' % ','.join(map(str, self.elts))
def __repr__(self):
return str(self)
def __eq__(self, other):
raise NotImplementedError
def __hash__(self):
raise NotImplementedError
class Option(DataShape):
"""
Measure types which may or may not hold data. Makes no
indication of how this is implemented in memory.
"""
def __init__(self, *params):
if len(params) != 1:
raise TypeError('Option only takes 1 argument')
if not params[0].cls == MEASURE:
raise TypeError('Option only takes measure argument')
self.parameters = params
self.ty = params[0]
def __str__(self):
return 'Option(%s)' % str(self.ty)
def __repr__(self):
return str(self)
class CType(Unit):
"""
Symbol for a sized type mapping uniquely to a native type.
"""
cls = MEASURE
def __init__(self, name, itemsize, alignment):
self.name = name
self._itemsize = itemsize
self._alignment = alignment
Type.register(name, self)
self.parameters = (name,)
@classmethod
def from_numpy_dtype(self, dt):
"""
From Numpy dtype.
>>> from datashape import CType
>>> from numpy import dtype
>>> CType.from_numpy_dtype(dtype('int32'))
ctype("int32")
"""
return Type._registry[dt.name]
@property
def itemsize(self):
"""The size of one element of this type."""
return self._itemsize
@property
def c_itemsize(self):
"""The size of one element of this type, with C-contiguous storage."""
return self._itemsize
@property
def c_alignment(self):
"""The alignment of one element of this type."""
return self._alignment
def to_numpy_dtype(self):
"""
To Numpy dtype.
"""
# Fixup the complex type to how numpy does it
s = self.name
s = {'complex[float32]':'complex64',
'complex[float64]':'complex128'}.get(s, s)
return np.dtype(s)
def __str__(self):
return self.name
def __repr__(self):
return ''.join(["ctype(\"", str(self).encode('unicode_escape').decode('ascii'), "\")"])
def __eq__(self, other):
if type(other) is CType:
return self.name == other.name
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
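# e.g. (sketch, using the CType instances defined later in this module):
# int32.to_numpy_dtype() == np.dtype('int32')
# complex_float64.to_numpy_dtype() == np.dtype('complex128')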
class Fixed(Unit):
"""
Fixed dimension.
"""
cls = DIMENSION
def __init__(self, i):
# Use operator.index, so Python integers, numpy int scalars, etc work
i = operator.index(i)
if i < 0:
raise ValueError('Fixed dimensions must be positive')
self.val = i
self.parameters = (self.val,)
def __index__(self):
return self.val
def __int__(self):
return self.val
def __eq__(self, other):
if type(other) is Fixed:
return self.val == other.val
elif isinstance(other, _inttypes):
return self.val == other
else:
return False
def __hash__(self):
return hash(self.val)
def __str__(self):
return str(self.val)
class Var(Unit):
""" Variable dimension """
cls = DIMENSION
def __str__(self):
return 'var'
def __eq__(self, other):
return isinstance(other, Var)
def __hash__(self):
return id(Var)
class TypeVar(Unit):
"""
A free variable in the signature. Not user facing.
"""
# cls could be MEASURE or DIMENSION, depending on context
def __init__(self, symbol):
if symbol.startswith("'"):
symbol = symbol[1:]
self.symbol = symbol
self.parameters = (symbol,)
def __repr__(self):
return "TypeVar(%s)" % (str(self),)
def __str__(self):
return str(self.symbol)
# All TypeVariables compare equal
# dshape('M,int32') = dshape('N,int32')
# def __eq__(self, other):
# if not isinstance(other, TypeVar):
# return False
# else:
# return True
# def __hash__(self):
# return hash(self.__class__)
class Implements(Mono):
"""
Type representing a constraint on the subtype term (which must be a
TypeVar), namely that it must belong to a given type set.
"""
@property
def typevar(self):
return self.parameters[0]
@property
def typeset(self):
return self.parameters[1]
def __repr__(self):
return '%s : %s' % (self.typevar, self.typeset.name)
class Range(Mono):
"""
Range type representing a bounded or unbounded interval of
possible Fixed dimensions.
"""
cls = DIMENSION
def __init__(self, a, b=False):
if isinstance(a, _inttypes):
self.a = a
elif isinstance(a, IntegerConstant):
self.a = a.val
else:
raise TypeError('Expected integer for parameter a, not %s' % type(a))
if isinstance(b, _inttypes):
self.b = b
elif b is False or b is None:
self.b = b
elif isinstance(b, IntegerConstant):
self.b = b.val
else:
raise TypeError('Expected integer for parameter b, not %s' % type(b))
if a and b:
assert self.a < self.b, 'Must have lower < upper'
self.parameters = (self.a, self.b)
@property
def upper(self):
# Just upper bound
if self.b == False:
return self.a
# No upper bound case
elif self.b == None:
return float('inf')
# Lower and upper bound
else:
return self.b
@property
def lower(self):
# Just upper bound
if self.b == False:
return 0
# No upper bound case
elif self.b == None:
return self.a
# Lower and upper bound
else:
return self.a
def __eq__(self, other):
if not isinstance(other, Range):
raise TypeError("Cannot compare type %s to type %s" % (type(self), type(other)))
else:
return self.a == other.a and self.b == other.b
def __hash__(self):
return hash((self.a, self.b))
def __str__(self):
return expr_string('Range', [self.lower, self.upper])
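# Interval semantics implied by the lower/upper properties above (a sketch):
# Range(5)        -> lower 0, upper 5
# Range(1, 5)     -> lower 1, upper 5
# Range(1, None)  -> lower 1, upper inf (unbounded above)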
class Function(Mono):
"""
Used for function signatures.
"""
def __init__(self, *parameters):
self.parameters = parameters
@property
def restype(self):
return self.parameters[-1]
@property
def argtypes(self):
return self.parameters[:-1]
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.parameters == other.parameters)
def __hash__(self):
return 1
def __ne__(self, other):
return not self == other
# def __repr__(self):
# return " -> ".join(map(repr, self.parameters))
def __str__(self):
return " -> ".join(map(str, self.parameters))
class Record(Mono):
"""
A composite data structure of ordered fields mapped to types.
"""
cls = MEASURE
def __init__(self, fields):
"""
Parameters
----------
fields : list/OrderedDict of (name, type) entries
The fields which make up the record.
"""
# This is passed in with an OrderedDict so field order is
# preserved. Using RecordDecl there is some magic to also
# ensure that the fields align in the order they are
# declared.
self.__fields = tuple(fields)
self.__fdict = dict(fields)
self.__fnames = [f[0] for f in fields]
self.__ftypes = [f[1] for f in fields]
self.parameters = (fields,)
@property
def fields(self):
return self.__fdict
@property
def names(self):
return self.__fnames
@property
def types(self):
return self.__ftypes
def to_numpy_dtype(self):
"""
To Numpy record dtype.
"""
dk = self.__fnames
dv = map(to_numpy_dtype, self.__ftypes)
# Need to cast to a list for python 3,
# because zip returns an iterator
return np.dtype(list(zip(dk, dv)))
def __getitem__(self, key):
return self.__fdict[key]
def __eq__(self, other):
if isinstance(other, Record):
return self.__fdict == other.__fdict
else:
return False
def __hash__(self):
return hash(self.__fields)
def __str__(self):
return record_string(self.__fnames, self.__ftypes)
def __repr__(self):
return ''.join(["dshape(\"", str(self).encode('unicode_escape').decode('ascii'), "\")"])
class JSON(Mono):
""" JSON measure """
cls = MEASURE
def __init__(self):
self.parameters = ()
def __str__(self):
return 'json'
def __eq__(self, other):
return isinstance(other, JSON)
class TypeConstructor(type):
"""
Generic type constructor.
Attributes:
===========
n: int
number of parameters
flags: [{str: object}]
flag for each parameter. Built-in flags include:
* 'coercible': True/False. The default is False
"""
def __new__(cls, name, n, flags, is_vararg=False):
def __init__(self, *params):
if len(params) != n:
if not (is_vararg and len(params) >= n):
raise TypeError(
"Expected %d parameters for constructor %s, got %d" % (
n, name, len(params)))
self.parameters = params
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.parameters == other.parameters and
self.flags == other.flags)
def __hash__(self):
return hash((name, n, self.parameters))
def __str__(self):
return "%s[%s]" % (name, ", ".join(map(str, self.parameters)))
d = {
'__init__': __init__,
'__repr__': __str__,
'__str__': __str__,
'__eq__': __eq__,
'__ne__': lambda self, other: not (self == other),
'__hash__': __hash__,
'flags': flags,
}
self = super(TypeConstructor, cls).__new__(cls, name, (Mono,), d)
self.name = name
self.n = n
self.flags = flags
return self
def __init__(self, *args, **kwds):
pass # Swallow arguments
def __eq__(cls, other):
return (isinstance(other, TypeConstructor) and
cls.name == other.name and cls.n == other.n and
cls.flags == other.flags)
def __ne__(cls, other):
return not (cls == other)
def __hash__(cls):
return hash((cls.name, cls.n))
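# Usage sketch (a hypothetical constructor, not defined in this module):
# Pointer = TypeConstructor('pointer', 1, [{'coercible': False}])
# p = Pointer(int32)
# str(p)  -> 'pointer[int32]'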
bool_ = CType('bool', 1, 1)
char = CType('char', 1, 1)
int8 = CType('int8', 1, 1)
int16 = CType('int16', 2, ctypes.alignment(ctypes.c_int16))
int32 = CType('int32', 4, ctypes.alignment(ctypes.c_int32))
int64 = CType('int64', 8, ctypes.alignment(ctypes.c_int64))
uint8 = CType('uint8', 1, 1)
uint16 = CType('uint16', 2, ctypes.alignment(ctypes.c_uint16))
uint32 = CType('uint32', 4, ctypes.alignment(ctypes.c_uint32))
uint64 = CType('uint64', 8, ctypes.alignment(ctypes.c_uint64))
float16 = CType('float16', 2, ctypes.alignment(ctypes.c_uint16))
float32 = CType('float32', 4, ctypes.alignment(ctypes.c_float))
float64 = CType('float64', 8, ctypes.alignment(ctypes.c_double))
#float128 = CType('float128', 16)
complex_float32 = CType('complex[float32]', 8, ctypes.alignment(ctypes.c_float))
complex_float64 = CType('complex[float64]', 16, ctypes.alignment(ctypes.c_double))
Type.register('complex64', complex_float32)
complex64 = complex_float32
Type.register('complex128', complex_float64)
complex128 = complex_float64
#complex256 = CType('complex256', 32)
timedelta64 = CType('timedelta64', 8, ctypes.alignment(ctypes.c_int64))
datetime64 = CType('datetime64', 8, ctypes.alignment(ctypes.c_int64))
date = CType('date', 4, 4)
c_byte = int8
c_short = int16
c_int = int32
c_longlong = int64
c_ubyte = uint8
c_ushort = uint16
c_ulonglong = uint64
if ctypes.sizeof(ctypes.c_long) == 4:
c_long = int32
c_ulong = uint32
else:
c_long = int64
c_ulong = uint64
if ctypes.sizeof(ctypes.c_void_p) == 4:
intptr = c_ssize_t = int32
uintptr = c_size_t = uint32
else:
intptr = c_ssize_t = int64
uintptr = c_size_t = uint64
Type.register('intptr', intptr)
Type.register('uintptr', uintptr)
c_half = float16
c_float = float32
c_double = float64
# TODO: Deal with the longdouble == one of float64/float80/float96/float128 situation
#c_longdouble = float128
half = float16
single = float32
double = float64
# TODO: the semantics of these are still being discussed
int_ = int32
float_ = float32
void = CType('void', 0, 1)
object_ = pyobj = CType('object',
ctypes.sizeof(ctypes.py_object),
ctypes.alignment(ctypes.py_object))
na = Null
NullRecord = Record(())
bytes_ = Bytes()
string = String()
json = JSON()
Type.register('float', c_float)
Type.register('double', c_double)
Type.register('bytes', bytes_)
Type.register('string', String())
class NotNumpyCompatible(Exception):
"""
Raised when we try to convert a datashape into a NumPy dtype
but it cannot be coerced.
"""
pass
def to_numpy_dtype(ds):
""" Throw away the shape information and just return the
measure as NumPy dtype instance."""
return to_numpy(ds[-1])
def to_numpy(ds):
"""
Downcast a datashape object into a Numpy (shape, dtype) tuple if
possible.
>>> from datashape import dshape, to_numpy
>>> to_numpy(dshape('5, 5, int32'))
((5, 5), dtype('int32'))
"""
if isinstance(ds, CType):
return ds.to_numpy_dtype()
shape = tuple()
dtype = None
#assert isinstance(ds, DataShape)
# The datashape dimensions
for dim in ds[:-1]:
if isinstance(dim, IntegerConstant):
shape += (dim,)
elif isinstance(dim, Fixed):
shape += (dim.val,)
elif isinstance(dim, TypeVar):
shape += (-1,)
else:
raise NotNumpyCompatible('Datashape dimension %s is not NumPy-compatible' % dim)
# The datashape measure
msr = ds[-1]
if isinstance(msr, CType):
dtype = msr.to_numpy_dtype()
elif isinstance(msr, Record):
dtype = msr.to_numpy_dtype()
else:
raise NotNumpyCompatible('Datashape measure %s is not NumPy-compatible' % msr)
if type(dtype) != np.dtype:
raise NotNumpyCompatible('Internal Error: Failed to produce NumPy dtype')
return (shape, dtype)
def from_numpy(shape, dt):
"""
Upcast a (shape, dtype) tuple if possible.
>>> from datashape import from_numpy
>>> from numpy import dtype
>>> from_numpy((5,5), dtype('int32'))
dshape("5, 5, int32")
"""
dtype = np.dtype(dt)
if dtype.kind == 'S':
measure = String(dtype.itemsize, 'A')
elif dtype.kind == 'U':
measure = String(dtype.itemsize // 4, 'U8')
elif dtype.fields:
field_items = [(name, dtype.fields[name]) for name in dtype.names]
rec = [(a,CType.from_numpy_dtype(b[0])) for a,b in field_items]
measure = Record(rec)
else:
measure = CType.from_numpy_dtype(dtype)
if shape == ():
return measure
else:
return DataShape(*tuple(map(Fixed, shape))+(measure,))
def typeof(obj):
"""
Return a datashape for a Python scalar, NumPy array, or other object.
"""
if hasattr(obj, "dshape"):
return obj.dshape
elif isinstance(obj, np.ndarray):
return from_numpy(obj.shape, obj.dtype)
elif isinstance(obj, _inttypes):
return DataShape(int_)
elif isinstance(obj, float):
return DataShape(double)
elif isinstance(obj, complex):
return DataShape(complex128)
elif isinstance(obj, _strtypes):
return DataShape(string)
elif isinstance(obj, datetime.timedelta):
return DataShape(timedelta64)
elif isinstance(obj, datetime.datetime):
return DataShape(datetime64)
else:
return DataShape(pyobj)
def expr_string(spine, const_args, outer=None):
if not outer:
outer = '()'
if const_args:
return str(spine) + outer[0] + ','.join(map(str,const_args)) + outer[1]
else:
return str(spine)
def record_string(fields, values):
# Prints out something like this:
# {a : int32, b: float32, ... }
body = ''
count = len(fields)
for i, (k,v) in enumerate(zip(fields,values)):
if (i+1) == count:
body += '%s : %s' % (k,v)
else:
body += '%s : %s; ' % (k,v)
return '{ ' + body + ' }'
def free(ds):
"""
Return the free variables (TypeVar) of a datashape type (Mono).
"""
if isinstance(ds, TypeVar):
return [ds]
elif isinstance(ds, Mono) and not isinstance(ds, Unit):
result = []
for x in ds.parameters:
result.extend(free(x))
return result
else:
return []
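# Example (sketch): with ds = DataShape(TypeVar('M'), TypeVar('N'), int32),
# free(ds) returns the two TypeVar parameters [M, N], while free(int32)
# returns [] because CType is a Unit.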
def type_constructor(ds):
"""
Get the type constructor for the datashape type (Mono).
The type constructor indicates how types unify (see unification.py).
"""
return type(ds)
|
planaspa/Data-Mining
|
refs/heads/master
|
tests/test_lengthPlot.py
|
1
|
from src.lengthPlot import *
db = 'db/test.db'
def test_text_format():
assert text_format("asdkjhaeih") == "asdkjhaeih"
assert text_format("as&dkj>hae<ih") == "as&dkj>hae<ih"
assert text_format("") == ""
def test_loadData():
conn = sqlite3.connect(db)
c = conn.cursor()
tweetInfo = loadData(c)
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", LAT, LONG, FOLLOWERS) "
"VALUES(0, 'test2 test0',7 , 5,-5.6, 6.12, 105)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(1, 'test2 test0',0 , 0, 5)")
conn.execute("INSERT INTO TWEETS(ID, TWEET_TEXT, FAVS, RTS"
", FOLLOWERS) "
"VALUES(2, 'test2',3 , 3, 30)")
tweetInfo2 = loadData(c)
conn.execute("DELETE FROM TWEETS WHERE ID=0")
conn.execute("DELETE FROM TWEETS WHERE ID=1")
conn.execute("DELETE FROM TWEETS WHERE ID=2")
# Closing the connection
conn.close()
assert tweetInfo == []
assert tweetInfo2 == [(11, 5, 7), (11, 0, 0), (5, 3, 3)]
def test_spreadInfo():
tweetInfo = [(11, 5, 7), (11, 0, 0), (5, 3, 3)]
info = spreadInfo(tweetInfo)
rtsPerTweet = info[0]
favsPerTweet = info[1]
numberOfTweet = info[2]
testRts = [0] * 141
testFavs = [0] * 141
testNumber = [0] * 141
testRts[5] = 3
testRts[11] = 5
testFavs[5] = 3
testFavs[11] = 7
testNumber[5] = 1
testNumber[11] = 2
assert rtsPerTweet == testRts
assert favsPerTweet == testFavs
assert numberOfTweet == testNumber
|
Nick-OpusVL/odoo
|
refs/heads/8.0
|
addons/l10n_si/account_wizard.py
|
255
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class AccountWizard_cd(osv.osv_memory):
_inherit='wizard.multi.charts.accounts'
_defaults = {
'code_digits' : 6,
}
|
ammarkhann/FinalSeniorCode
|
refs/heads/master
|
lib/python2.7/site-packages/sympy/simplify/tests/test_sqrtdenest.py
|
98
|
from sympy import sqrt, root, S, Symbol, sqrtdenest, Integral, cos
from sympy.simplify.sqrtdenest import _subsets as subsets
r2, r3, r5, r6, r7, r10, r15, r29 = [sqrt(x) for x in [2, 3, 5, 6, 7, 10,
15, 29]]
def test_sqrtdenest():
d = {sqrt(5 + 2 * r6): r2 + r3,
sqrt(5. + 2 * r6): sqrt(5. + 2 * r6),
sqrt(5. + 4*sqrt(5 + 2 * r6)): sqrt(5.0 + 4*r2 + 4*r3),
sqrt(r2): sqrt(r2),
sqrt(5 + r7): sqrt(5 + r7),
sqrt(3 + sqrt(5 + 2*r7)):
3*r2*(5 + 2*r7)**(S(1)/4)/(2*sqrt(6 + 3*r7)) +
r2*sqrt(6 + 3*r7)/(2*(5 + 2*r7)**(S(1)/4)),
sqrt(3 + 2*r3): 3**(S(3)/4)*(r6/2 + 3*r2/2)/3}
for i in d:
assert sqrtdenest(i) == d[i]
def test_sqrtdenest2():
assert sqrtdenest(sqrt(16 - 2*r29 + 2*sqrt(55 - 10*r29))) == \
r5 + sqrt(11 - 2*r29)
e = sqrt(-r5 + sqrt(-2*r29 + 2*sqrt(-10*r29 + 55) + 16))
assert sqrtdenest(e) == root(-2*r29 + 11, 4)
r = sqrt(1 + r7)
assert sqrtdenest(sqrt(1 + r)) == sqrt(1 + r)
e = sqrt(((1 + sqrt(1 + 2*sqrt(3 + r2 + r5)))**2).expand())
assert sqrtdenest(e) == 1 + sqrt(1 + 2*sqrt(r2 + r5 + 3))
assert sqrtdenest(sqrt(5*r3 + 6*r2)) == \
sqrt(2)*root(3, 4) + root(3, 4)**3
assert sqrtdenest(sqrt(((1 + r5 + sqrt(1 + r3))**2).expand())) == \
1 + r5 + sqrt(1 + r3)
assert sqrtdenest(sqrt(((1 + r5 + r7 + sqrt(1 + r3))**2).expand())) == \
1 + sqrt(1 + r3) + r5 + r7
e = sqrt(((1 + cos(2) + cos(3) + sqrt(1 + r3))**2).expand())
assert sqrtdenest(e) == cos(3) + cos(2) + 1 + sqrt(1 + r3)
e = sqrt(-2*r10 + 2*r2*sqrt(-2*r10 + 11) + 14)
assert sqrtdenest(e) == sqrt(-2*r10 - 2*r2 + 4*r5 + 14)
# check that the result is not more complicated than the input
z = sqrt(-2*r29 + cos(2) + 2*sqrt(-10*r29 + 55) + 16)
assert sqrtdenest(z) == z
assert sqrtdenest(sqrt(r6 + sqrt(15))) == sqrt(r6 + sqrt(15))
z = sqrt(15 - 2*sqrt(31) + 2*sqrt(55 - 10*r29))
assert sqrtdenest(z) == z
def test_sqrtdenest_rec():
assert sqrtdenest(sqrt(-4*sqrt(14) - 2*r6 + 4*sqrt(21) + 33)) == \
-r2 + r3 + 2*r7
assert sqrtdenest(sqrt(-28*r7 - 14*r5 + 4*sqrt(35) + 82)) == \
-7 + r5 + 2*r7
assert sqrtdenest(sqrt(6*r2/11 + 2*sqrt(22)/11 + 6*sqrt(11)/11 + 2)) == \
sqrt(11)*(r2 + 3 + sqrt(11))/11
assert sqrtdenest(sqrt(468*r3 + 3024*r2 + 2912*r6 + 19735)) == \
9*r3 + 26 + 56*r6
z = sqrt(-490*r3 - 98*sqrt(115) - 98*sqrt(345) - 2107)
assert sqrtdenest(z) == sqrt(-1)*(7*r5 + 7*r15 + 7*sqrt(23))
z = sqrt(-4*sqrt(14) - 2*r6 + 4*sqrt(21) + 34)
assert sqrtdenest(z) == z
assert sqrtdenest(sqrt(-8*r2 - 2*r5 + 18)) == -r10 + 1 + r2 + r5
assert sqrtdenest(sqrt(8*r2 + 2*r5 - 18)) == \
sqrt(-1)*(-r10 + 1 + r2 + r5)
assert sqrtdenest(sqrt(8*r2/3 + 14*r5/3 + S(154)/9)) == \
-r10/3 + r2 + r5 + 3
assert sqrtdenest(sqrt(sqrt(2*r6 + 5) + sqrt(2*r7 + 8))) == \
sqrt(1 + r2 + r3 + r7)
assert sqrtdenest(sqrt(4*r15 + 8*r5 + 12*r3 + 24)) == 1 + r3 + r5 + r15
w = 1 + r2 + r3 + r5 + r7
assert sqrtdenest(sqrt((w**2).expand())) == w
z = sqrt((w**2).expand() + 1)
assert sqrtdenest(z) == z
z = sqrt(2*r10 + 6*r2 + 4*r5 + 12 + 10*r15 + 30*r3)
assert sqrtdenest(z) == z
def test_issue_6241():
z = sqrt( -320 + 32*sqrt(5) + 64*r15)
assert sqrtdenest(z) == z
def test_sqrtdenest3():
z = sqrt(13 - 2*r10 + 2*r2*sqrt(-2*r10 + 11))
assert sqrtdenest(z) == -1 + r2 + r10
assert sqrtdenest(z, max_iter=1) == -1 + sqrt(2) + sqrt(10)
n = sqrt(2*r6/7 + 2*r7/7 + 2*sqrt(42)/7 + 2)
d = sqrt(16 - 2*r29 + 2*sqrt(55 - 10*r29))
assert sqrtdenest(n/d).equals(
r7*(1 + r6 + r7)/(7*(sqrt(-2*r29 + 11) + r5)))
z = sqrt(sqrt(r2 + 2) + 2)
assert sqrtdenest(z) == z
assert sqrtdenest(sqrt(-2*r10 + 4*r2*sqrt(-2*r10 + 11) + 20)) == \
sqrt(-2*r10 - 4*r2 + 8*r5 + 20)
assert sqrtdenest(sqrt((112 + 70*r2) + (46 + 34*r2)*r5)) == \
r10 + 5 + 4*r2 + 3*r5
z = sqrt(5 + sqrt(2*r6 + 5)*sqrt(-2*r29 + 2*sqrt(-10*r29 + 55) + 16))
r = sqrt(-2*r29 + 11)
assert sqrtdenest(z) == sqrt(r2*r + r3*r + r10 + r15 + 5)
def test_sqrtdenest4():
# see Denest_en.pdf in https://github.com/sympy/sympy/issues/3192
z = sqrt(8 - r2*sqrt(5 - r5) - sqrt(3)*(1 + r5))
z1 = sqrtdenest(z)
c = sqrt(-r5 + 5)
z1 = ((-r15*c - r3*c + c + r5*c - r6 - r2 + r10 + sqrt(30))/4).expand()
assert sqrtdenest(z) == z1
z = sqrt(2*r2*sqrt(r2 + 2) + 5*r2 + 4*sqrt(r2 + 2) + 8)
assert sqrtdenest(z) == r2 + sqrt(r2 + 2) + 2
w = 2 + r2 + r3 + (1 + r3)*sqrt(2 + r2 + 5*r3)
z = sqrt((w**2).expand())
assert sqrtdenest(z) == w.expand()
def test_sqrt_symbolic_denest():
x = Symbol('x')
z = sqrt(((1 + sqrt(sqrt(2 + x) + 3))**2).expand())
assert sqrtdenest(z) == sqrt((1 + sqrt(sqrt(2 + x) + 3))**2)
z = sqrt(((1 + sqrt(sqrt(2 + cos(1)) + 3))**2).expand())
assert sqrtdenest(z) == 1 + sqrt(sqrt(2 + cos(1)) + 3)
z = ((1 + cos(2))**4 + 1).expand()
assert sqrtdenest(z) == z
z = sqrt(((1 + sqrt(sqrt(2 + cos(3*x)) + 3))**2 + 1).expand())
assert sqrtdenest(z) == z
c = cos(3)
c2 = c**2
assert sqrtdenest(sqrt(2*sqrt(1 + r3)*c + c2 + 1 + r3*c2)) == \
-1 - sqrt(1 + r3)*c
ra = sqrt(1 + r3)
z = sqrt(20*ra*sqrt(3 + 3*r3) + 12*r3*ra*sqrt(3 + 3*r3) + 64*r3 + 112)
assert sqrtdenest(z) == z
def test_issue_5857():
from sympy.abc import x, y
z = sqrt(1/(4*r3 + 7) + 1)
ans = (r2 + r6)/(r3 + 2)
assert sqrtdenest(z) == ans
assert sqrtdenest(1 + z) == 1 + ans
assert sqrtdenest(Integral(z + 1, (x, 1, 2))) == \
Integral(1 + ans, (x, 1, 2))
assert sqrtdenest(x + sqrt(y)) == x + sqrt(y)
def test_subsets():
assert subsets(1) == [[1]]
assert subsets(4) == [
[1, 0, 0, 0], [0, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 0], [1, 0, 1, 0],
[0, 1, 1, 0], [1, 1, 1, 0], [0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]
def test_issue_5653():
assert sqrtdenest(
sqrt(2 + sqrt(2 + sqrt(2)))) == sqrt(2 + sqrt(2 + sqrt(2)))
|
Shaps/ansible
|
refs/heads/devel
|
lib/ansible/modules/utilities/logic/set_stats.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible RedHat, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: set_stats
short_description: Set stats for the current ansible run
description:
- This module allows setting/accumulating stats on the current ansible run, either per host or for all hosts in the run.
- This module is also supported for Windows targets.
author: Brian Coca (@bcoca)
options:
data:
description:
- A dictionary of which each key represents a stat (or variable) you want to keep track of.
type: dict
required: true
per_host:
description:
- whether the stats are per host or for all hosts in the run.
type: bool
default: no
aggregate:
description:
- Whether the provided value is aggregated to the existing stat C(yes) or will replace it C(no).
type: bool
default: yes
notes:
- In order for custom stats to be displayed, you must set C(show_custom_stats) in C(ansible.cfg) or C(ANSIBLE_SHOW_CUSTOM_STATS) to C(yes).
- This module is also supported for Windows targets.
version_added: "2.3"
'''
EXAMPLES = r'''
# Aggregating packages_installed stat per host
- set_stats:
data:
packages_installed: 31
per_host: yes
# Aggregating random stats for all hosts using complex arguments
- set_stats:
data:
one_stat: 11
other_stat: "{{ local_var * 2 }}"
another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
per_host: no
# setting stats (not aggregating)
- set_stats:
data:
the_answer: 42
aggregate: no
'''
|
Teagan42/home-assistant
|
refs/heads/dev
|
homeassistant/components/plugwise/climate.py
|
2
|
"""Plugwise Climate component for Home Assistant."""
import logging
import haanna
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
_LOGGER = logging.getLogger(__name__)
# Configuration directives
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_LEGACY = "legacy_anna"
# Default directives
DEFAULT_NAME = "Plugwise Thermostat"
DEFAULT_USERNAME = "smile"
DEFAULT_TIMEOUT = 10
DEFAULT_PORT = 80
DEFAULT_ICON = "mdi:thermometer"
DEFAULT_MIN_TEMP = 4
DEFAULT_MAX_TEMP = 30
# HVAC modes
HVAC_MODES_1 = [HVAC_MODE_HEAT, HVAC_MODE_AUTO]
HVAC_MODES_2 = [HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO]
# Read platform configuration
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_LEGACY, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): cv.positive_int,
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): cv.positive_int,
}
)
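# A minimal example configuration.yaml entry for this platform (a sketch;
# host and password values are placeholders):
#
# climate:
#   - platform: plugwise
#     name: Anna Thermostat
#     password: your_smile_id
#     host: 192.168.1.25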
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Add the Plugwise (Anna) Thermostat."""
api = haanna.Haanna(
config[CONF_USERNAME],
config[CONF_PASSWORD],
config[CONF_HOST],
config[CONF_PORT],
config[CONF_LEGACY],
)
try:
api.ping_anna_thermostat()
except OSError:
_LOGGER.debug("Ping failed, retrying later", exc_info=True)
raise PlatformNotReady
devices = [
ThermostatDevice(
api, config[CONF_NAME], config[CONF_MIN_TEMP], config[CONF_MAX_TEMP]
)
]
add_entities(devices, True)
class ThermostatDevice(ClimateDevice):
"""Representation of the Plugwise thermostat."""
def __init__(self, api, name, min_temp, max_temp):
"""Set up the Plugwise API."""
self._api = api
self._min_temp = min_temp
self._max_temp = max_temp
self._name = name
self._direct_objects = None
self._domain_objects = None
self._outdoor_temperature = None
self._selected_schema = None
self._last_active_schema = None
self._preset_mode = None
self._presets = None
self._presets_list = None
self._boiler_status = None
self._heating_status = None
self._cooling_status = None
self._dhw_status = None
self._schema_names = None
self._schema_status = None
self._current_temperature = None
self._thermostat_temperature = None
self._boiler_temperature = None
self._water_pressure = None
self._schedule_temperature = None
self._hvac_mode = None
@property
def hvac_action(self):
"""Return the current hvac action."""
if self._heating_status or self._boiler_status or self._dhw_status:
return CURRENT_HVAC_HEAT
if self._cooling_status:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_IDLE
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend."""
return DEFAULT_ICON
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attributes = {}
if self._outdoor_temperature:
attributes["outdoor_temperature"] = self._outdoor_temperature
if self._schema_names:
attributes["available_schemas"] = self._schema_names
if self._selected_schema:
attributes["selected_schema"] = self._selected_schema
if self._boiler_temperature:
attributes["boiler_temperature"] = self._boiler_temperature
if self._water_pressure:
attributes["water_pressure"] = self._water_pressure
return attributes
@property
def preset_modes(self):
"""Return the available preset modes list.
And make the presets with their temperatures available.
"""
return self._presets_list
@property
def hvac_modes(self):
"""Return the available hvac modes list."""
if self._heating_status is not None or self._boiler_status is not None:
if self._cooling_status is not None:
return HVAC_MODES_2
return HVAC_MODES_1
return None
@property
def hvac_mode(self):
"""Return current active hvac state."""
if self._schema_status:
return HVAC_MODE_AUTO
if self._heating_status or self._boiler_status or self._dhw_status:
if self._cooling_status:
return HVAC_MODE_HEAT_COOL
return HVAC_MODE_HEAT
return HVAC_MODE_OFF
@property
def target_temperature(self):
"""Return the target_temperature.
From the XML the thermostat-value is used because it updates 'immediately'
compared to the target_temperature-value. This way the information on the card
is "immediately" updated after changing the preset, temperature, etc.
"""
return self._thermostat_temperature
@property
def preset_mode(self):
"""Return the active selected schedule-name.
Or, return the active preset, or return Temporary in case of a manual change
in the set-temperature with a weekschedule active.
Or return Manual in case of a manual change and no weekschedule active.
"""
if self._presets:
presets = self._presets
preset_temperature = presets.get(self._preset_mode, "none")
if self.hvac_mode == HVAC_MODE_AUTO:
if self._thermostat_temperature == self._schedule_temperature:
return "{}".format(self._selected_schema)
if self._thermostat_temperature == preset_temperature:
return self._preset_mode
return "Temporary"
if self._thermostat_temperature != preset_temperature:
return "Manual"
return self._preset_mode
return None
@property
def current_temperature(self):
"""Return the current room temperature."""
return self._current_temperature
@property
def min_temp(self):
"""Return the minimal temperature possible to set."""
return self._min_temp
@property
def max_temp(self):
"""Return the maximum temperature possible to set."""
return self._max_temp
@property
def temperature_unit(self):
"""Return the unit of measured temperature."""
return TEMP_CELSIUS
def set_temperature(self, **kwargs):
"""Set new target temperature."""
_LOGGER.debug("Adjusting temperature")
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is not None and self._min_temp < temperature < self._max_temp:
_LOGGER.debug("Changing temporary temperature")
self._api.set_temperature(self._domain_objects, temperature)
else:
_LOGGER.error("Invalid temperature requested")
def set_hvac_mode(self, hvac_mode):
"""Set the hvac mode."""
_LOGGER.debug("Adjusting hvac_mode (i.e. schedule/schema)")
schema_mode = "false"
if hvac_mode == HVAC_MODE_AUTO:
schema_mode = "true"
self._api.set_schema_state(
self._domain_objects, self._last_active_schema, schema_mode
)
def set_preset_mode(self, preset_mode):
"""Set the preset mode."""
_LOGGER.debug("Changing preset mode")
self._api.set_preset(self._domain_objects, preset_mode)
def update(self):
"""Update the data from the thermostat."""
_LOGGER.debug("Update called")
self._direct_objects = self._api.get_direct_objects()
self._domain_objects = self._api.get_domain_objects()
self._outdoor_temperature = self._api.get_outdoor_temperature(
self._domain_objects
)
self._selected_schema = self._api.get_active_schema_name(self._domain_objects)
self._last_active_schema = self._api.get_last_active_schema_name(
self._domain_objects
)
self._preset_mode = self._api.get_current_preset(self._domain_objects)
self._presets = self._api.get_presets(self._domain_objects)
self._presets_list = list(self._api.get_presets(self._domain_objects))
self._boiler_status = self._api.get_boiler_status(self._direct_objects)
self._heating_status = self._api.get_heating_status(self._direct_objects)
self._cooling_status = self._api.get_cooling_status(self._direct_objects)
self._dhw_status = self._api.get_domestic_hot_water_status(self._direct_objects)
self._schema_names = self._api.get_schema_names(self._domain_objects)
self._schema_status = self._api.get_schema_state(self._domain_objects)
self._current_temperature = self._api.get_current_temperature(
self._domain_objects
)
self._thermostat_temperature = self._api.get_thermostat_temperature(
self._domain_objects
)
self._schedule_temperature = self._api.get_schedule_temperature(
self._domain_objects
)
self._boiler_temperature = self._api.get_boiler_temperature(
self._domain_objects
)
self._water_pressure = self._api.get_water_pressure(self._domain_objects)
|