code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
../../../../share/pyshared/zeitgeist/datamodel.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/zeitgeist/datamodel.py | Python | gpl-3.0 | 49 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Standard Ansible metadata describing module maturity and support level.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

# Module documentation consumed by ansible-doc (YAML embedded in a string).
DOCUMENTATION = '''
---
module: aws_codecommit
version_added: "2.8"
short_description: Manage repositories in AWS CodeCommit
description:
- Supports creation and deletion of CodeCommit repositories.
- See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
author: Shuang Wang (@ptux)
requirements:
- botocore
- boto3
- python >= 2.6
options:
name:
description:
- name of repository.
required: true
comment:
description:
- description or comment of repository.
required: false
state:
description:
- Specifies the state of repository.
required: true
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''

# Description of the values returned by the module (YAML in a string).
RETURN = '''
repository_metadata:
description: "Information about the repository."
returned: always
type: complex
contains:
account_id:
description: "The ID of the AWS account associated with the repository."
returned: when state is present
type: str
sample: "268342293637"
arn:
description: "The Amazon Resource Name (ARN) of the repository."
returned: when state is present
type: str
sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username"
clone_url_http:
description: "The URL to use for cloning the repository over HTTPS."
returned: when state is present
type: str
sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
clone_url_ssh:
description: "The URL to use for cloning the repository over SSH."
returned: when state is present
type: str
sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
creation_date:
description: "The date and time the repository was created, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
last_modified_date:
description: "The date and time the repository was last modified, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
repository_description:
description: "A comment or description about the repository."
returned: when state is present
type: str
sample: "test from ptux"
repository_id:
description: "The ID of the repository that was created or deleted"
returned: always
type: str
sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e"
repository_name:
description: "The repository's name."
returned: when state is present
type: str
sample: "reponame"
response_metadata:
description: "Information about the response."
returned: always
type: complex
contains:
http_headers:
description: "http headers of http response"
returned: always
type: dict
http_status_code:
description: "http status code of http response"
returned: always
type: str
sample: "200"
request_id:
description: "http request id"
returned: always
type: str
sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef"
retry_attempts:
description: "numbers of retry attempts"
returned: always
type: str
sample: "0"
'''

# Usage examples shown by ansible-doc (YAML in a string).
EXAMPLES = '''
# Create a new repository
- aws_codecommit:
name: repo
state: present
# Delete a repository
- aws_codecommit:
name: repo
state: absent
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class CodeCommit(object):
    """Thin wrapper around the boto3 CodeCommit client.

    Implements the module's create/delete logic, including check-mode
    support.  All AWS errors are reported through ``fail_json_aws``.
    """

    def __init__(self, module=None):
        self._module = module
        # AnsibleAWSModule.client() wires up region/credentials for us.
        self._client = self._module.client('codecommit')
        self._check_mode = self._module.check_mode

    def process(self):
        """Converge the repository to the requested state.

        Returns the raw (CamelCase) API response dict of the operation
        performed, with ``changed`` set; caller snake-cases it.
        """
        result = dict(changed=False)

        if self._module.params['state'] == 'present' and not self._repository_exists():
            if not self._module.check_mode:
                result = self._create_repository()
            # In check mode we still report the change we would have made.
            result['changed'] = True

        if self._module.params['state'] == 'absent' and self._repository_exists():
            if not self._module.check_mode:
                result = self._delete_repository()
            result['changed'] = True

        return result

    def _repository_exists(self):
        """Return True if a repository with the requested name exists."""
        try:
            paginator = self._client.get_paginator('list_repositories')
            for page in paginator.paginate():
                for item in page['repositories']:
                    # Compare the repository name explicitly.  The previous
                    # `name in item.values()` test also matched repositoryId
                    # (or any other value), giving false positives.
                    if item['repositoryName'] == self._module.params['name']:
                        return True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="couldn't get repository")
        return False

    def _create_repository(self):
        """Create the repository; returns the API response dict."""
        try:
            result = self._client.create_repository(
                repositoryName=self._module.params['name'],
                repositoryDescription=self._module.params['comment']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="couldn't create repository")
        return result

    def _delete_repository(self):
        """Delete the repository; returns the API response dict."""
        try:
            result = self._client.delete_repository(
                repositoryName=self._module.params['name']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="couldn't delete repository")
        return result
def main():
    """Module entry point: parse arguments and run the CodeCommit handler."""
    spec = dict(
        name=dict(required=True),
        state=dict(choices=['present', 'absent'], required=True),
        comment=dict(default='')
    )

    module = AnsibleAWSModule(
        argument_spec=spec,
        supports_check_mode=True
    )

    handler = CodeCommit(module=module)
    outcome = handler.process()
    module.exit_json(**camel_dict_to_snake_dict(outcome))


if __name__ == '__main__':
    main()
| kvar/ansible | lib/ansible/modules/cloud/amazon/aws_codecommit.py | Python | gpl-3.0 | 6,565 |
# Bootstrap setuptools on hosts that lack it (legacy Python 2 idiom).
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import sys
#NOTE: if you want to develop askbot
#you might want to install django-debug-toolbar as well

import askbot

# Packaging definition for askbot; invoked as `python setup.py ...`.
setup(
    name = "askbot",
    version = askbot.get_version(),#remember to manually set this correctly
    description = 'Exercise and Problem forum, like StackOverflow, written in python and Django',
    packages = find_packages(),
    author = 'Evgeny.Fadeev',
    author_email = 'evgeny.fadeev@gmail.com',
    license = 'GPLv3',
    keywords = 'forum, community, wiki, Q&A',
    entry_points = {
        'console_scripts' : [
            'askbot-setup = askbot.deployment:askbot_setup',
        ]
    },
    url = 'http://askbot.org',
    include_package_data = True,
    # Dependency list is maintained inside the askbot package itself.
    install_requires = askbot.REQUIREMENTS.values(),
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Natural Language :: English',
        'Natural Language :: Finnish',
        'Natural Language :: German',
        'Natural Language :: Russian',
        'Natural Language :: Serbian',
        'Natural Language :: Turkish',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: JavaScript',
        'Topic :: Communications :: Usenet News',
        'Topic :: Communications :: Email :: Mailing List Servers',
        'Topic :: Communications',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
    ],
    # Long description doubles as the PyPI page (reStructuredText).
    long_description = """Askbot will work alone or with other django apps (with some limitations, please see below), Django 1.1.1 - 1.2.3(*), MySQL(**) and PostgresQL(recommended) (>=8.3).
Exercises? Suggestions? Found a bug? -> please post at http://askbot.org/
Features
========
* standard Q&A functionalities including votes, reputation system, etc.
* user levels: admin, moderator, regular, suspended, blocked
* per-user inbox for responses & flagged items (for moderators)
* email alerts - instant and delayed, optionally tag filtered
* search by full text and a set of tags simultaneously
* can import data from stackexchange database file
Installation
============
The general steps are:
* install the code
* if there is no database yet - create one
* create a new or configure existing django site for askbot
* create/update the database tables
Methods to install code
-----------------------
* **pip install askbot**
* **easy_install askbot**
* **download .tar.gz** file from the bottom of this page, then run **python setup.py install**
* clone code from the github **git clone git://github.com/ASKBOT/askbot-devel.git**, and then **python setup.py develop**
Create/configure django site
----------------------------
Either run command **askbot-setup** or merge contents of directory **askbot/setup_templates** in the source code into your project directory.
Create/update database tables
-----------------------------
Back up your database if it is not blank, then two commands:
* python manage.py syncdb
* python manage.py migrate
There are two apps to migrate - askbot and django_authopenid (a forked version of the original, included within askbot), so you can as well migrate them separately
Limitations
===========
There are some limitations that will be removed in the future. If any of these cause issues - please do not hesitate to contact admin@askbot.org.
Askbot patches `auth_user` table. The migration script will automatically add missing columns, however it will not overwrite any existing columns. Please do back up your database before adding askbot to an existing site.
Included into askbot there are two forked apps: `django_authopenid` and `livesettings`. If you have these apps on your site, you may have trouble installing askbot.
User registration and login system is bundled with Askbot. It is quite good though, it allows logging in with password and many authentication service providers, including popular social services and recover account by email.
If there are any other collisions, askbot will simply fail to install, it will not damage your data.
Background Information
======================
Askbot is based on CNPROG project by Mike Chen and Sailing Cai, project which was originally inspired by StackOverflow and Yahoo Problems.
Footnotes
=========
(*) - If you want to install with django 1.2.x a dependency "Coffin-0.3" needs to be replaced with "Coffin-0.3.3" - this will be automated in the future versions of the setup script.
(**) - With MySQL you have to use MyISAM data backend, because it's the only one that supports Full Text Search."""
)

# Post-install banner (Python 2 print statement; this file targets py2).
print """**************************************************************
* *
* Thanks for installing Askbot. *
* *
* To start deploying type: askbot-setup *
* Please take a look at the manual askbot/doc/INSTALL *
* And please do not hesitate to ask your questions at *
* at http://askbot.org *
* *
**************************************************************"""
| maxwward/SCOPEBak | setup.py | Python | gpl-3.0 | 5,541 |
""" Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from numpy.testing import assert_, dec, TestCase, run_module_suite
from scipy.optimize import nonlin
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
# Every solver entry point exercised by the test generator below.
SOLVERS = [nonlin.anderson, nonlin.diagbroyden, nonlin.linearmixing,
           nonlin.excitingmixing, nonlin.broyden1, nonlin.broyden2,
           nonlin.newton_krylov]
# Solvers that must converge on every test problem; the rest may opt out
# per-problem via each problem's KNOWN_BAD list.
MUST_WORK = [nonlin.anderson, nonlin.broyden1, nonlin.broyden2,
             nonlin.newton_krylov]
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
    """Test problem: f(x) = -D x - c (x.x) x with D = diag(3, 2, 1.5, 1, 0.5).

    Returns a numpy matrix (column vector); the unique root is x = 0.
    """
    col = np.asmatrix(x).T
    scale = matrix(diag([3, 2, 1.5, 1, 0.5]))
    cubic_coeff = 0.01
    return -scale * col - cubic_coeff * float(col.T * col) * col

# Standard starting point and the solvers known to fail on this problem.
F.xin = [1, 1, 1, 1, 1]
F.KNOWN_BAD = []
def F2(x):
    """Identity mapping; the solvers should drive x to zero."""
    return x

# Starting point and solvers expected to fail on this problem.
F2.xin = [1, 2, 3, 4, 5, 6]
F2.KNOWN_BAD = [nonlin.linearmixing, nonlin.excitingmixing]
def F3(x):
    """Affine test problem A x - b with a tridiagonal A; root where A x = b."""
    A = np.matrix('-2 1 0; 1 -2 1; 0 1 -2')
    b = np.matrix('1 2 3')
    return np.dot(A, x) - b

# Starting point; every solver is expected to handle this problem.
F3.xin = [1, 2, 3]
F3.KNOWN_BAD = []
def F4_powell(x):
    """Powell's badly scaled function (two variables, scale factor 1e4)."""
    scale = 1e4
    u, v = x[0], x[1]
    return [scale * u * v - 1,
            np.exp(-u) + np.exp(-v) - (1 + 1/scale)]

# Starting point and solvers expected to fail on this problem.
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = [nonlin.linearmixing, nonlin.excitingmixing,
                       nonlin.diagbroyden]
# Reuse the pressure-network problem from the minpack test suite as F5.
from test_minpack import TestFSolve as F5_class
F5_object = F5_class()

def F5(x):
    """Four-variable pressure-network residual; see test_minpack for details."""
    return F5_object.pressure_network(x, 4, np.array([.5, .5, .5, .5]))

# Starting point and solvers expected to fail on this problem.
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = [nonlin.excitingmixing, nonlin.linearmixing, nonlin.diagbroyden]
def F6(x):
    """Two-variable test problem returning -J0^{-1} v(x) for a fixed J0."""
    a, b = x
    J0 = np.array([[-4.256, 14.7],
                   [0.8394989, 0.59964207]])
    residual = np.array([(a + 3) * (b**5 - 7) + 3*6,
                         np.sin(b * np.exp(a) - 1)])
    return -np.linalg.solve(J0, residual)

# Starting point and solvers expected to fail on this problem.
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = [nonlin.excitingmixing, nonlin.linearmixing, nonlin.diagbroyden]
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
    """
    Check the Broyden methods for a few test problems.

    broyden1, broyden2, and newton_krylov must succeed for
    all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
    """

    def _check_func(self, f, func, f_tol=1e-2):
        # Run the solver from the problem's standard starting point and
        # require the residual to drop below f_tol.
        x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
        assert_(np.absolute(f(x)).max() < f_tol)

    @dec.knownfailureif(True)
    def _check_func_fail(self, *a, **kw):
        # Placeholder that records an expected failure for solvers that
        # appear in both KNOWN_BAD and MUST_WORK.
        pass

    def test_problem(self):
        # Nose-style generator: yields one check per (problem, solver) pair.
        for f in [F, F2, F3, F4_powell, F5, F6]:
            for func in SOLVERS:
                if func in f.KNOWN_BAD:
                    if func in MUST_WORK:
                        yield self._check_func_fail, f, func
                    continue
                yield self._check_func, f, func
class TestSecant(TestCase):
    """Check that some Jacobian approximations satisfy the secant condition"""

    # Fixed sequence of iterates; fs holds the residuals f(x) = x**2 - 1.
    xs = [np.array([1,2,3,4,5], float),
          np.array([2,3,4,5,1], float),
          np.array([3,4,5,1,2], float),
          np.array([4,5,1,2,3], float),
          np.array([9,1,9,1,3], float),
          np.array([0,1,9,1,3], float),
          np.array([5,5,7,1,1], float),
          np.array([1,2,7,5,1], float),]
    fs = [x**2 - 1 for x in xs]

    def _check_secant(self, jac_cls, npoints=1, **kw):
        """
        Check that the given Jacobian approximation satisfies secant
        conditions for last `npoints` points.
        """
        jac = jac_cls(**kw)
        jac.setup(self.xs[0], self.fs[0], None)
        for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            jac.update(x, f)

            # `range` rather than `xrange`: identical behaviour on Python 2
            # for these small counts, and valid on Python 3.
            for k in range(min(npoints, j+1)):
                dx = self.xs[j-k+1] - self.xs[j-k]
                df = self.fs[j-k+1] - self.fs[j-k]
                assert_(np.allclose(dx, jac.solve(df)))

            # Check that the `npoints` secant bound is strict
            if j >= npoints:
                dx = self.xs[j-npoints+1] - self.xs[j-npoints]
                df = self.fs[j-npoints+1] - self.fs[j-npoints]
                assert_(not np.allclose(dx, jac.solve(df)))

    def test_broyden1(self):
        self._check_secant(nonlin.BroydenFirst)

    def test_broyden2(self):
        self._check_secant(nonlin.BroydenSecond)

    def test_broyden1_update(self):
        # Check that BroydenFirst update works as for a dense matrix
        jac = nonlin.BroydenFirst(alpha=0.1)
        jac.setup(self.xs[0], self.fs[0], None)

        B = np.identity(5) * (-1/0.1)

        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[last_j]
            dx = x - self.xs[last_j]
            # Good Broyden rank-1 update of the Jacobian estimate.
            B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
            jac.update(x, f)
            assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))

    def test_broyden2_update(self):
        # Check that BroydenSecond update works as for a dense matrix
        jac = nonlin.BroydenSecond(alpha=0.1)
        jac.setup(self.xs[0], self.fs[0], None)

        H = np.identity(5) * (-0.1)

        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[last_j]
            dx = x - self.xs[last_j]
            # Bad Broyden rank-1 update of the inverse-Jacobian estimate.
            H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
            jac.update(x, f)
            assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))

    def test_anderson(self):
        # Anderson mixing (with w0=0) satisfies secant conditions
        # for the last M iterates, see [Ey]_
        #
        # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
        self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(TestCase):
    """Solve a linear equation;
    some methods find the exact solution in a finite number of steps"""

    def _check(self, jac, N, maxiter, complex=False, **kw):
        # NOTE: the fixture depends on the seed AND on the exact order of the
        # np.random calls below -- do not reorder them.
        np.random.seed(123)

        A = np.random.randn(N, N)
        if complex:
            A = A + 1j*np.random.randn(N, N)
        b = np.random.randn(N)
        if complex:
            b = b + 1j*np.random.randn(N)

        def func(x):
            return dot(A, x) - b

        # Start from zero; the solver should reach the residual tolerance
        # within `maxiter` iterations.
        sol = nonlin.nonlin_solve(func, b*0, jac, maxiter=maxiter,
                                  f_tol=1e-6, line_search=None, verbose=0)
        assert_(np.allclose(dot(A, sol), b, atol=1e-6))

    def test_broyden1(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)

    def test_broyden2(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)

    def test_anderson(self):
        # Anderson is rather similar to Broyden, if given enough storage space
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)

    def test_krylov(self):
        # Krylov methods solve linear systems exactly in N inner steps
        self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
        self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
    """Check that solve/dot methods in Jacobian approximations are consistent"""

    def _func(self, x):
        # Mildly nonlinear residual; self.A is set by _check_dot.
        return x**2 - 1 + np.dot(self.A, x)

    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        """Cross-check matvec/rmatvec against solve/rsolve and the dense form."""
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j*np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            d = abs(a - b).max()
            f = tol + abs(b).max()*tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency
        # `range` rather than `xrange`: identical behaviour on Python 2
        # for these small counts, and valid on Python 3.
        for k in range(2*N):
            v = rand(N)

            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            x = rand(N)
            jac.update(x, self._func(x))

    def test_broyden1(self):
        self._check_dot(nonlin.BroydenFirst, complex=False)
        self._check_dot(nonlin.BroydenFirst, complex=True)

    def test_broyden2(self):
        self._check_dot(nonlin.BroydenSecond, complex=False)
        self._check_dot(nonlin.BroydenSecond, complex=True)

    def test_anderson(self):
        self._check_dot(nonlin.Anderson, complex=False)
        self._check_dot(nonlin.Anderson, complex=True)

    def test_diagbroyden(self):
        self._check_dot(nonlin.DiagBroyden, complex=False)
        self._check_dot(nonlin.DiagBroyden, complex=True)

    def test_linearmixing(self):
        self._check_dot(nonlin.LinearMixing, complex=False)
        self._check_dot(nonlin.LinearMixing, complex=True)

    def test_excitingmixing(self):
        self._check_dot(nonlin.ExcitingMixing, complex=False)
        self._check_dot(nonlin.ExcitingMixing, complex=True)

    def test_krylov(self):
        # Krylov-based approximation is iterative, so use a looser tolerance.
        self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-4)
        self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-4)
class TestNonlinOldTests(TestCase):
    """ Test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """

    def test_broyden1(self):
        # Fixed iteration count; the root of F is at x = 0.
        x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
        assert_(nonlin.norm(x) < 1e-9)
        assert_(nonlin.norm(F(x)) < 1e-9)

    def test_broyden2(self):
        x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
        assert_(nonlin.norm(x) < 1e-9)
        assert_(nonlin.norm(F(x)) < 1e-9)

    def test_anderson(self):
        # Anderson mixing converges more slowly here; only a loose bound.
        x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
        assert_(nonlin.norm(x) < 0.33)

    def test_linearmixing(self):
        x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
        assert_(nonlin.norm(x) < 1e-7)
        assert_(nonlin.norm(F(x)) < 1e-7)

    def test_exciting(self):
        x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
        assert_(nonlin.norm(x) < 1e-5)
        assert_(nonlin.norm(F(x)) < 1e-5)

    def test_diagbroyden(self):
        x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
        assert_(nonlin.norm(x) < 1e-8)
        assert_(nonlin.norm(F(x)) < 1e-8)
# Allow running this test module directly.
if __name__ == "__main__":
    run_module_suite()
| ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/optimize/tests/test_nonlin.py | Python | gpl-3.0 | 11,990 |
"""News source to send a notification whenever a twitch streamer goes live."""
import datetime
import logging
import discord
from dateutil import parser
from .AbstractSources import DataBasedSource
# Shared module-level logger for the dozer bot.
DOZER_LOGGER = logging.getLogger('dozer')
class TwitchSource(DataBasedSource):
    """News source to send a notification whenever a twitch streamer goes live."""
    full_name = "Twitch"
    short_name = "twitch"
    base_url = "https://twitch.tv"
    description = "Makes a post whenever a specified user goes live on Twitch"

    # OAuth2 client-credentials token endpoint and the Helix API root.
    token_url = "https://id.twitch.tv/oauth2/token"
    api_url = "https://api.twitch.tv/helix"

    # Accent colour for embeds (Twitch brand purple).
    color = discord.Color.from_rgb(145, 70, 255)

    class TwitchUser(DataBasedSource.DataPoint):
        """A helper class to represent a single Twitch streamer"""

        def __init__(self, user_id, display_name, profile_image_url, login):
            super().__init__(login, display_name)
            self.user_id = user_id
            self.display_name = display_name
            self.profile_image_url = profile_image_url
            self.login = login

    def __init__(self, aiohttp_session, bot):
        super().__init__(aiohttp_session, bot)
        # OAuth2 state; populated by get_token() during first_run().
        self.access_token = None
        self.client_id = None
        self.expiry_time = None
        # Tracked streamers, keyed by Twitch user id.
        self.users = {}
        # Stream ids already announced, so a stream is only posted once.
        self.seen_streams = set()

    async def get_token(self):
        """Use OAuth2 to request a new token. If token fails, disable the source."""
        client_id = self.bot.config['news']['twitch']['client_id']
        self.client_id = client_id
        client_secret = self.bot.config['news']['twitch']['client_secret']
        params = {
            'client_id': client_id,
            'client_secret': client_secret,
            'grant_type': 'client_credentials'
        }
        response = await self.http_session.post(self.token_url, params=params)
        response = await response.json()
        try:
            self.access_token = response['access_token']
        except KeyError:
            # No token in the response: log the API error and disable the source.
            DOZER_LOGGER.critical(f"Error in {self.full_name} Token Get: {response['message']}")
            self.disabled = True
            return
        expiry_seconds = response['expires_in']
        time_delta = datetime.timedelta(seconds=expiry_seconds)
        self.expiry_time = datetime.datetime.now() + time_delta

    async def request(self, url, *args, headers=None, **kwargs):
        """Make a OAuth2 verified request to a API Endpoint"""
        if headers is None:
            headers = {'Authorization': f"Bearer {self.access_token}",
                       "Client-ID": self.client_id}
        else:
            headers['Authorization'] = f"Bearer {self.access_token}"
        url = f"{self.api_url}/{url}"
        response = await self.http_session.get(url, headers=headers, *args, **kwargs)
        if response.status == 401:
            # A 401 with WWW-Authenticate means the app token expired:
            # refresh the token and retry the request.
            # NOTE(review): `url` has already been prefixed with api_url at this
            # point, so the recursive call prefixes it a second time -- verify.
            if 'WWW-Authenticate' in response.headers:
                DOZER_LOGGER.info("Twitch token expired when request made, request new token and retrying.")
                await self.get_token()
                return await self.request(url, headers=headers, *args, **kwargs)
        json = await response.json()
        return json

    async def first_run(self, data=None):
        """Make sure we have a token, then verify and add all the current users in the DB"""
        await self.get_token()
        if not data:
            return
        # One bulk /users lookup; repeated `login` params are allowed by Helix.
        params = []
        for login in data:
            params.append(('login', login))
        json = await self.request("users", params=params)
        for user in json['data']:
            user_obj = TwitchSource.TwitchUser(user['id'], user['display_name'], user['profile_image_url'],
                                               user['login'])
            self.users[user['id']] = user_obj

    async def clean_data(self, text):
        """Request user data from Twitch to verify the username exists and clean the data"""
        try:
            # NOTE(review): self.users is keyed by user id elsewhere, so this
            # login-keyed lookup likely always misses -- confirm intent.
            user_obj = self.users[text]
        except KeyError:
            json = await self.request('users', params={'login': text})
            if len(json['data']) == 0:
                raise DataBasedSource.InvalidDataException("No user with that login name found")
            elif len(json['data']) > 1:
                raise DataBasedSource.InvalidDataException("More than one user with that login name found")
            user_obj = TwitchSource.TwitchUser(json['data'][0]['id'], json['data'][0]['display_name'],
                                               json['data'][0]['profile_image_url'], json['data'][0]['login'])
        return user_obj

    async def add_data(self, obj):
        """Add the user object to the store"""
        self.users[obj.user_id] = obj
        return True

    async def remove_data(self, obj):
        """Remove the user object from the store"""
        try:
            del self.users[obj.user_id]
            return True
        except KeyError:
            return False

    async def get_new_posts(self):
        """Assemble all the current user IDs, get any game names and return the embeds and strings"""
        if datetime.datetime.now() > self.expiry_time:
            DOZER_LOGGER.info("Refreshing Twitch token due to expiry time")
            await self.get_token()
        params = []
        for user in self.users.values():
            params.append(('user_id', user.user_id))
        # `first` caps the page size at the number of tracked users.
        params.append(('first', len(self.users)))
        json = await self.request("streams", params=params)
        if len(json['data']) == 0:
            return {}

        # streams endpoint only returns game ID, do a second request to get game names
        game_ids = []
        for stream in json['data']:
            game_ids.append(stream['game_id'])
        params = []
        for game in game_ids:
            params.append(('id', game))
        games_json = await self.request("games", params=params)
        games = {}
        for game in games_json['data']:
            games[game['id']] = game['name']

        posts = {}
        for stream in json['data']:
            # Only announce streams we have not posted about before.
            if stream['id'] not in self.seen_streams:
                embed = self.generate_embed(stream, games)
                plain = self.generate_plain_text(stream, games)
                posts[stream['user_name']] = {
                    'embed': [embed],
                    'plain': [plain]
                }
                self.seen_streams.add(stream['id'])
        return posts

    def generate_embed(self, data, games):
        """Given data on a stream and a dict of games, assemble an embed"""
        try:
            display_name = data['display_name']
        except KeyError:
            # The streams payload may omit display_name; fall back to login.
            display_name = data['user_name']
        embed = discord.Embed()
        embed.title = f"{display_name} is now live on Twitch!"
        embed.colour = self.color
        embed.description = data['title']
        embed.url = f"https://www.twitch.tv/{data['user_name']}"
        embed.add_field(name="Playing", value=games[data['game_id']], inline=True)
        embed.add_field(name="Watching", value=data['viewer_count'], inline=True)
        embed.set_author(name=display_name, url=embed.url, icon_url=self.users[data['user_id']].profile_image_url)
        # Thumbnail URL template takes width/height placeholders.
        embed.set_image(url=data['thumbnail_url'].format(width=1920, height=1080))
        start_time = parser.isoparse(data['started_at'])
        embed.timestamp = start_time
        return embed

    def generate_plain_text(self, data, games):
        """Given data on a stream and a dict of games, assemble a string"""
        try:
            display_name = data['display_name']
        except KeyError:
            display_name = data['user_name']
        return f"{display_name} is now live on Twitch!\n" \
               f"Playing {games[data['game_id']]} with {data['viewer_count']} currently watching\n" \
               f"Watch at https://www.twitch.tv/{data['user_name']}"
| guineawheek/Dozer | dozer/sources/TwitchSource.py | Python | gpl-3.0 | 7,912 |
#!/usr/bin/python
'''
Created on Sep 18, 2009
@author: sgallagh
'''
import unittest
import os
from stat import *
import sys
# When run from the build tree the `srcdir` environment variable points at the
# source root: make the config bindings importable and look for fixtures there.
# Otherwise fall back to the current directory.
srcdir = os.getenv('srcdir')
if srcdir:
    sys.path.insert(0, "./src/config")
    srcdir = srcdir + "/src/config"
else:
    srcdir = "."
import SSSDConfig
class SSSDConfigTestValid(unittest.TestCase):
    """Tests exercising SSSDConfig against a known-valid configuration file."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testServices(self):
        """Listing services, reading options, and recreating [sssd]."""
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")

        # Validate services
        services = sssdconfig.list_services()
        self.assertTrue('sssd' in services)
        self.assertTrue('nss' in services)
        self.assertTrue('pam' in services)

        #Verify service attributes
        sssd_service = sssdconfig.get_service('sssd')
        service_opts = sssd_service.list_options()

        self.assertTrue('services' in service_opts.keys())
        service_list = sssd_service.get_option('services')
        self.assertTrue('nss' in service_list)
        self.assertTrue('pam' in service_list)
        self.assertTrue('domains' in service_opts)
        self.assertTrue('reconnection_retries' in service_opts)

        del sssdconfig

        # A freshly created [sssd] service must expose the expected option
        # schema: (type, subtype) tuples per option name.
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.new_config()
        sssdconfig.delete_service('sssd')
        new_sssd_service = sssdconfig.new_service('sssd')
        new_options = new_sssd_service.list_options()

        self.assertTrue('debug_level' in new_options)
        self.assertEquals(new_options['debug_level'][0], int)

        self.assertTrue('command' in new_options)
        self.assertEquals(new_options['command'][0], str)

        self.assertTrue('reconnection_retries' in new_options)
        self.assertEquals(new_options['reconnection_retries'][0], int)

        self.assertTrue('services' in new_options)
        self.assertEquals(new_options['debug_level'][0], int)

        self.assertTrue('domains' in new_options)
        self.assertEquals(new_options['domains'][0], list)
        self.assertEquals(new_options['domains'][1], str)

        self.assertTrue('sbus_timeout' in new_options)
        self.assertEquals(new_options['sbus_timeout'][0], int)

        self.assertTrue('re_expression' in new_options)
        self.assertEquals(new_options['re_expression'][0], str)

        self.assertTrue('full_name_format' in new_options)
        self.assertEquals(new_options['full_name_format'][0], str)

        self.assertTrue('default_domain_suffix' in new_options)
        self.assertEquals(new_options['default_domain_suffix'][0], str)

        del sssdconfig

    def testDomains(self):
        """Listing domains and reading domain options from the valid config."""
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")

        #Validate domain list
        domains = sssdconfig.list_domains()
        self.assertTrue('LOCAL' in domains)
        self.assertTrue('LDAP' in domains)
        self.assertTrue('PROXY' in domains)
        self.assertTrue('IPA' in domains)

        #Verify domain attributes
        ipa_domain = sssdconfig.get_domain('IPA')
        domain_opts = ipa_domain.list_options()
        self.assertTrue('debug_level' in domain_opts.keys())
        self.assertTrue('id_provider' in domain_opts.keys())
        self.assertTrue('auth_provider' in domain_opts.keys())

        del sssdconfig

    def testListProviders(self):
        """A new domain must advertise the available providers."""
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")

        sssdconfig.new_config()
        junk_domain = sssdconfig.new_domain('junk')
        providers = junk_domain.list_providers()
        self.assertTrue('ldap' in providers.keys())

    def testCreateNewLocalConfig(self):
        """Create a LOCAL-domain config from scratch and write it out."""
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")

        sssdconfig.new_config()

        local_domain = sssdconfig.new_domain('LOCAL')
        local_domain.add_provider('local', 'id')
        local_domain.set_option('debug_level', 1)
        local_domain.set_option('default_shell', '/bin/tcsh')
        local_domain.set_active(True)
        sssdconfig.save_domain(local_domain)

        of = '/tmp/testCreateNewLocalConfig.conf'

        #Ensure the output file doesn't exist
        try:
            os.unlink(of)
        except OSError:
            # Missing file is the expected case; narrow except so real
            # programming errors are not silently swallowed.
            pass

        #Write out the file
        sssdconfig.write(of)

        #Verify that the output file has the correct permissions
        mode = os.stat(of)[ST_MODE]

        #Output files should not be readable or writable by
        #non-owners, and should not be executable by anyone
        # (0o177 masks owner-execute plus all group/other permission bits)
        self.assertFalse(S_IMODE(mode) & 0o177)

        #Remove the output file
        os.unlink(of)

    def testCreateNewLDAPConfig(self):
        """Create an LDAP-domain config from scratch and write it out."""
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")

        sssdconfig.new_config()

        ldap_domain = sssdconfig.new_domain('LDAP')
        ldap_domain.add_provider('ldap', 'id')
        ldap_domain.set_option('debug_level', 1)
        ldap_domain.set_active(True)
        sssdconfig.save_domain(ldap_domain)

        of = '/tmp/testCreateNewLDAPConfig.conf'

        #Ensure the output file doesn't exist
        try:
            os.unlink(of)
        except OSError:
            pass

        #Write out the file
        sssdconfig.write(of)

        #Verify that the output file has the correct permissions
        mode = os.stat(of)[ST_MODE]

        #Output files should not be readable or writable by
        #non-owners, and should not be executable by anyone
        self.assertFalse(S_IMODE(mode) & 0o177)

        #Remove the output file
        os.unlink(of)

    def testModifyExistingConfig(self):
        """Modify an imported config (swap auth provider) and write it out."""
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")

        ldap_domain = sssdconfig.get_domain('LDAP')
        ldap_domain.set_option('debug_level', 3)

        ldap_domain.remove_provider('auth')
        ldap_domain.add_provider('krb5', 'auth')
        ldap_domain.set_active(True)
        sssdconfig.save_domain(ldap_domain)

        of = '/tmp/testModifyExistingConfig.conf'

        #Ensure the output file doesn't exist
        try:
            os.unlink(of)
        except OSError:
            pass

        #Write out the file
        sssdconfig.write(of)

        #Verify that the output file has the correct permissions
        mode = os.stat(of)[ST_MODE]

        #Output files should not be readable or writable by
        #non-owners, and should not be executable by anyone
        self.assertFalse(S_IMODE(mode) & 0o177)

        #Remove the output file
        os.unlink(of)

    def testSpaces(self):
        """Options with surrounding whitespace in the file parse cleanly."""
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
        ldap_domain = sssdconfig.get_domain('LDAP')
        self.assertEqual(ldap_domain.get_option('auth_provider'), 'ldap')
        self.assertEqual(ldap_domain.get_option('id_provider'), 'ldap')
class SSSDConfigTestInvalid(unittest.TestCase):
    """Tests exercising configurations that must fail validation."""
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testBadBool(self):
        """A malformed boolean option value raises TypeError on get_domain."""
        config = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                       srcdir + "/etc/sssd.api.d")
        config.import_config(srcdir +
                             "/testconfigs/sssd-invalid-badbool.conf")
        self.assertRaises(TypeError, config.get_domain, 'IPA')
class SSSDConfigTestSSSDService(unittest.TestCase):
    """Unit tests for the SSSDConfig.SSSDService object."""
    def setUp(self):
        # Load the option schema once per test; every SSSDService under
        # test is constructed against this schema.
        self.schema = SSSDConfig.SSSDConfigSchema(srcdir + "/etc/sssd.api.conf",
                                                  srcdir + "/etc/sssd.api.d")
    def tearDown(self):
        # No per-test state beyond self.schema; nothing to clean up.
        pass
def testInit(self):
# Positive test
service = SSSDConfig.SSSDService('sssd', self.schema)
# Type Error test
# Name is not a string
self.assertRaises(TypeError, SSSDConfig.SSSDService, 3, self.schema)
# TypeError test
# schema is not an SSSDSchema
self.assertRaises(TypeError, SSSDConfig.SSSDService, '3', self)
# ServiceNotRecognizedError test
self.assertRaises(SSSDConfig.ServiceNotRecognizedError,
SSSDConfig.SSSDService, 'ssd', self.schema)
    def testListOptions(self):
        """list_options() on the 'sssd' service exposes exactly the
        expected option names, mapped to option-description tuples.
        """
        service = SSSDConfig.SSSDService('sssd', self.schema)
        options = service.list_options()
        control_list = [
            'services',
            'domains',
            'timeout',
            'force_timeout',
            'sbus_timeout',
            're_expression',
            'full_name_format',
            'krb5_rcache_dir',
            'default_domain_suffix',
            'debug_level',
            'debug_timestamps',
            'debug_microseconds',
            'debug_to_files',
            'command',
            'reconnection_retries',
            'fd_limit',
            'client_idle_timeout',
            'description']
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Option tuples appear to be (type, subtype, <?>, default);
        # index 2 is not examined here -- TODO confirm its meaning.
        self.assertTrue(type(options['reconnection_retries']) == tuple,
                        "Option values should be a tuple")
        self.assertTrue(options['reconnection_retries'][0] == int,
                        "reconnection_retries should require an int. " +
                        "list_options is requiring a %s" %
                        options['reconnection_retries'][0])
        self.assertTrue(options['reconnection_retries'][1] == None,
                        "reconnection_retries should not require a subtype. " +
                        "list_options is requiring a %s" %
                        options['reconnection_retries'][1])
        self.assertTrue(options['reconnection_retries'][3] == None,
                        "reconnection_retries should have no default")
        self.assertTrue(type(options['services']) == tuple,
                        "Option values should be a tuple")
        self.assertTrue(options['services'][0] == list,
                        "services should require an list. " +
                        "list_options is requiring a %s" %
                        options['services'][0])
        self.assertTrue(options['services'][1] == str,
                        "services should require a subtype of str. " +
                        "list_options is requiring a %s" %
                        options['services'][1])
    def testListMandatoryOptions(self):
        """The sssd service requires exactly 'services' and 'domains'."""
        service = SSSDConfig.SSSDService('sssd', self.schema)
        options = service.list_mandatory_options()
        control_list = [
            'services',
            'domains']
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        self.assertTrue(type(options['services']) == tuple,
                        "Option values should be a tuple")
        self.assertTrue(options['services'][0] == list,
                        "services should require an list. " +
                        "list_options is requiring a %s" %
                        options['services'][0])
        self.assertTrue(options['services'][1] == str,
                        "services should require a subtype of str. " +
                        "list_options is requiring a %s" %
                        options['services'][1])
def testSetOption(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
# Positive test - Exactly right
service.set_option('debug_level', 2)
self.assertEqual(service.get_option('debug_level'), 2)
# Positive test - Allow converting "safe" values
service.set_option('debug_level', '2')
self.assertEqual(service.get_option('debug_level'), 2)
# Positive test - Remove option if value is None
service.set_option('debug_level', None)
self.assertTrue('debug_level' not in service.options.keys())
# Negative test - Nonexistent Option
self.assertRaises(SSSDConfig.NoOptionError, service.set_option, 'nosuchoption', 1)
# Negative test - Incorrect type
self.assertRaises(TypeError, service.set_option, 'debug_level', 'two')
def testGetOption(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
# Positive test - Single-valued
self.assertEqual(service.get_option('config_file_version'), 2)
# Positive test - List of values
self.assertEqual(service.get_option('services'), ['nss', 'pam'])
# Negative Test - Bad Option
self.assertRaises(SSSDConfig.NoOptionError, service.get_option, 'nosuchoption')
def testGetAllOptions(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
#Positive test
options = service.get_all_options()
control_list = [
'config_file_version',
'services']
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
def testRemoveOption(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
# Positive test - Remove an option that exists
self.assertEqual(service.get_option('services'), ['nss', 'pam'])
service.remove_option('services')
self.assertRaises(SSSDConfig.NoOptionError, service.get_option, 'debug_level')
# Positive test - Remove an option that doesn't exist
self.assertRaises(SSSDConfig.NoOptionError, service.get_option, 'nosuchentry')
service.remove_option('nosuchentry')
class SSSDConfigTestSSSDDomain(unittest.TestCase):
    """Unit tests for the SSSDConfig.SSSDDomain object."""
    def setUp(self):
        # Load the option schema once per test; every SSSDDomain under
        # test is constructed against this schema.
        self.schema = SSSDConfig.SSSDConfigSchema(srcdir + "/etc/sssd.api.conf",
                                                  srcdir + "/etc/sssd.api.d")
    def tearDown(self):
        # No per-test state beyond self.schema; nothing to clean up.
        pass
def testInit(self):
# Positive Test
domain = SSSDConfig.SSSDDomain('mydomain', self.schema)
# Negative Test - Name not a string
self.assertRaises(TypeError, SSSDConfig.SSSDDomain, 2, self.schema)
# Negative Test - Schema is not an SSSDSchema
self.assertRaises(TypeError, SSSDConfig.SSSDDomain, 'mydomain', self)
def testGetName(self):
# Positive Test
domain = SSSDConfig.SSSDDomain('mydomain', self.schema)
self.assertEqual(domain.get_name(), 'mydomain')
def testSetActive(self):
#Positive Test
domain = SSSDConfig.SSSDDomain('mydomain', self.schema)
# Should default to inactive
self.assertFalse(domain.active)
domain.set_active(True)
self.assertTrue(domain.active)
domain.set_active(False)
self.assertFalse(domain.active)
    def testListOptions(self):
        """list_options reflects the base domain options plus whatever the
        currently attached providers contribute; removing a provider
        reverts the set.
        """
        domain = SSSDConfig.SSSDDomain('sssd', self.schema)
        # First test default options
        options = domain.list_options()
        control_list = [
            'description',
            'debug_level',
            'debug_timestamps',
            'min_id',
            'max_id',
            'timeout',
            'force_timeout',
            'offline_timeout',
            'try_inotify',
            'command',
            'enumerate',
            'cache_credentials',
            'store_legacy_passwords',
            'use_fully_qualified_names',
            'ignore_group_members',
            'filter_users',
            'filter_groups',
            'entry_cache_timeout',
            'entry_cache_user_timeout',
            'entry_cache_group_timeout',
            'entry_cache_netgroup_timeout',
            'entry_cache_service_timeout',
            'entry_cache_autofs_timeout',
            'entry_cache_sudo_timeout',
            'refresh_expired_interval',
            'lookup_family_order',
            'account_cache_expiration',
            'dns_resolver_timeout',
            'dns_discovery_domain',
            'dyndns_update',
            'dyndns_ttl',
            'dyndns_iface',
            'dyndns_refresh_interval',
            'dyndns_update_ptr',
            'dyndns_force_tcp',
            'dyndns_auth',
            'subdomain_enumerate',
            'override_gid',
            'case_sensitive',
            'override_homedir',
            'fallback_homedir',
            'override_shell',
            'default_shell',
            'pwd_expiration_warning',
            'id_provider',
            'auth_provider',
            'access_provider',
            'chpass_provider',
            'sudo_provider',
            'autofs_provider',
            'session_provider',
            'hostid_provider',
            'subdomains_provider',
            'realmd_tags',
            'subdomain_refresh_interval']
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Option tuples appear to be (type, subtype, ...); only indexes
        # 0 and 1 are examined here.
        self.assertTrue(type(options['max_id']) == tuple,
                        "Option values should be a tuple")
        self.assertTrue(options['max_id'][0] == int,
                        "max_id should require an int. " +
                        "list_options is requiring a %s" %
                        options['max_id'][0])
        self.assertTrue(options['max_id'][1] == None,
                        "max_id should not require a subtype. " +
                        "list_options is requiring a %s" %
                        options['max_id'][1])
        # Add a provider and verify that the new options appear
        domain.add_provider('local', 'id')
        control_list.extend(
            ['default_shell',
             'base_directory',
             'create_homedir',
             'remove_homedir',
             'homedir_umask',
             'skel_dir',
             'mail_dir',
             'userdel_cmd'])
        options = domain.list_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Add a provider that has global options and verify that
        # The new options appear.
        domain.add_provider('krb5', 'auth')
        backup_list = control_list[:]
        control_list.extend(
            ['krb5_server',
             'krb5_backup_server',
             'krb5_realm',
             'krb5_kpasswd',
             'krb5_backup_kpasswd',
             'krb5_ccachedir',
             'krb5_ccname_template',
             'krb5_keytab',
             'krb5_validate',
             'krb5_store_password_if_offline',
             'krb5_auth_timeout',
             'krb5_renewable_lifetime',
             'krb5_lifetime',
             'krb5_renew_interval',
             'krb5_use_fast',
             'krb5_fast_principal',
             'krb5_canonicalize',
             'krb5_use_enterprise_principal',
             'krb5_use_kdcinfo'])
        options = domain.list_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # krb5_kdcip is tolerated in the output but not required by the
        # forward check above -- presumably a legacy alias; TODO confirm.
        control_list.extend(['krb5_kdcip'])
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Remove the auth domain and verify that the options
        # revert to the backup_list
        domain.remove_provider('auth')
        options = domain.list_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in backup_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in backup_list,
                            'Option [%s] unexpectedly found' %
                            option)
    def testListMandatoryOptions(self):
        """Only id_provider is mandatory by default; providers can add
        their own mandatory options, and removal takes them away again.
        """
        domain = SSSDConfig.SSSDDomain('sssd', self.schema)
        # First test default options
        options = domain.list_mandatory_options()
        control_list = ['id_provider']
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Add a provider that has global options and verify that
        # The new options appear.
        domain.add_provider('krb5', 'auth')
        backup_list = control_list[:]
        control_list.extend(['krb5_realm'])
        options = domain.list_mandatory_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Remove the auth domain and verify that the options
        # revert to the backup_list
        domain.remove_provider('auth')
        options = domain.list_mandatory_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in backup_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in backup_list,
                            'Option [%s] unexpectedly found' %
                            option)
def testListProviders(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
control_provider_dict = {
'ipa': ['id', 'auth', 'access', 'chpass', 'sudo', 'autofs',
'session', 'hostid', 'subdomains'],
'ad': ['id', 'auth', 'access', 'chpass', 'sudo', 'subdomains'],
'local': ['id', 'auth', 'chpass'],
'ldap': ['id', 'auth', 'access', 'chpass', 'sudo', 'autofs'],
'krb5': ['auth', 'access', 'chpass'],
'proxy': ['id', 'auth', 'chpass'],
'simple': ['access'],
'permit': ['access'],
'deny': ['access']}
providers = domain.list_providers()
# Ensure that all of the expected defaults are there
for provider in control_provider_dict.keys():
for ptype in control_provider_dict[provider]:
self.assertTrue(providers.has_key(provider))
self.assertTrue(ptype in providers[provider])
for provider in providers.keys():
for ptype in providers[provider]:
self.assertTrue(control_provider_dict.has_key(provider))
self.assertTrue(ptype in control_provider_dict[provider])
    def testListProviderOptions(self):
        """list_provider_options returns the option set for a provider,
        optionally restricted to a single subtype.
        """
        domain = SSSDConfig.SSSDDomain('sssd', self.schema)
        # Test looking up a specific provider type
        options = domain.list_provider_options('krb5', 'auth')
        control_list = [
            'krb5_server',
            'krb5_backup_server',
            'krb5_kdcip',
            'krb5_realm',
            'krb5_kpasswd',
            'krb5_backup_kpasswd',
            'krb5_ccachedir',
            'krb5_ccname_template',
            'krb5_keytab',
            'krb5_validate',
            'krb5_store_password_if_offline',
            'krb5_auth_timeout',
            'krb5_renewable_lifetime',
            'krb5_lifetime',
            'krb5_renew_interval',
            'krb5_use_fast',
            'krb5_fast_principal',
            'krb5_canonicalize',
            'krb5_use_enterprise_principal',
            'krb5_use_kdcinfo']
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        #Test looking up all provider values
        options = domain.list_provider_options('krb5')
        # NOTE(review): 'krb5_kpasswd' is already in control_list, so this
        # extend is redundant for the membership checks below -- confirm
        # whether a different option name was intended here.
        control_list.extend(['krb5_kpasswd'])
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
def testAddProvider(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive Test
domain.add_provider('local', 'id')
# Negative Test - No such backend type
self.assertRaises(SSSDConfig.NoSuchProviderError,
domain.add_provider, 'nosuchbackend', 'auth')
# Negative Test - No such backend subtype
self.assertRaises(SSSDConfig.NoSuchProviderSubtypeError,
domain.add_provider, 'ldap', 'nosuchsubtype')
# Negative Test - Try to add a second provider of the same type
self.assertRaises(SSSDConfig.ProviderSubtypeInUse,
domain.add_provider, 'ldap', 'id')
    def testRemoveProvider(self):
        """remove_provider drops a provider and the options only it owns,
        while options shared with a remaining provider survive; removing
        an absent provider is a silent no-op.
        """
        domain = SSSDConfig.SSSDDomain('sssd', self.schema)
        # First test default options
        options = domain.list_options()
        control_list = [
            'description',
            'debug_level',
            'debug_timestamps',
            'min_id',
            'max_id',
            'timeout',
            'force_timeout',
            'offline_timeout',
            'try_inotify',
            'command',
            'enumerate',
            'cache_credentials',
            'store_legacy_passwords',
            'use_fully_qualified_names',
            'ignore_group_members',
            'filter_users',
            'filter_groups',
            'entry_cache_timeout',
            'entry_cache_user_timeout',
            'entry_cache_group_timeout',
            'entry_cache_netgroup_timeout',
            'entry_cache_service_timeout',
            'entry_cache_autofs_timeout',
            'entry_cache_sudo_timeout',
            'refresh_expired_interval',
            'account_cache_expiration',
            'lookup_family_order',
            'dns_resolver_timeout',
            'dns_discovery_domain',
            'dyndns_update',
            'dyndns_ttl',
            'dyndns_iface',
            'dyndns_refresh_interval',
            'dyndns_update_ptr',
            'dyndns_force_tcp',
            'dyndns_auth',
            'subdomain_enumerate',
            'override_gid',
            'case_sensitive',
            'override_homedir',
            'fallback_homedir',
            'override_shell',
            'default_shell',
            'pwd_expiration_warning',
            'id_provider',
            'auth_provider',
            'access_provider',
            'chpass_provider',
            'sudo_provider',
            'autofs_provider',
            'session_provider',
            'hostid_provider',
            'subdomains_provider',
            'realmd_tags',
            'subdomain_refresh_interval']
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        self.assertTrue(type(options['max_id']) == tuple,
                        "Option values should be a tuple")
        # NOTE(review): the next two failure messages say
        # "config_file_version" but the checks target max_id -- looks
        # like a copy/paste slip in the message text only.
        self.assertTrue(options['max_id'][0] == int,
                        "config_file_version should require an int. " +
                        "list_options is requiring a %s" %
                        options['max_id'][0])
        self.assertTrue(options['max_id'][1] == None,
                        "config_file_version should not require a subtype. " +
                        "list_options is requiring a %s" %
                        options['max_id'][1])
        # Add a provider and verify that the new options appear
        domain.add_provider('local', 'id')
        control_list.extend(
            ['default_shell',
             'base_directory',
             'create_homedir',
             'remove_homedir',
             'homedir_umask',
             'skel_dir',
             'mail_dir',
             'userdel_cmd'])
        options = domain.list_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Add a provider that has global options and verify that
        # The new options appear.
        domain.add_provider('krb5', 'auth')
        backup_list = control_list[:]
        control_list.extend(
            ['krb5_server',
             'krb5_backup_server',
             'krb5_kdcip',
             'krb5_realm',
             'krb5_kpasswd',
             'krb5_backup_kpasswd',
             'krb5_ccachedir',
             'krb5_ccname_template',
             'krb5_keytab',
             'krb5_validate',
             'krb5_store_password_if_offline',
             'krb5_auth_timeout',
             'krb5_renewable_lifetime',
             'krb5_lifetime',
             'krb5_renew_interval',
             'krb5_use_fast',
             'krb5_fast_principal',
             'krb5_canonicalize',
             'krb5_use_enterprise_principal',
             'krb5_use_kdcinfo'])
        options = domain.list_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in control_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in control_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Remove the local ID provider and add an LDAP one
        # LDAP ID providers can also use the krb5_realm
        # NOTE: dict.has_key() below is Python-2-only; 'x in d' is the
        # portable equivalent.
        domain.remove_provider('id')
        self.assertFalse(domain.options.has_key('id_provider'))
        domain.add_provider('ldap', 'id')
        # Set the krb5_realm option and the ldap_uri option
        domain.set_option('krb5_realm', 'EXAMPLE.COM')
        domain.set_option('ldap_uri', 'ldap://ldap.example.com')
        self.assertEquals(domain.get_option('krb5_realm'),
                          'EXAMPLE.COM')
        self.assertEquals(domain.get_option('ldap_uri'),
                          'ldap://ldap.example.com')
        # Remove the LDAP provider and verify that krb5_realm remains
        # (it is still owned by the krb5 auth provider)
        domain.remove_provider('id')
        self.assertEquals(domain.get_option('krb5_realm'),
                          'EXAMPLE.COM')
        self.assertFalse(domain.options.has_key('ldap_uri'))
        # Put the LOCAL provider back
        domain.add_provider('local', 'id')
        # Remove the auth domain and verify that the options
        # revert to the backup_list
        domain.remove_provider('auth')
        self.assertFalse(domain.options.has_key('auth_provider'))
        options = domain.list_options()
        self.assertTrue(type(options) == dict,
                        "Options should be a dictionary")
        # Ensure that all of the expected defaults are there
        for option in backup_list:
            self.assertTrue(option in options.keys(),
                            "Option [%s] missing" %
                            option)
        # Ensure that there aren't any unexpected options listed
        for option in options.keys():
            self.assertTrue(option in backup_list,
                            'Option [%s] unexpectedly found' %
                            option)
        # Ensure that the krb5_realm option is now gone
        self.assertFalse(domain.options.has_key('krb5_realm'))
        # Test removing nonexistent provider - Real
        domain.remove_provider('id')
        self.assertFalse(domain.options.has_key('id_provider'))
        # Test removing nonexistent provider - Bad backend type
        # Should pass without complaint
        domain.remove_provider('id')
        self.assertFalse(domain.options.has_key('id_provider'))
        # Test removing nonexistent provider - Bad provider type
        # Should pass without complaint
        domain.remove_provider('nosuchprovider')
        self.assertFalse(domain.options.has_key('nosuchprovider_provider'))
def testGetOption(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Negative Test - Try to get valid option that is not set
self.assertRaises(SSSDConfig.NoOptionError, domain.get_option, 'max_id')
# Positive Test - Set the above option and get it
domain.set_option('max_id', 10000)
self.assertEqual(domain.get_option('max_id'), 10000)
# Negative Test - Try yo get invalid option
self.assertRaises(SSSDConfig.NoOptionError, domain.get_option, 'nosuchoption')
def testSetOption(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive Test
domain.set_option('max_id', 10000)
self.assertEqual(domain.get_option('max_id'), 10000)
# Positive Test - Remove option if value is None
domain.set_option('max_id', None)
self.assertTrue('max_id' not in domain.get_all_options().keys())
# Negative Test - invalid option
self.assertRaises(SSSDConfig.NoOptionError, domain.set_option, 'nosuchoption', 1)
# Negative Test - incorrect type
self.assertRaises(TypeError, domain.set_option, 'max_id', 'a string')
# Positive Test - Coax options to appropriate type
domain.set_option('max_id', '10000')
self.assertEqual(domain.get_option('max_id'), 10000)
domain.set_option('max_id', 30.2)
self.assertEqual(domain.get_option('max_id'), 30)
def testRemoveOption(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive test - Remove unset but valid option
self.assertFalse('max_id' in domain.get_all_options().keys())
domain.remove_option('max_id')
self.assertFalse('max_id' in domain.get_all_options().keys())
# Positive test - Remove unset and unknown option
self.assertFalse('nosuchoption' in domain.get_all_options().keys())
domain.remove_option('nosuchoption')
self.assertFalse('nosuchoption' in domain.get_all_options().keys())
def testSetName(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive test - Change the name once
domain.set_name('sssd2');
self.assertEqual(domain.get_name(), 'sssd2')
self.assertEqual(domain.oldname, 'sssd')
# Positive test - Change the name a second time
domain.set_name('sssd3')
self.assertEqual(domain.get_name(), 'sssd3')
self.assertEqual(domain.oldname, 'sssd')
# Negative test - try setting the name to a non-string
self.assertRaises(TypeError,
domain.set_name, 4)
class SSSDConfigTestSSSDConfig(unittest.TestCase):
    """Unit tests for the top-level SSSDConfig.SSSDConfig object."""
    def setUp(self):
        # Each test constructs its own SSSDConfig; no shared fixture.
        pass
    def tearDown(self):
        pass
def testInit(self):
# Positive test
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - No Such File
self.assertRaises(IOError,
SSSDConfig.SSSDConfig, "nosuchfile.api.conf", srcdir + "/etc/sssd.api.d")
# Negative Test - Schema is not parsable
self.assertRaises(SSSDConfig.ParsingError,
SSSDConfig.SSSDConfig, srcdir + "/testconfigs/noparse.api.conf", srcdir + "/etc/sssd.api.d")
    def testImportConfig(self):
        """import_config loads a valid file, exposing its sections and
        options, and raises on missing/invalid/misversioned files or
        when called twice on the same object.
        """
        # Positive Test
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
        # Verify that all sections were imported
        control_list = [
            'sssd',
            'nss',
            'pam',
            'sudo',
            'domain/PROXY',
            'domain/IPA',
            'domain/LOCAL',
            'domain/LDAP',
            'domain/INVALIDPROVIDER',
            'domain/INVALIDOPTION',
            ]
        for section in control_list:
            self.assertTrue(sssdconfig.has_section(section),
                            "Section [%s] missing" %
                            section)
        for section in sssdconfig.sections():
            self.assertTrue(section['name'] in control_list)
        # Verify that all options were imported for a section
        control_list = [
            'services',
            'reconnection_retries',
            'domains',
            'debug_timestamps',
            'config_file_version']
        for option in control_list:
            self.assertTrue(sssdconfig.has_option('sssd', option),
                            "Option [%s] missing from [sssd]" %
                            option)
        # 'empty' and 'comment' entries are file-layout artifacts,
        # not real options, so they are skipped in the reverse check.
        for option in sssdconfig.options('sssd'):
            if option['type'] in ('empty', 'comment'):
                continue
            self.assertTrue(option['name'] in control_list,
                            "Option [%s] unexpectedly found" %
                            option)
        #TODO: Check the types and values of the settings
        # Negative Test - Missing config file
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        self.assertRaises(IOError, sssdconfig.import_config, "nosuchfile.conf")
        # Negative Test - Invalid config file
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        self.assertRaises(SSSDConfig.ParsingError, sssdconfig.import_config, srcdir + "/testconfigs/sssd-invalid.conf")
        # Negative Test - Invalid config file version
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        self.assertRaises(SSSDConfig.ParsingError, sssdconfig.import_config, srcdir + "/testconfigs/sssd-badversion.conf")
        # Negative Test - No config file version
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        self.assertRaises(SSSDConfig.ParsingError, sssdconfig.import_config, srcdir + "/testconfigs/sssd-noversion.conf")
        # Negative Test - Already initialized
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
        self.assertRaises(SSSDConfig.AlreadyInitializedError,
                          sssdconfig.import_config, srcdir + "/testconfigs/sssd-valid.conf")
    def testNewConfig(self):
        """new_config seeds the default sections/options and refuses to
        run twice on the same object.
        """
        # Positive Test
        sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
                                           srcdir + "/etc/sssd.api.d")
        sssdconfig.new_config()
        # Check that the defaults were set
        control_list = [
            'sssd',
            'nss',
            'pam',
            'sudo',
            'autofs',
            'ssh',
            'pac']
        for section in control_list:
            self.assertTrue(sssdconfig.has_section(section),
                            "Section [%s] missing" %
                            section)
        for section in sssdconfig.sections():
            self.assertTrue(section['name'] in control_list)
        control_list = [
            'config_file_version',
            'services']
        for option in control_list:
            self.assertTrue(sssdconfig.has_option('sssd', option),
                            "Option [%s] missing from [sssd]" %
                            option)
        # 'empty' and 'comment' entries are file-layout artifacts,
        # not real options, so they are skipped in the reverse check.
        for option in sssdconfig.options('sssd'):
            if option['type'] in ('empty', 'comment'):
                continue
            self.assertTrue(option['name'] in control_list,
                            "Option [%s] unexpectedly found" %
                            option)
        # Negative Test - Already Initialized
        self.assertRaises(SSSDConfig.AlreadyInitializedError, sssdconfig.new_config)
    def testWrite(self):
        """Placeholder: comparing written output files is still TODO."""
        #TODO Write tests to compare output files
        pass
def testListActiveServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_active_services)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'nss',
'pam']
active_services = sssdconfig.list_active_services()
for service in control_list:
self.assertTrue(service in active_services,
"Service [%s] missing" %
service)
for service in active_services:
self.assertTrue(service in control_list,
"Service [%s] unexpectedly found" %
service)
def testListInactiveServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_inactive_services)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'sssd',
'sudo']
inactive_services = sssdconfig.list_inactive_services()
for service in control_list:
self.assertTrue(service in inactive_services,
"Service [%s] missing" %
service)
for service in inactive_services:
self.assertTrue(service in control_list,
"Service [%s] unexpectedly found" %
service)
def testListServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - sssdconfig not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_services)
sssdconfig.new_config()
control_list = [
'sssd',
'pam',
'nss',
'sudo',
'autofs',
'ssh',
'pac']
service_list = sssdconfig.list_services()
for service in control_list:
self.assertTrue(service in service_list,
"Service [%s] missing" %
service)
for service in service_list:
self.assertTrue(service in control_list,
"Service [%s] unexpectedly found" %
service)
def testGetService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.get_service, 'sssd')
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
service = sssdconfig.get_service('sssd')
self.assertTrue(isinstance(service, SSSDConfig.SSSDService))
# Verify the contents of this service
self.assertEqual(type(service.get_option('debug_timestamps')), bool)
self.assertFalse(service.get_option('debug_timestamps'))
# Negative Test - No such service
self.assertRaises(SSSDConfig.NoServiceError, sssdconfig.get_service, 'nosuchservice')
# Positive test - Service with invalid option loads
# but ignores the invalid option
service = sssdconfig.get_service('pam')
self.assertFalse(service.options.has_key('nosuchoption'))
def testNewService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.new_service, 'sssd')
sssdconfig.new_config()
# Positive Test
# First need to remove the existing service
sssdconfig.delete_service('sssd')
service = sssdconfig.new_service('sssd')
self.failUnless(service.get_name() in sssdconfig.list_services())
# TODO: check that the values of this new service
# are set to the defaults from the schema
def testDeleteService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.delete_service, 'sssd')
sssdconfig.new_config()
# Positive Test
service = sssdconfig.delete_service('sssd')
def testSaveService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
new_service = SSSDConfig.SSSDService('sssd', sssdconfig.schema)
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.save_service, new_service)
# Positive Test
sssdconfig.new_config()
sssdconfig.save_service(new_service)
# TODO: check that all entries were saved correctly (change a few)
# Negative Test - Type Error
self.assertRaises(TypeError, sssdconfig.save_service, self)
def testActivateService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
service_name = 'sudo'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
sssdconfig.activate_service, service_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test - Activate an inactive service
self.assertTrue(service_name in sssdconfig.list_services())
self.assertFalse(service_name in sssdconfig.list_active_services())
self.assertTrue(service_name in sssdconfig.list_inactive_services())
sssdconfig.activate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertTrue(service_name in sssdconfig.list_active_services())
self.assertFalse(service_name in sssdconfig.list_inactive_services())
# Positive test - Activate an active service
# This should succeed
sssdconfig.activate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertTrue(service_name in sssdconfig.list_active_services())
self.assertFalse(service_name in sssdconfig.list_inactive_services())
# Negative test - Invalid service name
self.assertRaises(SSSDConfig.NoServiceError,
sssdconfig.activate_service, 'nosuchservice')
# Negative test - Invalid service name type
self.assertRaises(SSSDConfig.NoServiceError,
sssdconfig.activate_service, self)
def testDeactivateService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
service_name = 'pam'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
sssdconfig.activate_service, service_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test -Deactivate an active service
self.assertTrue(service_name in sssdconfig.list_services())
self.assertTrue(service_name in sssdconfig.list_active_services())
self.assertFalse(service_name in sssdconfig.list_inactive_services())
sssdconfig.deactivate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertFalse(service_name in sssdconfig.list_active_services())
self.assertTrue(service_name in sssdconfig.list_inactive_services())
# Positive test - Deactivate an inactive service
# This should succeed
sssdconfig.deactivate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertFalse(service_name in sssdconfig.list_active_services())
self.assertTrue(service_name in sssdconfig.list_inactive_services())
# Negative test - Invalid service name
self.assertRaises(SSSDConfig.NoServiceError,
sssdconfig.activate_service, 'nosuchservice')
# Negative test - Invalid service name type
self.assertRaises(SSSDConfig.NoServiceError,
sssdconfig.activate_service, self)
def testListActiveDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_active_domains)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'IPA',
'LOCAL']
active_domains = sssdconfig.list_active_domains()
for domain in control_list:
self.assertTrue(domain in active_domains,
"Domain [%s] missing" %
domain)
for domain in active_domains:
self.assertTrue(domain in control_list,
"Domain [%s] unexpectedly found" %
domain)
def testListInactiveDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_inactive_domains)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'PROXY',
'LDAP',
'INVALIDPROVIDER',
'INVALIDOPTION',
]
inactive_domains = sssdconfig.list_inactive_domains()
for domain in control_list:
self.assertTrue(domain in inactive_domains,
"Domain [%s] missing" %
domain)
for domain in inactive_domains:
self.assertTrue(domain in control_list,
"Domain [%s] unexpectedly found" %
domain)
def testListDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_domains)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'IPA',
'LOCAL',
'PROXY',
'LDAP',
'INVALIDPROVIDER',
'INVALIDOPTION',
]
domains = sssdconfig.list_domains()
for domain in control_list:
self.assertTrue(domain in domains,
"Domain [%s] missing" %
domain)
for domain in domains:
self.assertTrue(domain in control_list,
"Domain [%s] unexpectedly found" %
domain)
def testGetDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.get_domain, 'sssd')
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
domain = sssdconfig.get_domain('IPA')
self.assertTrue(isinstance(domain, SSSDConfig.SSSDDomain))
self.assertTrue(domain.active)
domain = sssdconfig.get_domain('LDAP')
self.assertTrue(isinstance(domain, SSSDConfig.SSSDDomain))
self.assertFalse(domain.active)
# TODO verify the contents of this domain
self.assertTrue(domain.get_option('ldap_id_use_start_tls'))
# Negative Test - No such domain
self.assertRaises(SSSDConfig.NoDomainError, sssdconfig.get_domain, 'nosuchdomain')
# Positive Test - Domain with unknown provider
# Expected result: Domain is imported, but does not contain the
# unknown provider entry
domain = sssdconfig.get_domain('INVALIDPROVIDER')
self.assertFalse(domain.options.has_key('chpass_provider'))
# Positive Test - Domain with unknown option
# Expected result: Domain is imported, but does not contain the
# unknown option entry
domain = sssdconfig.get_domain('INVALIDOPTION')
self.assertFalse(domain.options.has_key('nosuchoption'))
def testNewDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.new_domain, 'example.com')
sssdconfig.new_config()
# Positive Test
domain = sssdconfig.new_domain('example.com')
self.assertTrue(isinstance(domain, SSSDConfig.SSSDDomain))
self.failUnless(domain.get_name() in sssdconfig.list_domains())
self.failUnless(domain.get_name() in sssdconfig.list_inactive_domains())
# TODO: check that the values of this new domain
# are set to the defaults from the schema
def testDeleteDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.delete_domain, 'IPA')
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
self.assertTrue('IPA' in sssdconfig.list_domains())
self.assertTrue('IPA' in sssdconfig.list_active_domains())
self.assertTrue(sssdconfig.has_section('domain/IPA'))
sssdconfig.delete_domain('IPA')
self.assertFalse('IPA' in sssdconfig.list_domains())
self.assertFalse('IPA' in sssdconfig.list_active_domains())
self.assertFalse(sssdconfig.has_section('domain/IPA'))
def testSaveDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.save_domain, 'IPA')
# Positive Test
sssdconfig.new_config()
domain = sssdconfig.new_domain('example.com')
domain.add_provider('ldap', 'id')
domain.set_option('ldap_uri', 'ldap://ldap.example.com')
domain.set_active(True)
sssdconfig.save_domain(domain)
self.assertTrue('example.com' in sssdconfig.list_domains())
self.assertTrue('example.com' in sssdconfig.list_active_domains())
self.assertEqual(sssdconfig.get('domain/example.com', 'ldap_uri'),
'ldap://ldap.example.com')
# Negative Test - Type Error
self.assertRaises(TypeError, sssdconfig.save_domain, self)
# Positive test - Change the domain name and save it
domain.set_name('example.com2')
self.assertEqual(domain.name,'example.com2')
self.assertEqual(domain.oldname,'example.com')
sssdconfig.save_domain(domain)
self.assertTrue('example.com2' in sssdconfig.list_domains())
self.assertTrue('example.com2' in sssdconfig.list_active_domains())
self.assertTrue(sssdconfig.has_section('domain/example.com2'))
self.assertEqual(sssdconfig.get('domain/example.com2',
'ldap_uri'),
'ldap://ldap.example.com')
self.assertFalse('example.com' in sssdconfig.list_domains())
self.assertFalse('example.com' in sssdconfig.list_active_domains())
self.assertFalse('example.com' in sssdconfig.list_inactive_domains())
self.assertFalse(sssdconfig.has_section('domain/example.com'))
self.assertEquals(domain.oldname, None)
# Positive test - Set the domain inactive and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
domain.set_active(False)
sssdconfig.save_domain(domain)
self.assertFalse('example.com2' in sssdconfig.list_active_domains())
self.assertTrue('example.com2' in sssdconfig.list_inactive_domains())
self.assertEquals(len(sssdconfig.list_active_domains()),
len(activelist)-1)
self.assertEquals(len(sssdconfig.list_inactive_domains()),
len(inactivelist)+1)
# Positive test - Set the domain active and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
domain.set_active(True)
sssdconfig.save_domain(domain)
self.assertTrue('example.com2' in sssdconfig.list_active_domains())
self.assertFalse('example.com2' in sssdconfig.list_inactive_domains())
self.assertEquals(len(sssdconfig.list_active_domains()),
len(activelist)+1)
self.assertEquals(len(sssdconfig.list_inactive_domains()),
len(inactivelist)-1)
# Positive test - Set the domain inactive and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
sssdconfig.deactivate_domain(domain.get_name())
self.assertFalse('example.com2' in sssdconfig.list_active_domains())
self.assertTrue('example.com2' in sssdconfig.list_inactive_domains())
self.assertEquals(len(sssdconfig.list_active_domains()),
len(activelist)-1)
self.assertEquals(len(sssdconfig.list_inactive_domains()),
len(inactivelist)+1)
# Positive test - Set the domain active and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
sssdconfig.activate_domain(domain.get_name())
self.assertTrue('example.com2' in sssdconfig.list_active_domains())
self.assertFalse('example.com2' in sssdconfig.list_inactive_domains())
self.assertEquals(len(sssdconfig.list_active_domains()),
len(activelist)+1)
self.assertEquals(len(sssdconfig.list_inactive_domains()),
len(inactivelist)-1)
# Positive test - Ensure that saved domains retain values
domain.set_option('ldap_krb5_init_creds', True)
domain.set_option('ldap_id_use_start_tls', False)
domain.set_option('ldap_user_search_base',
'cn=accounts, dc=example, dc=com')
self.assertTrue(domain.get_option('ldap_krb5_init_creds'))
self.assertFalse(domain.get_option('ldap_id_use_start_tls'))
self.assertEqual(domain.get_option('ldap_user_search_base'),
'cn=accounts, dc=example, dc=com')
sssdconfig.save_domain(domain)
of = '/tmp/testSaveDomain.out'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
domain2 = sssdconfig.get_domain('example.com2')
self.assertTrue(domain2.get_option('ldap_krb5_init_creds'))
self.assertFalse(domain2.get_option('ldap_id_use_start_tls'))
def testActivateDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
domain_name = 'PROXY'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
sssdconfig.activate_domain, domain_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test - Activate an inactive domain
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertFalse(domain_name in sssdconfig.list_active_domains())
self.assertTrue(domain_name in sssdconfig.list_inactive_domains())
sssdconfig.activate_domain('PROXY')
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertTrue(domain_name in sssdconfig.list_active_domains())
self.assertFalse(domain_name in sssdconfig.list_inactive_domains())
# Positive test - Activate an active domain
# This should succeed
sssdconfig.activate_domain('PROXY')
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertTrue(domain_name in sssdconfig.list_active_domains())
self.assertFalse(domain_name in sssdconfig.list_inactive_domains())
# Negative test - Invalid domain name
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.activate_domain, 'nosuchdomain')
# Negative test - Invalid domain name type
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.activate_domain, self)
def testDeactivateDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
domain_name = 'IPA'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
sssdconfig.activate_domain, domain_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test -Deactivate an active domain
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertTrue(domain_name in sssdconfig.list_active_domains())
self.assertFalse(domain_name in sssdconfig.list_inactive_domains())
sssdconfig.deactivate_domain(domain_name)
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertFalse(domain_name in sssdconfig.list_active_domains())
self.assertTrue(domain_name in sssdconfig.list_inactive_domains())
# Positive test - Deactivate an inactive domain
# This should succeed
sssdconfig.deactivate_domain(domain_name)
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertFalse(domain_name in sssdconfig.list_active_domains())
self.assertTrue(domain_name in sssdconfig.list_inactive_domains())
# Negative test - Invalid domain name
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.activate_domain, 'nosuchdomain')
# Negative test - Invalid domain name type
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.activate_domain, self)
if __name__ == "__main__":
    # Run each suite separately, accumulating a bitmask of failed suites
    # (bit i set => the i-th test case class had a failure).
    error = 0
    test_cases = [SSSDConfigTestSSSDService,
                  SSSDConfigTestSSSDDomain,
                  SSSDConfigTestSSSDConfig,
                  SSSDConfigTestValid,
                  SSSDConfigTestInvalid]
    for index, case in enumerate(test_cases):
        suite = unittest.TestLoader().loadTestsFromTestCase(case)
        result = unittest.TextTestRunner().run(suite)
        if not result.wasSuccessful():
            error |= (1 << index)

    sys.exit(error)
| yelley/sssd-gpo | src/config/SSSDConfigTest.py | Python | gpl-3.0 | 71,528 |
import sys
from PyQt4 import QtCore, QtGui, uic
import light

# Qt Designer .ui file describing the main window layout.
qtCreatorFile = "pythonlight.ui" # Enter file here.

# Generate the form class (and the matching Qt base class) from the
# .ui file at import time.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class LightApp(QtGui.QMainWindow, Ui_MainWindow):
    """Main window wiring the generated UI to a toggleable light.Light model."""

    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        # Build the widgets declared in pythonlight.ui onto this window.
        self.setupUi(self)
        # Model holding the light's state; show() renders it as text.
        self.light = light.Light()
        self.lightDisplay.setText(self.light.show())
        # Toggle the light each time the switch button is released.
        self.lightSwitch.released.connect(self.toggle)

    def toggle(self):
        """Flip the light's state and refresh the on-screen display."""
        self.light.switch()
        self.lightDisplay.setText(self.light.show())
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main
    # window, and hand control to the event loop until it exits.
    app = QtGui.QApplication(sys.argv)
    main_window = LightApp()
    main_window.show()
    sys.exit(app.exec_())
| chrfrantz/op-papers | oosd/week07/light/pythonlight.py | Python | gpl-3.0 | 737 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para flashx
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import logger
from core import jsunpack
from core import scrapertools
headers = [['User-Agent', 'Mozilla/5.0']]
def test_video_exists(page_url):
    """Return (True, "") if the video is still hosted, else (False, reason)."""
    logger.info("pelisalacarta.servers.flashx test_video_exists(page_url='%s')" % page_url)

    html = scrapertools.cache_page(page_url, headers=headers)
    if 'FILE NOT FOUND' in html:
        return False, "[FlashX] El archivo no existe o ha sido borrado"

    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the downloadable media URLs for a flashx video page.

    Returns a list of [label, url] pairs, one per non-thumbnail media file.
    """
    logger.info("pelisalacarta.servers.flashx url=" + page_url)

    # Request the page once
    html = scrapertools.cache_page(page_url, headers=headers)

    # If the Kodi warning appears, load the check page and then retry
    # the original page (the site unlocks it after the verification hit)
    if "You try to access this video with Kodi" in html:
        url_reload = scrapertools.find_single_match(html, 'try to reload the page.*?href="([^"]+)"')
        html = scrapertools.cache_page(url_reload, headers=headers)
        html = scrapertools.cache_page(page_url, headers=headers)

    packed = scrapertools.find_single_match(html, "<script type='text/javascript'>(.*?)</script>")
    if packed.startswith("eval"):
        # Player config is packed with the usual p.a.c.k.e.r obfuscator
        packed = jsunpack.unpack(packed)

    # Extract the URLs, e.g.
    # {file:"http://f11-play.flashx.tv/.../video1.mp4"}
    video_urls = []
    for media_url in scrapertools.find_multiple_matches(packed, '\{file\:"([^"]+)"'):
        if media_url.endswith("png"):
            # Skip thumbnail images offered alongside the media files
            continue
        extension = "." + media_url.rsplit('.', 1)[1]
        video_urls.append([extension + " [flashx]", media_url])

    for label, url in video_urls:
        logger.info("pelisalacarta.servers.flashx %s - %s" % (label, url))

    return video_urls
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Scan a text blob for flashx links.

    Returns a list of [title, url, server] triples, reporting each distinct
    playback URL only once.
    """
    seen = set()
    results = []

    # http://flashx.tv/z3nnqbspjyne
    # http://www.flashx.tv/embed-li5ydvxhg514.html
    patronvideos = 'flashx.(?:tv|pw)/(?:embed-|)([a-z0-9A-Z]+)'
    logger.info("pelisalacarta.servers.flashx find_videos #" + patronvideos + "#")

    for video_id in re.compile(patronvideos, re.DOTALL).findall(data):
        url = "http://www.flashx.tv/playvid-%s.html" % video_id
        if url in seen:
            logger.info(" url duplicada=" + url)
            continue
        logger.info(" url=" + url)
        results.append(["[flashx]", url, 'flashx'])
        seen.add(url)

    return results
| kampanita/pelisalacarta | python/main-classic/servers/flashx.py | Python | gpl-3.0 | 2,876 |
"""
Classes and functions to manage arkOS tracked services.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import glob
import miniupnpc
import random
from arkos import config, logger, policies, signals, storage, security
from arkos.messages import Notification
from arkos.utilities import errors, test_port
COMMON_PORTS = [3000, 3306, 5222, 5223, 5232]
class SecurityPolicy:
    """
    An object representing an arkOS firewall policy for a service.

    SecurityPolicies are created for all websites, as well as for all apps
    that have port-based services registered in their metadata files. They
    are used to compute the proper values to put into the arkOS firewall
    (iptables) on regeneration or app update.
    """

    def __init__(self, type="", id="", name="", icon="", ports=None,
                 policy=2, addr=None):
        """
        Initialize the policy object.

        To create a new policy or to see more info about these parameters,
        see ``tracked_services.register()`` below.

        :param str type: Policy type ('website', 'app', etc)
        :param str id: Website or app ID
        :param str name: Display name to use in Security settings pane
        :param str icon: FontAwesome icon class name
        :param list ports: List of port tuples to allow/restrict
        :param int policy: Policy identifier
        :param str addr: Address and port (for websites)
        """
        self.type = type
        self.id = id
        self.name = name
        self.icon = icon
        # Bug fix: the default used to be the mutable literal [] shared by
        # every instance constructed without ports; in-place mutation of one
        # instance's port list would have leaked into all the others.
        self.ports = ports if ports is not None else []
        self.policy = policy
        self.addr = addr

    def save(self, fw=True):
        """
        Save changes to a security policy to disk.

        :param bool fw: Regenerate the firewall after save?
        """
        if self.type == "custom":
            # Custom policies live in a list of dicts; drop any stale entry
            # with the same id before appending the current state.
            for x in policies.get_all("custom"):
                if self.id == x["id"]:
                    policies.remove_list("custom", x)
                    break
            policies.append(
                "custom",
                {"id": self.id, "name": self.name, "icon": self.icon,
                 "ports": self.ports, "policy": self.policy}
            )
        else:
            policies.set(self.type, self.id, self.policy)
        policies.save()
        storage.policies[self.id] = self

        if config.get("general", "firewall") and fw:
            security.regenerate_firewall(get())

    def remove(self, fw=True):
        """
        Remove a security policy from the firewall and config.

        You should probably use ``tracked_services.deregister()`` for this.

        :param bool fw: Regenerate the firewall after save?
        """
        if self.type == "custom":
            for x in policies.get_all("custom"):
                if self.id == x["id"]:
                    policies.remove_list("custom", x)
                    break
        else:
            policies.remove(self.type, self.id)
        policies.save()
        if self.id in storage.policies:
            del storage.policies[self.id]

        if config.get("general", "firewall") and fw:
            security.regenerate_firewall(get())

    @property
    def as_dict(self):
        """Return policy metadata as dict."""
        return {
            "type": self.type,
            "id": self.id,
            "name": self.name,
            "icon": self.icon,
            "ports": self.ports,
            "policy": self.policy,
            "is_ready": True
        }

    @property
    def serialized(self):
        """Return serializable policy metadata as dict."""
        return self.as_dict
class PortConflictError(errors.Error):
    """Raised when an address and port requested are not available."""

    def __init__(self, port, domain):
        # Keep the requested port/domain around for callers that report them
        self.port, self.domain = port, domain

    def __str__(self):
        return ("This port is taken by another site or service, "
                "please choose another")
def get(id=None, type=None):
    """
    Get all security policies from cache storage.

    :param str id: App or website ID (returns the single matching policy)
    :param str type: Filter by type ('website', 'app', etc)
    :returns: one SecurityPolicy (by id) or a list of SecurityPolicy
    """
    data = storage.policies
    if id:
        return data.get(id)
    if type:
        # Materialize to a list: a lazy filter() object would be exhausted
        # after one pass and breaks callers that mutate storage.policies
        # (e.g. deregister()) while iterating the result.
        return [x for x in data.values() if x.type == type]
    return list(data.values())
def register(type, id, name, icon, ports, domain=None, policy=0,
             default_policy=2, fw=True):
    """
    Register a new security policy with the system.

    The ``ports`` parameter takes tuples of ports to manage, like so:

        ports = [('tcp', 8000), ('udp', 21500)]

    The ``policy`` parameter is an integer with the following meaning:

        0 = Restrict access from all outside hosts. (excludes loopback)
        1 = Restrict access to local networks only.
        2 = Allow access to all networks and ultimately the whole Internet.

    Addresses should be provided for websites, because multiple websites can
    be served from the same port (SNI) as long as the address is different.

    :param str type: Policy type ('website', 'app', etc)
    :param str id: Website or app ID
    :param str name: Display name to use in Security settings pane
    :param str icon: FontAwesome icon class name
    :param list ports: List of port tuples to allow/restrict
    :param str domain: Address (for websites)
    :param int policy: Policy identifier
    :param int default_policy: Application default policy to use on first init
    :param bool fw: Regenerate the firewall after save?
    """
    # A falsy policy (0 / unset) falls back to whatever is stored in config,
    # or the application default on first registration.
    effective_policy = policy or policies.get(type, id, default_policy)
    SecurityPolicy(type, id, name, icon, ports, effective_policy, domain).save(fw)
def deregister(type, id="", fw=True):
    """
    Deregister a security policy.

    If ``id`` is empty, ALL policies of the given type are removed.

    :param str type: Policy type ('website', 'app', etc)
    :param str id: Website or app ID
    :param bool fw: Regenerate the firewall after save?
    """
    # Snapshot the matches first: SecurityPolicy.remove() deletes entries
    # from storage.policies, and mutating that dict while iterating a lazy
    # view/filter over its values raises RuntimeError on Python 3.
    for x in list(get(type=type)):
        if not id:
            x.remove(fw=False)
        elif x.id == id:
            x.remove(fw=False)
            break
    if config.get("general", "firewall") and fw:
        security.regenerate_firewall(get())
def refresh_policies():
    """Recreate security policies based on what is stored in config.

    Rebuilds the policy config so that only entries matching a currently
    tracked service survive; 'custom' entries are carried over verbatim.
    """
    services = get()
    rebuilt = {}
    for section in policies.get_all():
        if section == "custom":
            rebuilt["custom"] = policies.get_all("custom")
        for svc in services:
            if section != svc.type:
                continue
            rebuilt.setdefault(section, {})
            for entry in policies.get_all(section):
                if entry == svc.id:
                    rebuilt[section][entry] = policies.get(section, entry)
    policies.config = rebuilt
    policies.save()
def is_open_port(port, domain=None, ignore_common=False):
    """
    Check if the specified port is taken by a tracked service or not.

    Addresses should be provided for websites, because multiple websites can
    be served from the same port (SNI) as long as the address is different.

    :param int port: Port number to check
    :param str domain: Address to check (for websites)
    :param bool ignore_common: Don't return False for commonly used ports?
    :returns: True if port is open
    :rtype bool:
    """
    taken = []
    for svc in get():
        # For websites, only ports bound to the same address conflict
        if domain and svc.type == "website" and domain != svc.addr:
            continue
        taken.extend(int(p[1]) for p in svc.ports)
    if not ignore_common:
        taken = taken + COMMON_PORTS
    return port not in taken
def _upnp_igd_connect():
    """Discover and select the local uPnP IGD.

    :returns: a ready miniupnpc.UPnP client, or None on any failure
    """
    logger.debug("TrSv", "Attempting to connect to uPnP IGD")
    upnpc = miniupnpc.UPnP()
    upnpc.discoverdelay = 3000
    devs = upnpc.discover()
    if devs == 0:
        msg = "Failed to connect to uPnP IGD: no devices found"
        logger.warning("TrSv", msg)
        return None
    try:
        upnpc.selectigd()
    except Exception as e:
        msg = "Failed to connect to uPnP IGD: {0}"
        logger.warning("TrSv", msg.format(str(e)))
        # Bug fix: previously fell through and returned the client even
        # though no IGD was selected; callers guard with `if not upnpc`,
        # so an unusable client must be reported as None.
        return None
    return upnpc
def open_upnp(port):
    """
    Open and forward a port with the local uPnP IGD.

    :param tuple port: Port protocol and number
    """
    upnpc = _upnp_igd_connect()
    if not upnpc:
        return
    if upnpc.getspecificportmapping(int(port[1]), port[0].upper()):
        try:
            # Drop any stale mapping first so the add below starts clean;
            # best-effort, but don't swallow KeyboardInterrupt/SystemExit
            upnpc.deleteportmapping(int(port[1]), port[0].upper())
        except Exception:
            pass
    try:
        pf = 'arkOS Port Forwarding: {0}'
        upnpc.addportmapping(
            int(port[1]), port[0].upper(), upnpc.lanaddr, int(port[1]),
            pf.format(port[1]), ''
        )
    except Exception as e:
        msg = "Failed to register {0} with uPnP IGD: {1}"
        logger.error("TrSv", msg.format(port, str(e)))
def close_upnp(port):
    """
    Remove forwarding of a port with the local uPnP IGD.

    :param tuple port: Port protocol and number
    """
    upnpc = _upnp_igd_connect()
    if not upnpc:
        return
    # Consistency fix: open_upnp() casts the port number with int() before
    # handing it to miniupnpc; do the same here so string port numbers
    # (e.g. read from config) are handled identically.
    if upnpc.getspecificportmapping(int(port[1]), port[0].upper()):
        try:
            upnpc.deleteportmapping(int(port[1]), port[0].upper())
        except Exception:
            pass
def initialize_upnp(svcs):
    """
    Initialize uPnP port forwarding with the IGD.

    :param SecurityPolicy svcs: SecurityPolicies to open
    """
    upnpc = _upnp_igd_connect()
    if not upnpc:
        return
    for svc in svcs:
        # Only forward ports for services exposed to all networks (policy 2)
        if svc.policy != 2:
            continue
        for protocol, port in svc.ports:
            if upnpc.getspecificportmapping(port, protocol.upper()):
                try:
                    # Refresh any stale mapping before re-adding it;
                    # narrowed from a bare except to Exception
                    upnpc.deleteportmapping(port, protocol.upper())
                except Exception:
                    pass
            try:
                pf = 'arkOS Port Forwarding: {0}'
                upnpc.addportmapping(port, protocol.upper(), upnpc.lanaddr,
                                     port, pf.format(port), '')
            except Exception as e:
                msg = "Failed to register {0} with uPnP IGD: {1}"\
                    .format(port, str(e))
                logger.warning("TrSv", msg)
def open_all_upnp(ports):
    """
    Open and forward multiple ports with the local uPnP IGD.

    :param list ports: List of port objects to open
    """
    upnpc = _upnp_igd_connect()
    if not upnpc:
        return
    # (dropped the pointless `[x for x in ports]` copy -- ports is not
    # mutated inside the loop)
    for port in ports:
        if upnpc.getspecificportmapping(port[1], port[0].upper()):
            try:
                # Best-effort removal of a stale mapping before re-adding
                upnpc.deleteportmapping(port[1], port[0].upper())
            except Exception:
                pass
        try:
            pf = 'arkOS Port Forwarding: {0}'
            upnpc.addportmapping(port[1], port[0].upper(), upnpc.lanaddr,
                                 port[1], pf.format(port[1]), '')
        except Exception as e:
            msg = "Failed to register {0} with uPnP IGD: {1}"
            logger.error("TrSv", msg.format(port, str(e)))
def close_all_upnp(ports):
    """
    Remove forwarding of multiple ports with the local uPnP IGD.

    :param list ports: List of port objects to close
    """
    upnpc = _upnp_igd_connect()
    if not upnpc:
        return
    # (dropped the pointless `[x for x in ports]` copy -- ports is not
    # mutated inside the loop)
    for port in ports:
        if upnpc.getspecificportmapping(port[1], port[0].upper()):
            try:
                upnpc.deleteportmapping(port[1], port[0].upper())
            except Exception:
                pass
def get_open_port(ignore_common=False):
    """
    Get a random TCP port not currently in use by a tracked service.

    :param bool ignore_common: Don't exclude commonly used ports?
    :returns: Port number
    :rtype: int
    """
    taken = set()
    for x in get():
        for y in x.ports:
            taken.add(int(y[1]))
    if not ignore_common:
        taken.update(COMMON_PORTS)
    # Retry loop instead of recursion: the original recursive retry dropped
    # the ignore_common flag (always re-excluding common ports) and could,
    # in principle, exhaust the call stack on repeated collisions.
    while True:
        r = random.randint(8001, 65534)
        if r not in taken:
            return r
def initialize():
    """Initialize security policy tracking.

    Seeds ``storage.policies`` with the built-in arkOS policies (Genesis,
    uPnP, ACME validation stubs) plus any saved custom policies.
    """
    logger.debug("TrSv", "Initializing security policy tracking")
    # arkOS
    policy = policies.get("arkos", "arkos", 2)
    port = [("tcp", int(config.get("genesis", "port")))]
    pol = SecurityPolicy("arkos", "arkos", "System Management (Genesis/APIs)",
                         "server", port, policy)
    storage.policies[pol.id] = pol
    # uPNP - only tracked when uPnP support is enabled in config
    policy = policies.get("arkos", "upnp", 1)
    pol = SecurityPolicy("arkos", "upnp", "uPnP Firewall Comms",
                         "server", [("udp", 1900)], policy)
    if config.get("general", "enable_upnp"):
        storage.policies[pol.id] = pol
    # SSHd
    policy = policies.get("arkos", "sshd", 1)
    pol = SecurityPolicy(
        "arkos", "sshd", "SSH", "server", [("tcp", 22)], policy)
    # NOTE(review): unlike the entries above, this sshd policy is never
    # added to storage.policies -- confirm whether that is intentional.
    # ACME dummies - one HTTP-open policy per pending validation site
    for x in glob.glob("/etc/nginx/sites-enabled/acme-*"):
        acme_name = x.split("/etc/nginx/sites-enabled/acme-")[1]
        pol = SecurityPolicy(
            "acme", acme_name, "{0} (ACME Validation)".format(acme_name),
            "globe", [('tcp', 80)], 2
        )
        storage.policies[pol.id] = pol
    # Saved custom policies are restored verbatim
    for x in policies.get_all("custom"):
        pol = SecurityPolicy("custom", x["id"], x["name"], x["icon"],
                             x["ports"], x["policy"])
        storage.policies[pol.id] = pol
def register_website(site):
    """Convenience function to register a website as tracked service."""
    display_name = getattr(site, "name", site.id)
    icon = site.app.icon if site.app else "globe"
    register("website", site.id, display_name, icon,
             [("tcp", site.port)], site.domain)
def deregister_website(site):
    """Convenience function to deregister a website as tracked service.

    Drops the site's firewall policy and, per deregister()'s default,
    regenerates the firewall if it is enabled in config.
    """
    deregister("website", site.id)
def open_upnp_site(site):
    """Convenience function to register a website with uPnP.

    Forwards the site's port via the IGD (when uPnP is enabled) and probes
    reachability from the repo server, notifying the user on failure.
    """
    if config.get("general", "enable_upnp"):
        open_upnp(("tcp", site.port))
    domain = site.domain
    if domain == "localhost" or domain.endswith(".local"):
        # Local-only addresses can't be resolved by the external prober
        domain = None
    try:
        test_port(config.get("general", "repo_server"), site.port, domain)
    except Exception:
        # Narrowed from a bare except; any probe failure just surfaces as
        # a user-facing notification rather than aborting site setup
        msg = ("Port {0} and/or domain {1} could not be tested."
               " Make sure your ports are properly forwarded and"
               " that your domain is properly set up.")\
            .format(site.port, site.domain)
        Notification("error", "TrSv", msg).send()
def close_upnp_site(site):
    """Convenience function to deregister a website with uPnP.

    Releases the uPnP port mapping created by open_upnp_site(), only when
    uPnP support is enabled in config.
    """
    if config.get("general", "enable_upnp"):
        close_upnp(("tcp", site.port))
# Wire website lifecycle signals to firewall-policy bookkeeping so the
# tracked-services registry and uPnP port mappings stay in sync with the
# set of installed/loaded websites.
signals.add("tracked_services", "websites", "site_loaded", register_website)
signals.add("tracked_services", "websites", "site_installed", register_website)
signals.add("tracked_services", "websites", "site_installed", open_upnp_site)
signals.add("tracked_services", "websites", "site_removed", deregister_website)
signals.add("tracked_services", "websites", "site_removed", close_upnp_site)
| pomarec/core | arkos/tracked_services.py | Python | gpl-3.0 | 14,893 |
import sys
from collections import namedtuple
from AnyQt.QtWidgets import QSizePolicy, QLayout
from AnyQt.QtCore import Slot
import Orange.data
from Orange.widgets.utils.datacaching import data_hints
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import itemmodels
from Orange.widgets.utils.concurrent import ThreadExecutor, Task, methodinvoke
from orangecontrib import network
from .. import ppi, taxonomy, gene
from ..utils import serverfiles, compat
# Description of one PPI data source:
#   name         -- display name shown in the UI
#   constructor  -- class used to open the local database
#   tax_mapping  -- maps common taxonomy ids to database-specific ids
#   sf_domain    -- serverfiles domain the database file lives in
#   sf_filename  -- serverfiles filename (may contain a "{taxid}" placeholder)
#   score_filter -- whether edge scores are comparable and filterable
Source = namedtuple(
    "Source",
    ["name", "constructor", "tax_mapping", "sf_domain", "sf_filename",
     "score_filter"]
)
# Supported PPI databases; index into this list is stored as a setting.
SOURCES = [
    Source("BioGRID", ppi.BioGRID, ppi.BioGRID.TAXID_MAP,
           "PPI", ppi.BioGRID.SERVER_FILE, False),
    Source("STRING", ppi.STRING, ppi.STRING.TAXID_MAP,
           "PPI", ppi.STRING.FILENAME, True)
]
class OWGeneNetwork(widget.OWWidget):
    """
    Orange widget that queries a PPI database (BioGRID or STRING, see
    SOURCES) for the genes on its input table and outputs the induced
    gene interaction network. Retrieval runs on a background thread.
    """
    name = "Gene Network"
    description = "Extract a gene network for a set of genes."
    icon = "../widgets/icons/GeneNetwork.svg"
    inputs = [("Data", Orange.data.Table, "set_data")]
    outputs = [("Network", network.Graph)]
    settingsHandler = settings.DomainContextHandler()
    # NCBI taxonomy id of the selected organism (default "9606" = human).
    taxid = settings.Setting("9606")
    # Index of the string variable holding gene names (-1: last one).
    gene_var_index = settings.ContextSetting(-1)
    # If True, gene names come from attribute (column) names instead.
    use_attr_names = settings.ContextSetting(False)
    # Index into SOURCES (default 1 = STRING).
    network_source = settings.Setting(1)
    include_neighborhood = settings.Setting(True)
    # Minimal edge score in [0, 1]; only meaningful for score_filter sources.
    min_score = settings.Setting(0.9)
    want_main_area = False
    def __init__(self, parent=None):
        super().__init__(parent)
        self.taxids = taxonomy.common_taxids()
        self.current_taxid_index = self.taxids.index(self.taxid)
        self.data = None
        self.geneinfo = None
        self.nettask = None
        self._invalidated = False
        box = gui.widgetBox(self.controlArea, "Info")
        self.info = gui.widgetLabel(box, "No data on input\n")
        box = gui.widgetBox(self.controlArea, "Organism")
        self.organism_cb = gui.comboBox(
            box, self, "current_taxid_index",
            items=map(taxonomy.name, self.taxids),
            callback=self._update_organism
        )
        box = gui.widgetBox(self.controlArea, "Genes")
        self.genes_cb = gui.comboBox(
            box, self, "gene_var_index", callback=self._update_query_genes
        )
        self.varmodel = itemmodels.VariableListModel()
        self.genes_cb.setModel(self.varmodel)
        gui.checkBox(
            box, self, "use_attr_names",
            "Use attribute names",
            callback=self._update_query_genes
        )
        box = gui.widgetBox(self.controlArea, "Network")
        gui.comboBox(
            box, self, "network_source",
            items=[s.name for s in SOURCES],
            callback=self._on_source_db_changed
        )
        gui.checkBox(
            box, self, "include_neighborhood",
            "Include immediate gene neighbors",
            callback=self.invalidate
        )
        self.score_spin = gui.doubleSpin(
            box, self, "min_score", 0.0, 1.0, step=0.001,
            label="Minimal edge score",
            callback=self.invalidate
        )
        self.score_spin.setEnabled(SOURCES[self.network_source].score_filter)
        box = gui.widgetBox(self.controlArea, "Commit")
        gui.button(box, self, "Retrieve", callback=self.commit, default=True)
        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.layout().setSizeConstraint(QLayout.SetFixedSize)
        # All downloads/queries are dispatched through this executor.
        self.executor = ThreadExecutor()
    def set_data(self, data):
        """Input signal handler; triggers retrieval for non-None data."""
        self.closeContext()
        self.data = data
        if data is not None:
            self.varmodel[:] = string_variables(data.domain)
            taxid = data_hints.get_hint(data, "taxid", default=self.taxid)
            if taxid in self.taxids:
                self.set_organism(self.taxids.index(taxid))
            self.use_attr_names = data_hints.get_hint(
                data, "genesinrows", default=self.use_attr_names
            )
            if not (0 <= self.gene_var_index < len(self.varmodel)):
                self.gene_var_index = len(self.varmodel) - 1
            self.openContext(data)
            self.invalidate()
            self.commit()
        else:
            self.varmodel[:] = []
            self.send("Network", None)
    def set_source_db(self, dbindex):
        """Select the PPI source (index into SOURCES)."""
        self.network_source = dbindex
        self.invalidate()
    def set_organism(self, index):
        """Select the organism (index into self.taxids)."""
        self.current_taxid_index = index
        self.taxid = self.taxids[index]
        self.invalidate()
    def set_gene_var(self, index):
        """Select the variable that holds gene names."""
        self.gene_var_index = index
        self.invalidate()
    def query_genes(self):
        """Return the list of gene names to query, per current settings."""
        if self.use_attr_names:
            if self.data is not None:
                return [var.name for var in self.data.domain.attributes]
            else:
                return []
        elif self.gene_var_index >= 0:
            var = self.varmodel[self.gene_var_index]
            genes = [str(inst[var]) for inst in self.data
                     if not compat.isunknown(inst[var])]
            return list(unique(genes))
        else:
            return []
    def invalidate(self):
        """Mark current results stale and cancel any running retrieval."""
        self._invalidated = True
        if self.nettask is not None:
            self.nettask.finished.disconnect(self._on_result_ready)
            self.nettask.future().cancel()
            self.nettask = None
    @Slot()
    def advance(self):
        # Indeterminate progress tick (called from the worker thread).
        self.progressBarValue = (self.progressBarValue + 1) % 100
    @Slot(float)
    def set_progress(self, value):
        # Absolute progress in percent (called from the worker thread).
        self.progressBarSet(value, processEvents=None)
    def commit(self):
        """Start asynchronous retrieval of the network for current inputs."""
        include_neighborhood = self.include_neighborhood
        query_genes = self.query_genes()
        source = SOURCES[self.network_source]
        if source.score_filter:
            min_score = self.min_score
            assert source.name == "STRING"
            # STRING stores scores scaled to [0, 1000].
            min_score = min_score * 1000
        else:
            min_score = None
        taxid = self.taxid
        progress = methodinvoke(self, "advance")
        if self.geneinfo is None:
            self.geneinfo = self.executor.submit(
                fetch_ncbi_geneinfo, taxid, progress
            )
        geneinfo_f = self.geneinfo
        taxmap = source.tax_mapping
        db_taxid = taxmap.get(taxid, taxid)
        if db_taxid is None:
            raise ValueError("invalid taxid for this network")
        def fetch_network():
            # Runs on the executor thread.
            geneinfo = geneinfo_f.result()
            ppidb = fetch_ppidb(source, db_taxid, progress)
            return get_gene_network(ppidb, geneinfo, db_taxid, query_genes,
                                    include_neighborhood=include_neighborhood,
                                    min_score=min_score,
                                    progress=methodinvoke(self, "set_progress", (float,)))
        self.nettask = Task(function=fetch_network)
        self.nettask.finished.connect(self._on_result_ready)
        self.executor.submit(self.nettask)
        # Disable the UI until the background task completes.
        self.setBlocking(True)
        self.setEnabled(False)
        self.progressBarInit()
        self._invalidated = False
        self._update_info()
    @Slot()
    def _on_result_ready(self,):
        """Background task finished: re-enable UI and emit the network."""
        self.progressBarFinished()
        self.setBlocking(False)
        self.setEnabled(True)
        net = self.nettask.result()
        self._update_info()
        self.send("Network", net)
    def _on_source_db_changed(self):
        source = SOURCES[self.network_source]
        # Score filtering only applies to sources with comparable scores.
        self.score_spin.setEnabled(source.score_filter)
        self.invalidate()
    def _update_organism(self):
        self.taxid = self.taxids[self.current_taxid_index]
        if self.geneinfo is not None:
            # Gene info is organism-specific; discard the cached future.
            self.geneinfo.cancel()
            self.geneinfo = None
        self.invalidate()
    def _update_query_genes(self):
        self.invalidate()
    def _update_info(self):
        """Refresh the Info box (input summary / retrieval status)."""
        if self.data is None:
            self.info.setText("No data on input\n")
        else:
            names = self.query_genes()
            lines = ["%i unique genes on input" % len(set(names))]
            if self.nettask is not None:
                if not self.nettask.future().done():
                    lines.append("Retrieving ...")
                else:
                    net = self.nettask.result()
                    lines.append("%i nodes %i edges" %
                                 (len(net.nodes()), len(net.edges())))
            else:
                lines.append("")
            self.info.setText("\n".join(lines))
def unique(seq):
    """Yield the elements of *seq* in order, skipping duplicates."""
    emitted = set()
    for item in seq:
        if item in emitted:
            continue
        emitted.add(item)
        yield item
def string_variables(domain):
    """Return every string-typed variable of *domain*, metas included."""
    return [var for var in (domain.variables + domain.metas)
            if isinstance(var, Orange.data.StringVariable)]
def multimap_inverse(multimap):
    """
    Return a multimap inverse relation.

    Given a {key: [values]} mapping, produce {value: [keys]} where each
    value lists, in encounter order, the keys it appeared under.
    """
    inverse = defaultdict(list)
    for key, values in multimap.items():
        for value in values:
            inverse[value].append(key)
    return dict(inverse)
def ppidb_synonym_mapping(ppidb, taxid):
    """Map each synonym name to the list of ppidb primary keys using it."""
    keys = ppidb.ids(taxid)
    mapping = {key: ppidb.synonyms(key) for key in keys}
    return multimap_inverse(mapping)
def taxonomy_match(query_taxids, target_taxids):
    """Map every query taxid to a compatible target taxid (None if none)."""
    targets = set(target_taxids)
    return {taxid: taxid_map(taxid, targets) for taxid in query_taxids}
def taxid_map(query, targets):
    """
    Return *query* if it is in *targets*, otherwise an ancestor taxid from
    its taxonomic lineage that is in *targets*, or None when no match.
    """
    if query in targets:
        return query
    common = set(taxonomy.lineage(query)).intersection(targets)
    return common.pop() if common else None
def fetch_ppidb(ppisource, taxid, progress=None):
    """
    Download (if not cached) and open the PPI database of *ppisource*.

    :param Source ppisource: entry from SOURCES describing the database.
    :param taxid: organism taxonomy id; mapped through tax_mapping when the
        serverfiles filename is per-organism (contains "{taxid}").
    :param progress: optional callback forwarded to the downloader.
    :raises ValueError: if the organism is explicitly unsupported
        (tax_mapping maps it to None).
    """
    fname = ppisource.sf_filename
    if "{taxid}" in fname:
        if taxid in ppisource.tax_mapping:
            taxid_m = ppisource.tax_mapping[taxid]
            if taxid_m is None:
                raise ValueError(taxid)
            taxid = taxid_m
        fname = fname.format(taxid=taxid)

        # Per-organism database: the constructor needs the (mapped) taxid.
        # (Was an E731 lambda assignment; a nested def is the idiomatic form.)
        def constructor():
            return ppisource.constructor(taxid)
    else:
        constructor = ppisource.constructor
    serverfiles.localpath_download(
        ppisource.sf_domain, fname, callback=progress, verbose=True
    )
    return constructor()
def fetch_ncbi_geneinfo(taxid, progress=None):
    """Download (if not cached) and open the NCBI gene info db for *taxid*."""
    # Some taxids are aliased to a canonical id used by the NCBI files.
    taxid = gene.NCBIGeneInfo.TAX_MAP.get(taxid, taxid)
    serverfiles.localpath_download(
        "NCBI_geneinfo", "gene_info.{taxid}.db".format(taxid=taxid),
        callback=progress, verbose=True,
    )
    return gene.NCBIGeneInfo(taxid)
def get_gene_network(ppidb, geneinfo, taxid, query_genes,
                     include_neighborhood=True, min_score=None,
                     progress=None):
    """
    Resolve *query_genes* to ppidb primary keys and extract their network.

    Genes that cannot be matched to a ppidb synonym are silently dropped
    from the query. See extract_network() for the remaining parameters.
    """
    if progress is not None:
        progress(1.0)
    # Normalize the names to ppidb primary keys
    matcher = geneinfo.matcher
    query_genes = zip(query_genes, map(matcher.umatch, query_genes))
    synonyms = ppidb_synonym_mapping(ppidb, taxid)
    # Triples of (input name, matched NCBI gene id, list of ppidb keys);
    # prefer a synonym hit on the raw name, then on the matched gene id.
    query_genes = [(query_gene, geneid,
                    synonyms.get(query_gene, synonyms.get(geneid)))
                   for query_gene, geneid in query_genes]
    # Keep only resolvable genes; key by the first ppidb primary key.
    query = [(syn[0], query_gene)
             for query_gene, _, syn in query_genes if syn]
    net = extract_network(ppidb, dict(query), geneinfo, include_neighborhood,
                          min_score, progress=progress)
    return net
from functools import partial
from collections import defaultdict
from itertools import count
import numpy
def extract_network(ppidb, query, geneinfo, include_neighborhood=True,
                    min_score=None, progress=None):
    """
    Build an Orange network.Graph around the ppidb keys in *query*.

    :param query: mapping of ppidb primary key -> original query name
        (a plain iterable of keys is also accepted).
    :param include_neighborhood: also add immediate interaction partners.
    :param min_score: minimal edge score filter; must be None for BioGRID,
        whose scores are not comparable.
    """
    if not isinstance(query, dict):
        query = {name: name for name in query}
    report_weights = True
    if isinstance(ppidb, ppi.BioGRID):
        # BioGRID scores are not comparable (they can be p values,
        # confidence scores, ..., i.e. whatever was reported in the source
        # publication)
        report_weights = False
        if min_score is not None:
            raise ValueError("min_score used with BioGrid")
    # graph = networkx.Graph()
    graph = network.Graph()
    # node ids in Orange.network.Graph need to be in [0 .. n-1]
    nodeids = defaultdict(partial(next, count()))
    def gi_info(names):
        # Resolve a list of synonym names to a single NCBI gene info entry.
        mapping = [(name, geneinfo.matcher.umatch(name)) for name in names]
        mapping = [(name, match) for name, match in mapping if match]
        entries = [(name, geneinfo[match]) for name, match in mapping]
        if len(entries) > 1:
            # try to resolve conflicts by prioritizing entries whose
            # symbol/gene_id/locus_tag exactly matches the synonym name.
            entries_ = [(name, entry) for name, entry in entries
                        if name in [entry.gene_id, entry.symbol, entry.locus_tag]]
            if len(entries_) == 1:
                entries = entries_
        if len(entries) == 0:
            return None
        elif len(entries) >= 1:
            # Need to report multiple mappings
            # NOTE(review): silently returns the first entry even when the
            # mapping stays ambiguous — confirm whether that is intended.
            return entries[0][1]
    # Add query nodes.
    for key, query_name in query.items():
        nodeid = nodeids[key]
        synonyms = ppidb.synonyms(key)
        entry = gi_info(synonyms)
        graph.add_node(
            nodeid,
            key=key,
            synonyms=synonyms,
            query_name=query_name,
            symbol=entry.symbol if entry is not None else ""
        )
    if include_neighborhood:
        # extend the set of nodes in the network with immediate neighborers
        edges_iter = (edge for key in query for edge in ppidb.edges(key))
        for id1, id2, score in edges_iter:
            if min_score is None or score >= min_score:
                nodeid1 = nodeids[id1]
                nodeid2 = nodeids[id2]
                if nodeid1 not in graph:
                    synonyms1 = ppidb.synonyms(id1)
                    entry1 = gi_info(synonyms1)
                    symbol1 = entry1.symbol if entry1 is not None else ""
                    graph.add_node(
                        nodeid1, key=id1, synonyms=synonyms1,
                        symbol=symbol1
                    )
                if nodeid2 not in graph:
                    synonyms2 = ppidb.synonyms(id2)
                    entry2 = gi_info(synonyms2)
                    symbol2 = entry2.symbol if entry2 is not None else ""
                    graph.add_node(
                        nodeid2, key=id2, synonyms=synonyms2,
                        symbol=symbol2
                    )
    # add edges between nodes
    for i, id1 in enumerate(nodeids.keys()):
        if progress is not None:
            progress(100.0 * i / len(nodeids))
        for _, id2, score in ppidb.edges(id1):
            if id2 in nodeids and (min_score is None or score >= min_score):
                nodeid1 = nodeids[id1]
                nodeid2 = nodeids[id2]
                assert nodeid1 in graph and nodeid2 in graph
                if score is not None and report_weights:
                    graph.add_edge(nodeid1, nodeid2, weight=score)
                else:
                    graph.add_edge(nodeid1, nodeid2)
    # Item table attached to the graph: one meta row per node.
    nodedomain = Orange.data.Domain(
        [], [],
        [Orange.data.StringVariable("Query name"),  # if applicable
         Orange.data.StringVariable("id"),          # ppidb primary key
         Orange.data.StringVariable("Synonyms"),    # ppidb synonyms
         Orange.data.StringVariable("Symbol"),      # ncbi gene name ??
         Orange.data.DiscreteVariable("source", values=["false", "true"])],
    )
    N = len(graph.nodes())
    # NOTE(review): the sort key indexes the defaultdict with graph node
    # ids (ints), which inserts fresh entries; it yields a stable order but
    # looks accidental — confirm the intended key was the original db key.
    node_items = sorted(graph.node.items(), key=lambda t: nodeids[t[0]])
    meta = [[node.get("query_name", ""),
             node.get("key", ""),
             ", ".join(node.get("synonyms", [])),
             node.get("symbol", nodeid),
             (1 if "query_name" in node else 0)]
            for nodeid, node in node_items]
    if not meta:
        meta = numpy.empty((0, len(nodedomain.metas)),
                           dtype=object)
    nodeitems = Orange.data.Table.from_numpy(
        nodedomain,
        numpy.empty((N, 0)), numpy.empty((N, 0)),
        numpy.array(meta, dtype=object)
    )
    graph.set_items(nodeitems)
    return graph
def main():
    """Run the widget standalone on a small sample of 'brown-selected'."""
    from AnyQt.QtWidgets import QApplication
    app = QApplication([])
    w = OWGeneNetwork()
    brown = Orange.data.Table("brown-selected")
    w.set_data(Orange.data.Table(brown[:5]))
    w.show()
    rval = app.exec_()
    w.saveSettings()
    return rval
if __name__ == "__main__":
sys.exit(main())
| ales-erjavec/orange-bio | orangecontrib/bio/widgets3/OWGeneNetwork.py | Python | gpl-3.0 | 16,602 |
# -*- coding: utf-8 -*-
from django import forms
class DocumentForm(forms.Form):
    """Upload form for an OpenMalaria scenario and its optional output files."""
    xmlfile = forms.FileField(label='Select the input xml file (scenario.xml)')
    # BUG FIX: user-visible typo "survery" -> "survey" in the label.
    outputfile = forms.FileField(label='Select the survey output file (output.txt) if available', required=False)
    ctsoutputfile = forms.FileField(label='Select the continuous file (ctsout.txt) if available', required=False)
    save_to = forms.BooleanField(initial=False, label='Save to My Scenarios', required=False)
    scenario_label = forms.CharField(label="Scenario name (optional)", required=False)
from urllib.parse import urlparse
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.oauth2.views import (
OAuth2CallbackView,
OAuth2LoginView,
)
from kuma.core.decorators import redirect_in_maintenance_mode
from kuma.core.ga_tracking import ACTION_AUTH_STARTED, CATEGORY_SIGNUP_FLOW, track_event
class KumaOAuth2LoginView(OAuth2LoginView):
    def dispatch(self, request):
        # TODO: Figure out a way to NOT trigger "ACTION_AUTH_STARTED" when
        # simply following the link — curl and some browser extensions
        # follow it without a user earnestly proceeding.
        # As a crude distinction between scripted fetches and real clicks we
        # require a same-host HTTP Referer before recording the event.
        # Note! This is the same in kuma.users.providers.github.KumaOAuth2LoginView
        # See https://github.com/mdn/kuma/issues/6759
        referer = request.META.get("HTTP_REFERER")
        if referer and urlparse(referer).netloc == request.get_host():
            track_event(CATEGORY_SIGNUP_FLOW, ACTION_AUTH_STARTED, "google")
        return super().dispatch(request)
# Module-level view callables for urls.py; both redirect away while the
# site is in maintenance mode.
oauth2_login = redirect_in_maintenance_mode(
    KumaOAuth2LoginView.adapter_view(GoogleOAuth2Adapter)
)
oauth2_callback = redirect_in_maintenance_mode(
    OAuth2CallbackView.adapter_view(GoogleOAuth2Adapter)
)
| Elchi3/kuma | kuma/users/providers/google/views.py | Python | mpl-2.0 | 1,558 |
# GUI frame for the dftModel_function.py
from Tkinter import *
import tkFileDialog, tkMessageBox
import sys, os
import pygame
from scipy.io.wavfile import read
import dftModel_function
class DftModel_frame:
    """Tkinter frame for configuring and running the DFT model analysis.

    Lets the user pick a mono 44100 Hz .wav file, preview it with pygame,
    choose window type/size, FFT size and an analysis time, then runs
    dftModel_function.extractHarmSpec on those parameters.
    """
    def __init__(self, parent):
        self.parent = parent
        self.initUI()
        pygame.init()
    def initUI(self):
        """Build all widgets and the file-dialog options."""
        choose_label = "Input file (.wav, mono and 44100 sampling rate):"
        Label(self.parent, text=choose_label).grid(row=0, column=0, sticky=W, padx=5, pady=(10,2))
        #TEXTBOX TO PRINT PATH OF THE SOUND FILE
        self.filelocation = Entry(self.parent)
        self.filelocation.focus_set()
        self.filelocation["width"] = 25
        self.filelocation.grid(row=1,column=0, sticky=W, padx=10)
        self.filelocation.delete(0, END)
        self.filelocation.insert(0, '../../sounds/piano.wav')
        #BUTTON TO BROWSE SOUND FILE
        self.open_file = Button(self.parent, text="Browse...", command=self.browse_file) #see: def browse_file(self)
        self.open_file.grid(row=1, column=0, sticky=W, padx=(220, 6)) #put it beside the filelocation textbox
        #BUTTON TO PREVIEW SOUND FILE
        self.preview = Button(self.parent, text=">", command=self.preview_sound, bg="gray30", fg="white")
        self.preview.grid(row=1, column=0, sticky=W, padx=(306,6))
        ## DFT MODEL
        #ANALYSIS WINDOW TYPE
        wtype_label = "Window type:"
        Label(self.parent, text=wtype_label).grid(row=2, column=0, sticky=W, padx=5, pady=(10,2))
        self.w_type = StringVar()
        self.w_type.set("blackman") # initial value
        window_option = OptionMenu(self.parent, self.w_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris")
        window_option.grid(row=2, column=0, sticky=W, padx=(95,5), pady=(10,2))
        #WINDOW SIZE
        M_label = "Window size (M):"
        Label(self.parent, text=M_label).grid(row=3, column=0, sticky=W, padx=5, pady=(10,2))
        self.M = Entry(self.parent, justify=CENTER)
        self.M["width"] = 5
        self.M.grid(row=3,column=0, sticky=W, padx=(115,5), pady=(10,2))
        self.M.delete(0, END)
        self.M.insert(0, "511")
        #FFT SIZE
        N_label = "FFT size (N) (power of two bigger than M):"
        Label(self.parent, text=N_label).grid(row=4, column=0, sticky=W, padx=5, pady=(10,2))
        self.N = Entry(self.parent, justify=CENTER)
        self.N["width"] = 5
        self.N.grid(row=4,column=0, sticky=W, padx=(270,5), pady=(10,2))
        self.N.delete(0, END)
        self.N.insert(0, "1024")
        #TIME TO START ANALYSIS
        time_label = "Time in sound (in seconds):"
        Label(self.parent, text=time_label).grid(row=5, column=0, sticky=W, padx=5, pady=(10,2))
        self.time = Entry(self.parent, justify=CENTER)
        self.time["width"] = 5
        self.time.grid(row=5, column=0, sticky=W, padx=(180,5), pady=(10,2))
        self.time.delete(0, END)
        self.time.insert(0, ".2")
        #BUTTON TO COMPUTE EVERYTHING
        self.compute = Button(self.parent, text="Compute", command=self.compute_model, bg="dark red", fg="white")
        self.compute.grid(row=6, column=0, padx=5, pady=(10,15), sticky=W)
        # define options for opening file
        self.file_opt = options = {}
        options['defaultextension'] = '.wav'
        options['filetypes'] = [('All files', '.*'), ('Wav files', '.wav')]
        options['initialdir'] = '../../sounds/'
        options['title'] = 'Open a mono audio file .wav with sample frequency 44100 Hz'
    def preview_sound(self):
        """Validate the chosen file (mono .wav at 44100 Hz) and play it."""
        filename = self.filelocation.get()
        if filename[-4:] == '.wav':
            fs, x = read(filename)
        else:
            tkMessageBox.showerror("Wav file", "The audio file must be a .wav")
            return
        if len(x.shape) > 1 :
            tkMessageBox.showerror("Stereo file", "Audio file must be Mono not Stereo")
        elif fs != 44100:
            tkMessageBox.showerror("Sample Frequency", "Sample frequency must be 44100 Hz")
        else:
            sound = pygame.mixer.Sound(filename)
            sound.play()
    def browse_file(self):
        """Open a file dialog and copy the chosen path into the entry box."""
        self.filename = tkFileDialog.askopenfilename(**self.file_opt)
        #set the text of the self.filelocation
        self.filelocation.delete(0, END)
        self.filelocation.insert(0,self.filename)
    def compute_model(self):
        """Parse the UI fields and run the DFT analysis; report bad input."""
        try:
            inputFile = self.filelocation.get()
            window = self.w_type.get()
            M = int(self.M.get())
            N = int(self.N.get())
            time = float(self.time.get())
            dftModel_function.extractHarmSpec(inputFile, window, M, N, time)
        except ValueError as errorMessage:
            tkMessageBox.showerror("Input values error",errorMessage)
| georgid/sms-tools | software/models_interface/dftModel_GUI_frame.py | Python | agpl-3.0 | 4,273 |
# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from anki.syncserver import serve
serve()
| simgunz/anki | pylib/anki/syncserver/__main__.py | Python | agpl-3.0 | 171 |
import time
import json
import random
from flask import Flask, request, current_app, abort
from functools import wraps
from cloudbrain.utils.metadata_info import (map_metric_name_to_num_channels,
get_supported_devices,
get_metrics_names)
from cloudbrain.settings import WEBSERVER_PORT
_API_VERSION = "v1.0"
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
from cloudbrain.datastore.CassandraDAO import CassandraDAO
dao = CassandraDAO()
dao.connect()
def support_jsonp(f):
    """Wraps JSONified output for JSONP.

    When the request carries a ``callback`` query parameter, the view's
    JSON result is wrapped in ``callback(...)`` and served as a JSONP
    response; otherwise the view result is returned unchanged.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        callback = request.args.get('callback', False)
        if callback:
            # BUG FIX: the original called f() with no arguments, which
            # breaks JSONP on routes with URL parameters (e.g. user_id).
            content = str(callback) + '(' + str(f(*args, **kwargs)) + ')'
            return current_app.response_class(content,
                                              mimetype='application/json')
        else:
            return f(*args, **kwargs)
    return decorated_function
@app.route('/data', methods=['GET'])
@support_jsonp
def data():
    """
    GET metric data.

    Query params: ``device_name``, ``device_id`` and ``metric`` (required)
    plus optional ``start`` (epoch microseconds; defaults to ~now).
    :return: JSON list of data records, or an error message with HTTP 500.
    """
    # return last 5 microseconds if start not specified.
    default_start_timestamp = int(time.time() * 1000000 - 5)
    device_id = request.args.get('device_id', None)
    device_name = request.args.get('device_name', None)
    metric = request.args.get('metric', None)
    start = int(request.args.get('start', default_start_timestamp))
    # NOTE(review): missing parameters are client errors; 400 would be a
    # more appropriate status than 500 — confirm before changing clients.
    if not device_name:
        return "missing param: device_name", 500
    if not metric:
        return "missing param: metric", 500
    if not device_id:
        return "missing param: device_id", 500
    # data_records = _get_mock_data(device_name, metric)
    data_records = dao.get_data(device_name, device_id, metric, start)
    return json.dumps(data_records)
def _get_mock_data(device_name, metric):
    """Return 5 fake consecutive samples for *metric* (debugging helper).

    Note: uses ``xrange`` — this module targets Python 2.
    """
    metric_to_num_channels = map_metric_name_to_num_channels(device_name)
    num_channels = metric_to_num_channels[metric]
    now = int(time.time() * 1000000 - 5) # micro seconds
    data_records = []
    for i in xrange(5):
        record = {'timestamp': now + i}
        for j in xrange(num_channels):
            channel_name = 'channel_%s' % j
            # Random values in [0, 10) stand in for real sensor readings.
            record[channel_name] = random.random() * 10
        data_records.append(record)
    return data_records
@app.route('/metadata/devices', methods=['GET'])
@support_jsonp
def get_device_names():
    """ Returns the device names from the metadata file """
    return json.dumps(get_supported_devices())
@app.route('/registered_devices', methods=['GET'])
@support_jsonp
def get_registered_devices():
    """ Get the registered devices IDs (from the Cassandra datastore). """
    registered_devices = dao.get_registered_devices()
    return json.dumps(registered_devices)
""" Tags """
def _generate_mock_tags(user_id, tag_name):
if tag_name is None:
tag_names = ["Facebook", "Netflix", "TechCrunch"]
else:
tag_names = [tag_name]
tags = []
for tag_name in tag_names:
tags.append(
{"tag_id": "c1f6e1f2-c964-48c0-8cdd-fafe8336190b",
"user_id": user_id,
"tag_name": tag_name,
"metadata": {},
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return tags
def generate_mock_tag(user_id, tag_id):
    """Build a single fake tag record named 'label_1' spanning ~10 ms."""
    now = int(time.time() * 1000)
    return {
        "tag_id": tag_id,
        "user_id": user_id,
        "tag_name": "label_1",
        "metadata": {},
        "start": now - 10,
        "end": now,
    }
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
           methods=['GET'])
@support_jsonp
def get_tags(user_id):
    """Retrieve all tags for a specific user.

    Optional ``tag_name`` query parameter narrows the result.
    """
    tag_name = request.args.get('tag_name', None)
    #tags = _generate_mock_tags(user_id, tag_name)
    tags = dao.get_tags(user_id, tag_name)
    return json.dumps(tags), 200
@app.route('/api/%s/users/<string:user_id>/tags/<string:tag_id>' % _API_VERSION,
           methods=['GET'])
@support_jsonp
def get_tag(user_id, tag_id):
    """Retrieve a specific tag for a specific user """
    #tag = dao.get_mock_tag(user_id, tag_id)
    tag = dao.get_tag(user_id, tag_id)
    return json.dumps(tag), 200
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
           methods=['POST'])
@support_jsonp
def create_tag(user_id):
    """Create a tag for *user_id*; body requires ``tag_name`` and ``start``.

    :return: JSON with the new ``tag_id`` and HTTP 201 Created.
    """
    if (not request.json
            or 'tag_name' not in request.json
            or 'start' not in request.json):
        abort(400)
    tag_name = request.json.get("tag_name")
    metadata = request.json.get("metadata")
    start = request.json.get("start")
    end = request.json.get("end")
    tag_id = dao.create_tag(user_id, tag_name, metadata, start, end)
    # BUG FIX: the success path returned HTTP 500 (server error); a
    # successful resource creation must return 201.
    return json.dumps({"tag_id": tag_id}), 201
""" Tag aggregates"""
def _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics):
aggregates = []
for metric in metrics:
aggregates.append(
{
"aggregate_id": "c1f6e1f2-c964-48c0-8cdd-fafe83361977",
"user_id": user_id,
"tag_id": tag_id,
"aggregate_type": "avg",
"device_type": device_type,
"aggregate_value": random.random() * 10,
"metric": metric,
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return aggregates
@app.route(('/api/%s/users/<string:user_id>/tags/<string:tag_id>/aggregates'
            % _API_VERSION), methods=['GET'])
@support_jsonp
def get_tag_aggregate(user_id, tag_id):
    """Retrieve all aggregates for a specific tag and user.

    Optional filters: ``device_type`` and repeated ``metrics`` query
    params; ``metrics`` requires ``device_type`` to also be given.
    """
    device_type = request.args.get('device_type', None)
    metrics = request.args.getlist('metrics', None)
    if device_type is None and len(metrics) == 0:
        # No filter at all: collect every metric of every supported device.
        device_types = get_supported_devices()
        for device_type in device_types:
            metrics.extend(get_metrics_names(device_type))
    elif len(metrics) == 0 and device_type is not None:
        metrics = get_metrics_names(device_type)
    elif len(metrics) > 0 and device_type is None:
        # NOTE(review): 400 would suit this client error better than 500.
        return "parameter 'device_type' is required to filter on `metrics`", 500
    #aggregates = _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics)
    aggregates = dao.get_aggregates(user_id, tag_id, device_type, metrics)
    return json.dumps(aggregates), 200
if __name__ == "__main__":
app.run(host="0.0.0.0", port=WEBSERVER_PORT)
| andyh616/cloudbrain | cloudbrain/apiservice/rest_api_server.py | Python | agpl-3.0 | 6,641 |
import os
from multiprocessing import cpu_count
_cpu_count = cpu_count()
# Platform-dependent definition of load_fair(): POSIX systems expose
# os.getloadavg(); on Windows a background thread samples performance
# counters instead.
if hasattr(os, 'getloadavg'):
    def load_fair():
        # 1-minute load average normalized by the number of CPUs.
        return 'load', os.getloadavg()[0] / _cpu_count
else:
    from winperfmon import PerformanceCounter
    from threading import Thread
    from collections import deque
    from time import sleep
    class SystemLoadThread(Thread):
        """Daemon thread polling Windows perf counters once per second."""
        def __init__(self):
            super(SystemLoadThread, self).__init__()
            self.daemon = True
            # Rolling window of the last 10 processor-queue-length samples.
            self.samples = deque(maxlen=10)
            self.load = 0.5
            self.counter = PerformanceCounter(r'\System\Processor Queue Length', r'\Processor(_Total)\% Processor Time')
        def run(self):
            while True:
                pql, pt = self.counter.query()
                self.samples.append(pql)
                # At CPU saturation the queue length becomes the better
                # load signal; otherwise use processor time directly.
                if pt >= 100:
                    self.load = max(sum(self.samples) / len(self.samples) / _cpu_count, pt / 100.)
                else:
                    self.load = pt / 100.
                sleep(1)
    _load_thread = SystemLoadThread()
    _load_thread.start()
    def load_fair():
        # Latest load estimate computed by the sampler thread.
        return 'load', _load_thread.load
# NOTE(review): this deliberately shadows multiprocessing.cpu_count
# (captured earlier into _cpu_count) to expose it as a report callback.
def cpu_count():
    return 'cpu-count', _cpu_count
# Callbacks polled by the system-info reporter; each returns (name, value).
report_callbacks = [load_fair, cpu_count]
| buhe/judge | sysinfo.py | Python | agpl-3.0 | 1,249 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import travel_accommodation_import
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ingadhoc/openerp-travel | travel_accommodation/wizard/__init__.py | Python | agpl-3.0 | 1,121 |
# -*- coding: UTF-8 -*-
# Generated Lino fixture dump: restores 5 rows of the gfks_helptext table.
# The names `logger`, `loader`, `create_gfks_helptext` and the content-type
# variables are injected into this script's namespace by the dump loader.
# The German strings are stored user-facing help texts (runtime data).
logger.info("Loading 5 objects to table gfks_helptext...")
# fields: id, content_type, field, help_text
loader.save(create_gfks_helptext(1,contacts_Partner,u'language',u'Die Sprache, in der Dokumente ausgestellt werden sollen.'))
loader.save(create_gfks_helptext(2,gfks_HelpText,u'field',u'The name of the field.'))
loader.save(create_gfks_helptext(3,pcsw_Client,u'in_belgium_since',u'Since when this person in Belgium lives.\n<b>Important:</b> help_text can be formatted.'))
loader.save(create_gfks_helptext(4,pcsw_Client,u'noble_condition',u'The eventual noble condition of this person. Imported from TIM.'))
loader.save(create_gfks_helptext(5,contacts_Partner,u'language',u'Die Sprache, in der Dokumente ausgestellt werden sollen.'))
loader.flush_deferred_objects()
| lino-framework/welfare | lino_welfare/projects/gerd/tests/dumps/18.8.0/gfks_helptext.py | Python | agpl-3.0 | 794 |
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask import Flask, request
from flask.ext.restful import Resource, fields, marshal_with, reqparse, abort
from flask.globals import g
from jormungandr import i_manager, timezone
from jormungandr.interfaces.v1.fields import disruption_marshaller
from jormungandr.interfaces.v1.make_links import add_id_links
from jormungandr.interfaces.v1.fields import NonNullList, NonNullNested, PbField, error, pt_object, feed_publisher
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.argument import ArgumentDoc
from jormungandr.interfaces.parsers import depth_argument, option_value, default_count_arg_type, date_time_format
from copy import deepcopy
import datetime
# Marshalling schema for the /pt_objects response body.
pt_objects = {
    "pt_objects": NonNullList(NonNullNested(pt_object), attribute='places'),
    "disruptions": fields.List(NonNullNested(disruption_marshaller), attribute="impacts"),
    "error": PbField(error, attribute='error'),
    "feed_publishers": fields.List(NonNullNested(feed_publisher))
}
# Public-transport object types accepted by the type[] filter.
pt_object_type_values = ["network", "commercial_mode", "line", "line_group", "route", "stop_area"]
class Ptobjects(ResourceUri):
    """REST resource searching public-transport objects by name (/pt_objects)."""
    def __init__(self, *args, **kwargs):
        ResourceUri.__init__(self, *args, **kwargs)
        # Request parsers keyed by HTTP verb; only GET is supported here.
        self.parsers = {}
        self.parsers["get"] = reqparse.RequestParser(
            argument_class=ArgumentDoc)
        self.parsers["get"].add_argument("q", type=unicode, required=True,
                                         description="The data to search")
        self.parsers["get"].add_argument("type[]", type=option_value(pt_object_type_values),
                                         action="append",default=pt_object_type_values,
                                         description="The type of data to\
                                         search")
        self.parsers["get"].add_argument("count", type=default_count_arg_type, default=10,
                                         description="The maximum number of\
                                         ptobjects returned")
        self.parsers["get"].add_argument("search_type", type=int, default=0,
                                         description="Type of search:\
                                         firstletter or type error")
        self.parsers["get"].add_argument("admin_uri[]", type=unicode,
                                         action="append",
                                         description="If filled, will\
                                         restrained the search within the\
                                         given admin uris")
        self.parsers["get"].add_argument("depth", type=depth_argument,
                                         default=1,
                                         description="The depth of objects")
        # NOTE(review): utcnow() is evaluated when __init__ runs, not per
        # parse; confirm a Ptobjects instance is created per request,
        # otherwise this default datetime goes stale.
        self.parsers["get"].add_argument("_current_datetime", type=date_time_format, default=datetime.datetime.utcnow(),
                                         description="The datetime used to consider the state of the pt object"
                                                     " Default is the current date and it is used for debug."
                                                     " Note: it will mainly change the disruptions that concern "
                                                     "the object The timezone should be specified in the format,"
                                                     " else we consider it as UTC")
    @marshal_with(pt_objects)
    def get(self, region=None, lon=None, lat=None):
        """Resolve the region, parse query args and dispatch the search."""
        self.region = i_manager.get_region(region, lon, lat)
        timezone.set_request_timezone(self.region)
        args = self.parsers["get"].parse_args()
        self._register_interpreted_parameters(args)
        if len(args['q']) == 0:
            abort(400, message="Search word absent")
        response = i_manager.dispatch(args, "pt_objects",
                                      instance_name=self.region)
        return response, 200
| ballouche/navitia | source/jormungandr/jormungandr/interfaces/v1/Ptobjects.py | Python | agpl-3.0 | 5,265 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.db import transaction
from education_group.ddd import command
from education_group.ddd.domain import exception
from education_group.ddd.domain.training import TrainingIdentity
from ddd.logic.formation_catalogue.builder.training_builder import TrainingBuilder
from education_group.ddd.repository import training as training_repository
@transaction.atomic()
def copy_training_to_next_year(copy_cmd: command.CopyTrainingToNextYearCommand) -> 'TrainingIdentity':
    """Duplicate an existing training into the following academic year.

    The source training is looked up from the acronym/year carried by the
    command and copied one year forward.  If a training with the same acronym
    already exists in the target year, it is updated instead of created.
    """
    repo = training_repository.TrainingRepository()
    source_identity = TrainingIdentity(acronym=copy_cmd.acronym, year=copy_cmd.postpone_from_year)
    source_training = repo.get(entity_id=source_identity)

    copied_training = TrainingBuilder().copy_to_next_year(source_training, repo)

    try:
        # Nested atomic block: a failing INSERT rolls back only to its
        # savepoint, so the fallback UPDATE can still run in the outer
        # transaction.
        with transaction.atomic():
            return repo.create(copied_training)
    except exception.TrainingAcronymAlreadyExistException:
        return repo.update(copied_training)
| uclouvain/OSIS-Louvain | education_group/ddd/service/write/copy_training_service.py | Python | agpl-3.0 | 2,322 |
# Layered settings: edraak_common is star-imported second so any name it
# defines overrides the plain devstack value (later star imports rebind).
from .devstack import *
from .edraak_common import *

# WARNING: Don't just add/delete settings from here. Make sure the settings are
# reflected in `cms/envs/edraak_devstack.py`
| Edraak/edx-platform | lms/envs/edraak_devstack.py | Python | agpl-3.0 | 179 |
# Test mocks and helpers
from __future__ import absolute_import
from webob import Request
from xblock.runtime import DictKeyValueStore, KvsFieldData
from xblock.test.tools import TestRuntime
def make_request(body, method='POST'):
    """Build a blank webob request carrying *body* (UTF-8 encoded) sent with
    the given HTTP *method* (POST by default)."""
    req = Request.blank('/')
    req.method = method
    req.body = body.encode('utf-8')
    return req
# pylint: disable=abstract-method
class MockRuntime(TestRuntime):
    """Minimal XBlock runtime for tests, defaulting to an in-memory
    key-value-store backed field data."""

    def __init__(self, **kwargs):
        if 'field_data' in kwargs:
            data = kwargs['field_data']
        else:
            data = KvsFieldData(DictKeyValueStore())
        super(MockRuntime, self).__init__(field_data=data)
| open-craft/xblock-poll | tests/utils.py | Python | agpl-3.0 | 708 |
# -*- coding: utf-8 -*-
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.scenarios.ridesharing.instant_system import InstantSystem, DEFAULT_INSTANT_SYSTEM_FEED_PUBLISHER
from jormungandr.scenarios.ridesharing.ridesharing_journey import Gender
from jormungandr.scenarios.ridesharing.ridesharing_service import (
Ridesharing,
RsFeedPublisher,
RidesharingServiceError,
)
import mock
from jormungandr.tests import utils_test
from jormungandr import utils
import json
fake_response = """
{
"total": 2,
"journeys": [
{
"id": "4bcd0b9d-2c9d-42a2-8ffb-4508c952f4fb",
"departureDate": "2017-12-25T08:07:59+01:00",
"arrivalDate": "2017-12-25T08:25:36+01:00",
"duration": 1057,
"distance": 12650,
"url": "https://jky8k.app.goo.gl/?efr=1&apn=com.is.android.rennes&ibi=&isi=&utm_campaign=KISIO&link=https%3A%2F%2Fwww.star.fr%2Fsearch%2F%3FfeatureName%3DsearchResultDetail%26networkId%3D33%26journeyId%3D4bcd0b9d-2c9d-42a2-8ffb-4508c952f4fb",
"paths": [
{
"mode": "RIDESHARINGAD",
"from": {
"name": "",
"lat": 48.1102,
"lon": -1.68623
},
"to": {
"name": "",
"lat": 48.02479,
"lon": -1.74673
},
"departureDate": "2017-12-25T08:07:59+01:00",
"arrivalDate": "2017-12-25T08:25:36+01:00",
"shape": "wosdH|ihIRVDTFDzBjPNhADJ\\`C?TJt@Hj@h@tDp@bFR?bAFRBZDR@JCL@~AJl@Df@DfBNv@B~@DjAFh@HXH~@VbEfANDh@PdAl@\\RdAZnBHpADvBDf@@d@Gv@S\\OlAOl@EbAHjAVNDd@Dd@Mt@u@FGrE{EtBaBr@zCp@dDd@~BRtAHj@X`BFXlAjDLd@v@dDXlAh@TVl@hBtIB`ANpAh@nBf@xATf@Xd@JFPD@JHRLBLKDBbCbBbBbBjApA?VHPPBL`@\\^|BrBDHJ`@AP?PDRFL\\TRAJGRD`Al@jBhA~BbBx@VfALl@PHVDHPFNCVNdCnBpHzDdB|AfAjAj@h@^d@jAhBhAvA?^BNFJPHPCFGVNpBhApBt@ZL|B^dCJfDAZFLRHBNEJQZIdUa@b@JJ`@TXTFTAPKNUH]nBGtOb@vDd@`C`ArAp@zAjAnBnBJJh@h@`_@l`@fIvIfMhNl@t@dAzBnAnDx@xDh@jFfBbRdAnMdBnSjB|JbDbIhMj[rN`_@nEfJzCxDrCtDl@pBDtE^Bn@?h@?t@IdAe@XUFIvBaBvBaBf@Wl@OdAEfAJJXJHJBLCbAbAx@j@fBn@p@X`HfDdAd@NB\\CBLJDFCBI?OGILYn@gDb@uAVe@\\_@jEgDlFgARElBa@|G}AxFwA`AWv@YNI~AaArAg@bEw@pA[t@Y`B{@~BmAtAo@fAk@TYBBH?DGBKTEd@U^QlBcA^QvEcCP@Le@Cm@Eo@Ia@AI",
"rideSharingAd": {
"id": "24bab9de-653c-4cc4-a947-389c59cf0423",
"type": "DRIVER",
"from": {
"name": "9 Allée Rochester, Rennes",
"lat": 48.127905,
"lon": -1.652393
},
"to": {
"name": "2 Avenue Alphonse Legault, Bruz",
"lat": 48.024714,
"lon": -1.746711
},
"user": {
"alias": "Jean P.",
"gender": "MALE",
"imageUrl": "https://dummyimage.com/128x128/C8E6C9/000.png&text=JP",
"rating": {
"rate": 0,
"count": 0
}
},
"price": {
"amount": 170,
"currency": "EUR"
},
"vehicle": {
"availableSeats": 4
}
}
}
]
},
{
"id": "05223c04-834d-4710-905f-aa3796da5837",
"departureDate": "2017-12-25T08:35:42+01:00",
"arrivalDate": "2017-12-25T08:53:09+01:00",
"duration": 1047,
"distance": 11686,
"url": "https://jky8k.app.goo.gl/?efr=1&apn=com.is.android.rennes&ibi=&isi=&utm_campaign=KISIO&link=https%3A%2F%2Fwww.star.fr%2Fsearch%2F%3FfeatureName%3DsearchResultDetail%26networkId%3D33%26journeyId%3D05223c04-834d-4710-905f-aa3796da5837",
"paths": [
{
"mode": "RIDESHARINGAD",
"from": {
"name": "",
"lat": 48.1102,
"lon": -1.68623
},
"to": {
"name": "",
"lat": 48.03193,
"lon": -1.74635
},
"departureDate": "2017-12-25T08:35:42+01:00",
"arrivalDate": "2017-12-25T08:53:09+01:00",
"shape": "wosdH|ihIRVDTFDzBjPNhADJ\\`C?TJt@Hj@h@tDp@bFR?bAFRBZDR@JCL@~AJl@Df@DfBNv@B~@DjAFh@HXH~@VbEfANDh@PdAl@\\RdAZnBHpADvBDf@@d@Gv@S\\OlAOl@EbAHjAVNDd@Dd@Mt@u@FGrE{EtBaB`CeB|AeAr@cA`@_BNgCf@RTJ`@PjBr@fBz@~@d@~Av@`Af@d@TzBfAZPVLRJf@TbAj@zIpE|J`F\\PNHpBbAp@Z~Ax@PHdA\\ANFPH@DCDMxDpBt@fABTLXJFL@JEJI\\A`Bb@fABpAY|Aq@tCsAjBy@d@StHuDxAm@t@Qf@GX@VZTDTId@Fr@\\z@\\`Dz@|PrD`FbA|HjBNBZDdB`@`QfDnGfAxJhBvBd@vF`B~CzAhGvDnD|BxBtAbBnAd@f@b@h@BRP^VNZALId@?bBbAnMhIfH~D~@`@t@NjA@`@?VFVXJZAZBXLf@HNLJLFT@RERMz@~@~@r@b@d@Rd@`@zAGj@BZL`@HJBl@BzJFh@FbAL~@Pv@dAjDl@bBlBhFvBhFdCtCrGdE`IhFt@d@HF@?DBLJNL`EnClEpCpCfBp@^bBv@hAb@hBf@xBb@rIpAVNPL@NFNFHJDBXbA|GJt@bAzGt@dFl@vDZpCLhC@pBCxBYpDC`@GAODKRALD\\JLD@G^GVw@lHSjCK|B@vCPpMCnAKf@OHEJEVF\\FHNDHLH\\BZH|KA`E@N\\x@bBtBpGpGZV",
"rideSharingAd": {
"id": "4eb9a4ba-fb10-4cf0-bf4c-31ada02a2c91",
"type": "DRIVER",
"from": {
"name": "1 Boulevard Volney, Rennes",
"lat": 48.1247,
"lon": -1.666796
},
"to": {
"name": "9012 Rue du 8 Mai 1944, Bruz",
"lat": 48.031951,
"lon": -1.74641
},
"user": {
"alias": "Alice M.",
"gender": "FEMALE",
"imageUrl": "https://dummyimage.com/128x128/B2EBF2/000.png&text=AM",
"rating": {
"rate": 0,
"count": 0
}
},
"price": {
"amount": 0,
"currency": "EUR"
},
"vehicle": {
"availableSeats": 4
}
}
}
]
}
],
"url": "https://jky8k.app.goo.gl/?efr=1&apn=com.is.android.rennes&ibi=&isi=&utm_campaign=KISIO&link=https%3A%2F%2Fwww.star.fr%2Fsearch%2F%3FfeatureName%3DsearchResults%26networkId%3D33%26from%3D48.109377%252C-1.682103%26to%3D48.020335%252C-1.743929%26multimodal%3Dfalse%26departureDate%3D2017-12-25T08%253A00%253A00%252B01%253A00"
}
"""
# https://stackoverflow.com/a/9312242/1614576
import re

# The mocked payload embeds polyline "shape" strings containing raw
# backslashes; double every backslash that does not start a valid JSON
# escape so json.loads can parse the fixture.
regex = re.compile(r'\\(?![/u"])')
fixed = regex.sub(r"\\\\", fake_response)

# requests.get replacement returning the fixture with an HTTP 200.
mock_get = mock.MagicMock(return_value=utils_test.MockResponse(json.loads(fixed), 200, '{}'))

# Feed-publisher configuration injected into the services under test.
DUMMY_INSTANT_SYSTEM_FEED_PUBLISHER = {'id': '42', 'name': '42', 'license': 'I dunno', 'url': 'http://w.tf'}
# Minimal stand-in for a jormungandr instance: the tests below only need an
# object exposing a `name` attribute.
class DummyInstance:
    name = ''
def get_ridesharing_service_test():
    """Instantiate two InstantSystem services from config dicts and verify
    that every "args" value is wired onto the created services.

    The second config deliberately omits "feed_publisher" to exercise the
    fallback on DEFAULT_INSTANT_SYSTEM_FEED_PUBLISHER.
    """
    configs = [
        {
            "class": "jormungandr.scenarios.ridesharing.instant_system.InstantSystem",
            "args": {
                "service_url": "toto",
                "api_key": "toto key",
                "network": "N",
                "rating_scale_min": 0,
                "rating_scale_max": 10,
                "crowfly_radius": 200,
                "timeframe_duration": 1800,
                "feed_publisher": DUMMY_INSTANT_SYSTEM_FEED_PUBLISHER,
            },
        },
        {
            # no "feed_publisher" here on purpose (default is expected)
            "class": "jormungandr.scenarios.ridesharing.instant_system.InstantSystem",
            "args": {
                "service_url": "tata",
                "api_key": "tata key",
                "network": "M",
                "rating_scale_min": 1,
                "rating_scale_max": 5,
                "crowfly_radius": 200,
                "timeframe_duration": 1800,
            },
        },
    ]
    services = Ridesharing.get_ridesharing_services(DummyInstance(), configs)
    assert len(services) == 2
    # first service: fully configured, custom feed publisher
    assert services[0].service_url == 'toto'
    assert services[0].api_key == 'toto key'
    assert services[0].network == 'N'
    assert services[0].system_id == 'Instant System'
    assert services[0].rating_scale_min == 0
    assert services[0].rating_scale_max == 10
    assert services[0]._get_feed_publisher() == RsFeedPublisher(**DUMMY_INSTANT_SYSTEM_FEED_PUBLISHER)
    # second service: falls back to the default feed publisher
    assert services[1].service_url == 'tata'
    assert services[1].api_key == 'tata key'
    assert services[1].network == 'M'
    assert services[1].system_id == 'Instant System'
    assert services[1].rating_scale_min == 1
    assert services[1].rating_scale_max == 5
    assert services[1]._get_feed_publisher() == RsFeedPublisher(**DEFAULT_INSTANT_SYSTEM_FEED_PUBLISHER)
def instant_system_test():
    """End-to-end decoding test: with requests.get mocked to return the
    two-journey fixture above, request journeys and check every decoded
    field (places, shape, datetimes, driver info, price, seats) plus the
    returned feed publisher."""
    with mock.patch('requests.get', mock_get):
        instant_system = InstantSystem(
            DummyInstance(),
            service_url='dummyUrl',
            api_key='dummyApiKey',
            network='dummyNetwork',
            feed_publisher=DUMMY_INSTANT_SYSTEM_FEED_PUBLISHER,
            rating_scale_min=0,
            rating_scale_max=10,
        )
        from_coord = '48.109377,-1.682103'
        to_coord = '48.020335,-1.743929'
        period_extremity = utils.PeriodExtremity(
            datetime=utils.str_to_time_stamp("20171225T060000"), represents_start=True
        )
        ridesharing_journeys, feed_publisher = instant_system.request_journeys_with_feed_publisher(
            from_coord=from_coord, to_coord=to_coord, period_extremity=period_extremity
        )
        assert len(ridesharing_journeys) == 2
        # ---- first journey of the fixture ----
        assert ridesharing_journeys[0].metadata.network == 'dummyNetwork'
        assert ridesharing_journeys[0].metadata.system_id == 'Instant System'
        assert ridesharing_journeys[0].metadata.rating_scale_min == 0
        assert ridesharing_journeys[0].metadata.rating_scale_max == 10
        assert (
            ridesharing_journeys[0].ridesharing_ad
            == 'https://jky8k.app.goo.gl/?efr=1&apn=com.is.android.rennes&ibi=&isi=&utm_campaign=KISIO&link=https%3A%2F%2Fwww.star.fr%2Fsearch%2F%3FfeatureName%3DsearchResultDetail%26networkId%3D33%26journeyId%3D4bcd0b9d-2c9d-42a2-8ffb-4508c952f4fb'
        )
        assert ridesharing_journeys[0].pickup_place.addr == ""  # address is not provided in mock
        assert ridesharing_journeys[0].pickup_place.lat == 48.1102
        assert ridesharing_journeys[0].pickup_place.lon == -1.68623
        assert ridesharing_journeys[0].dropoff_place.addr == ""  # address is not provided in mock
        assert ridesharing_journeys[0].dropoff_place.lat == 48.02479
        assert ridesharing_journeys[0].dropoff_place.lon == -1.74673
        # the decoded polyline must start/end exactly on pickup/dropoff
        assert len(ridesharing_journeys[0].shape) > 3
        assert ridesharing_journeys[0].shape[0].lat == ridesharing_journeys[0].pickup_place.lat
        assert ridesharing_journeys[0].shape[0].lon == ridesharing_journeys[0].pickup_place.lon
        assert ridesharing_journeys[0].shape[1].lat == 48.1101  # test that we really load a shape
        assert ridesharing_journeys[0].shape[1].lon == -1.68635
        assert ridesharing_journeys[0].shape[-1].lat == ridesharing_journeys[0].dropoff_place.lat
        assert ridesharing_journeys[0].shape[-1].lon == ridesharing_journeys[0].dropoff_place.lon
        assert ridesharing_journeys[0].pickup_date_time == utils.str_to_time_stamp("20171225T070759")
        assert ridesharing_journeys[0].dropoff_date_time == utils.str_to_time_stamp("20171225T072536")
        assert ridesharing_journeys[0].driver.alias == 'Jean P.'
        assert ridesharing_journeys[0].driver.gender == Gender.MALE
        assert ridesharing_journeys[0].driver.image == 'https://dummyimage.com/128x128/C8E6C9/000.png&text=JP'
        assert ridesharing_journeys[0].driver.rate == 0
        assert ridesharing_journeys[0].driver.rate_count == 0
        # fixture price is 170 EUR-cents, exposed in 'centime'
        assert ridesharing_journeys[0].price == 170
        assert ridesharing_journeys[0].currency == 'centime'
        assert ridesharing_journeys[0].total_seats is None
        assert ridesharing_journeys[0].available_seats == 4
        # ---- second journey of the fixture ----
        assert ridesharing_journeys[1].metadata.network == 'dummyNetwork'
        assert ridesharing_journeys[1].metadata.system_id == 'Instant System'
        assert ridesharing_journeys[1].metadata.rating_scale_min == 0
        assert ridesharing_journeys[1].metadata.rating_scale_max == 10
        # the shape should not be none, but we don't test the whole string
        assert ridesharing_journeys[1].shape
        assert (
            ridesharing_journeys[1].ridesharing_ad
            == "https://jky8k.app.goo.gl/?efr=1&apn=com.is.android.rennes&ibi=&isi=&utm_campaign=KISIO&link=https%3A%2F%2Fwww.star.fr%2Fsearch%2F%3FfeatureName%3DsearchResultDetail%26networkId%3D33%26journeyId%3D05223c04-834d-4710-905f-aa3796da5837"
        )
        assert ridesharing_journeys[1].pickup_place.addr == ""
        assert ridesharing_journeys[1].pickup_place.lat == 48.1102
        assert ridesharing_journeys[1].pickup_place.lon == -1.68623
        assert ridesharing_journeys[1].dropoff_place.addr == ""
        assert ridesharing_journeys[1].dropoff_place.lat == 48.03193
        assert ridesharing_journeys[1].dropoff_place.lon == -1.74635
        assert ridesharing_journeys[1].pickup_date_time == utils.str_to_time_stamp("20171225T073542")
        assert ridesharing_journeys[1].dropoff_date_time == utils.str_to_time_stamp("20171225T075309")
        assert ridesharing_journeys[1].driver.alias == 'Alice M.'
        assert ridesharing_journeys[1].driver.gender == Gender.FEMALE
        assert ridesharing_journeys[1].driver.image == 'https://dummyimage.com/128x128/B2EBF2/000.png&text=AM'
        assert ridesharing_journeys[1].driver.rate == 0
        assert ridesharing_journeys[1].driver.rate_count == 0
        assert ridesharing_journeys[1].price == 0
        assert ridesharing_journeys[1].currency == 'centime'
        assert ridesharing_journeys[1].total_seats is None
        assert ridesharing_journeys[1].available_seats == 4
        assert feed_publisher == RsFeedPublisher(**DUMMY_INSTANT_SYSTEM_FEED_PUBLISHER)
import requests_mock
import pytest
def test_request_journeys_should_raise_on_non_200():
    """A non-200 HTTP answer must raise RidesharingServiceError carrying the
    status code and the raw response body in its parameters."""
    with requests_mock.Mocker() as mock:
        instant_system = InstantSystem(
            DummyInstance(), service_url='http://instant.sys', api_key='ApiKey', network='Network'
        )
        mock.get('http://instant.sys', status_code=401, text='{this is the http response}')
        with pytest.raises(RidesharingServiceError) as e:
            instant_system._request_journeys(
                '1.2,3.4',
                '5.6,7.8',
                utils.PeriodExtremity(
                    datetime=utils.str_to_time_stamp("20171225T060000"), represents_start=True
                ),
            )
        # the error params must expose both the status code and the body
        exception_params = e.value.get_params().values()
        assert 401 in exception_params
        assert '{this is the http response}' in exception_params
| xlqian/navitia | source/jormungandr/jormungandr/scenarios/ridesharing/tests/instant_system_tests.py | Python | agpl-3.0 | 15,730 |
from PyQt4 import QtCore, QtGui
class Page(QtGui.QWidget):
    """A widget container for one HUD page that catches drag & drop events."""

    def __init__(self, parent, pihud):
        super(Page, self).__init__(parent)
        self.setAcceptDrops(True)
        # explicit back-reference to the main window (normally parent())
        self.pihud = pihud
        self.widgets = []
        self.show()

    def dragEnterEvent(self, e):
        # accept every drag; dropEvent handles the payload
        e.accept()

    def dropEvent(self, e):
        # the mime text carries the cursor's grab offset as "x,y"
        grab_x, grab_y = [int(part) for part in e.mimeData().text().split(',')]
        e.source().move(e.pos() - QtCore.QPoint(grab_x, grab_y))
        e.setDropAction(QtCore.Qt.MoveAction)
        e.accept()

    def delete_widget(self, widget):
        # deletion requests are delegated to the main window (PiHud.py)
        self.pihud.delete_widget(self, widget)
| brendan-w/piHud | pihud/Page.py | Python | lgpl-2.1 | 846 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hashcat(MakefilePackage):
    """hashcat is the world's fastest and most advanced password recovery
    utility, supporting five unique modes of attack for over 300 highly
    optimized hashing algorithms. hashcat currently supports CPUs, GPUs,
    and other hardware accelerators on Linux, Windows, and macOS,and has
    facilities to help enable distributed password cracking."""

    homepage = "https://hashcat.net/hashcat/"
    url      = "https://github.com/hashcat/hashcat/archive/v6.1.1.tar.gz"

    # sha256 checksums of the release tarballs fetched from GitHub.
    version('6.1.1', sha256='39c140bbb3c0bdb1564bfa9b9a1cff49115a42f4c9c19e9b066b617aea309f80')
    version('6.1.0', sha256='916f92434e3b36a126be1d1247a95cd3b32b4d814604960a2ca325d4cc0542d1')
    version('6.0.0', sha256='e8e70f2a5a608a4e224ccf847ad2b8e4d68286900296afe00eb514d8c9ec1285')
    version('5.1.0', sha256='283beaa68e1eab41de080a58bb92349c8e47a2bb1b93d10f36ea30f418f1e338')
    version('5.0.0', sha256='7092d98cf0d8b29bd6efe2cf94802442dd8d7283982e9439eafbdef62b0db08f')

    def install(self, spec, prefix):
        # MakefilePackage already ran the default `make` build phase; this
        # runs the upstream install target with PREFIX set to the Spack
        # prefix.  NOTE(review): SHARED=1 semantics come from the upstream
        # Makefile — confirm it selects the shared-library build.
        make('SHARED=1', 'PREFIX={0}'.format(prefix), 'install')
| iulian787/spack | var/spack/repos/builtin/packages/hashcat/package.py | Python | lgpl-2.1 | 1,313 |
#!/usr/bin/python
import os, sys, signal, random
from datetime import datetime, timedelta
VLC_POOL_SIZE = 10 #number of vlc client to keep open
SPAWN_TIMER = 5 #number of seconds between vlc spawn
MAX_VLC_LIFE_TIME = 60 #max life time in seconds of a vlc client
# absolute path of the vlc binary exec'd by Vlc.run()
VLC_COMMAND = '/usr/bin/vlc'
class Vlc(object):
    """Wrapper around a single forked vlc client process (Python 2 code)."""

    def __init__(self, uri):
        super(Vlc, self).__init__()
        # pid of the forked child, None while not running
        self.pid = None
        # stream URI passed on the vlc command line
        self.uri = uri
        # datetime of the successful fork; used to enforce max life time
        self.spawn_time = None

    def _close_all_open_fd(self):
        # Close every inherited file descriptor in the child before exec.
        for fd in xrange(0, os.sysconf('SC_OPEN_MAX')):
            try:
                os.close(fd)
            except OSError:
                pass

    def run(self):
        """Fork and exec a vlc client.

        Returns True in the parent on success, False if a process is already
        owned, and None only in the child if execvp failed to replace the
        process image.
        """
        if self.pid:
            return False
        pid = os.fork()
        if pid:
            # parent: remember the child and its birth time
            self.pid = pid
            self.spawn_time = datetime.now()
            return True
        else:
            # child: drop inherited descriptors, then become vlc
            self._close_all_open_fd()
            os.execvp(VLC_COMMAND, ['vlc', self.uri])
            return None

    def stop(self):
        """SIGTERM the child and reap it; False when there is nothing to stop."""
        if not self.pid:
            return False
        try:
            os.kill(self.pid, signal.SIGTERM)
            os.waitpid(self.pid, 0)
        except Exception, e:
            # best effort: the child may already be gone
            print 'Vlc wasn\'t here anymore', e
            pass
        return True
def main(url):
    """Endlessly spawn and kill vlc clients against *url* to stress a server.

    Keeps at most VLC_POOL_SIZE clients, spawning one every SPAWN_TIMER
    seconds, and randomly retires clients older than MAX_VLC_LIFE_TIME.
    """
    random.seed()
    # pretend a spawn just became allowed so the first vlc starts immediately
    last_spawn = datetime.now() - timedelta(0, SPAWN_TIMER)
    vlc_pool = []
    while True:
        to_remove = []
        now = datetime.now()
        # spawn at most one new client per SPAWN_TIMER seconds, pool capped
        if (now - last_spawn >= timedelta(0, SPAWN_TIMER)) and (len(vlc_pool) < VLC_POOL_SIZE):
            last_spawn = now
            vlc = Vlc(url)
            print 'Running a new vlc'
            state = vlc.run()
            if state:
                vlc_pool.append(vlc)
            elif state == None:
                # we are in the child and execvp failed: leave immediately
                print 'Vlc Client exited by itself?'
                return
            else:
                print 'Failed to start Vlc'
        # retire (with 50% probability) clients past their max life time
        for vlc in vlc_pool:
            if now - vlc.spawn_time >= timedelta(0, MAX_VLC_LIFE_TIME):
                if random.random() >= 0.5:
                    print 'Stopping an old vlc started at', vlc.spawn_time
                    vlc.stop()
                    to_remove.append(vlc)
        # occasionally (5% of the time something was retired) kill them all
        if len(to_remove) and random.random() > 0.95:
            for vlc in vlc_pool:
                if not vlc in to_remove:
                    print 'Stopping multiple vlcs', vlc.spawn_time
                    vlc.stop()
                    to_remove.append(vlc)
        for vlc in to_remove:
            vlc_pool.remove(vlc)
if __name__ == '__main__':
    # the single expected argument is the RTSP URL to stress-test
    if len(sys.argv) != 2:
        print '%s requires an rtsp url to request' % sys.argv[0]
    else:
        main(sys.argv[1])
| lscube/feng | contrib/feng_destroyer.py | Python | lgpl-2.1 | 2,136 |
# -*- coding: utf-8 -*-
# Copyright(C) 2016 Edouard Lambert
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from .module import BinckModule

# Public API of the package: the weboob module class discovered by the loader.
__all__ = ['BinckModule']
| laurentb/weboob | modules/binck/__init__.py | Python | lgpl-3.0 | 843 |
################################################################################
# #
# Coauthors: Antoine Eddi (@antoeddi) & Philippe Desmaison (@desmaisn) #
# #
# Team: EMEA Partner Solutions Architects #
# Date: March 2016 #
# #
################################################################################
## Import ##
from flask import Flask, jsonify, request, send_from_directory, make_response
from itsdangerous import Signer
import urllib2
from geopy.geocoders import Nominatim
from PIL import Image
import cv2, numpy
import uuid, os, random, time, json
## Init ##
# Init flask object
app = Flask(__name__)
# Init geopy object (reverse geocoding: GPS point -> postal address)
geolocator = Nominatim()
# Init cookie signer with our secret key
# NOTE(review): the signing key is hard-coded in source; it should come from
# configuration or the environment in a real deployment.
signer = Signer("*{0!0C}]P1>12&y,(r*(8K :z*q43&\>8d+&P{g0_1'OU*U+<3'~'V0D:\*pY1!Z")
# Get useful directories path
current_dir = os.path.dirname(os.path.realpath(__file__))
process_dir = current_dir + '/process_img/'
assets_dir = process_dir + 'assets/'
## Aux. functions - image processing / mustache part ##
# Get noses coordinates with OpenCV
def get_noses_coord(image):
    """Detect faces, then one nose per face, with Haar cascades.

    Returns a list of (x, y, w, h) nose rectangles expressed in full-image
    coordinates (the nose cascade works inside the face region, so the face
    offset is added back).
    """
    cascades_dir = current_dir + '/haarcascades/'
    face_cascade = cv2.CascadeClassifier(cascades_dir + 'frontalface_default.xml')
    nose_cascade = cv2.CascadeClassifier(cascades_dir + 'mcs_nose.xml')
    # cascades operate on grayscale input
    img_gray = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(
        img_gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(100, 100),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    noses_coord = []
    for face in faces:
        x, y, w, h = face
        # restrict the nose search to the face rectangle
        roi = img_gray[y: y + h, x: x + w]
        noses = nose_cascade.detectMultiScale(
            roi,
            scaleFactor=1.2,
            minNeighbors=8,
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        if len(noses):
            # keep only the first nose candidate, shifted back to image coords
            nx, ny, nw, nh = noses[0]
            noses_coord.append((nx + x, ny + y, nw, nh))
    return noses_coord
# Compute the pasted mustache rectangle from a detected nose rectangle:
# returns ((width, height), (left, top)) in image coordinates.
def calc_mustache_coord(nose_coord, must_img_size):
    nose_x, nose_y, nose_w, nose_h = nose_coord
    # scale the mustache image so its width equals three nose-widths
    scale = (nose_w * 3) / float(must_img_size[0])
    width = int(must_img_size[0] * scale)
    height = int(must_img_size[1] * scale)
    # anchor one nose-width to the left, 65% of the way down the nose
    left = nose_x - nose_w
    top = ((65 * nose_h) / 100) + nose_y
    return ((width, height), (left, top))
# Paste mustache(s) on the top layer
def add_mustaches(image, top_layer):
    """Paste a random mustache over every detected nose; returns the count
    of noses found (used for the per-user counter)."""
    noses_coord = get_noses_coord(image)
    for nose_coord in noses_coord:
        # pick one of the mustache assets at random
        # NOTE(review): assumes assets/mustaches/1.png ... 11.png all exist
        random_num = str(random.randint(1, 11))
        mustache_img = Image.open(assets_dir + 'mustaches/' + random_num + '.png')
        size, offset = calc_mustache_coord(nose_coord, mustache_img.size)
        img_paste = mustache_img.resize(size)
        top_layer.paste(img_paste, offset)
    return len(noses_coord)
## Aux. functions - image processing / logo part ##
# Compute the pasted logo size: scaled so its width is a quarter of the
# image width, keeping the logo aspect ratio.
def calc_logo_size(image_size, logo_size):
    scale = float(image_size[0] / 4) / logo_size[0]
    width = int(logo_size[0] * scale)
    height = int(logo_size[1] * scale)
    return (width, height)
# Compute the logo position: bottom-right corner of the image, inset by a
# margin of one third of the logo height on both axes.
def calc_logo_offset(image_size, logo_size):
    margin = int(logo_size[1] / 3)
    return (image_size[0] - logo_size[0] - margin,
            image_size[1] - logo_size[1] - margin)
# Paste AWS logo on the top layer
def add_logo(image, top_layer):
    """Paste the logo asset, scaled relative to the image, onto the overlay
    layer at the bottom-right corner."""
    logo = Image.open(assets_dir + 'logo.jpg')
    size = calc_logo_size(image.size, logo.size)
    offset = calc_logo_offset(image.size, size)
    logo = logo.resize(size)
    top_layer.paste(logo, offset)
# Resize image if (H or W > 1200), add logo and mustaches to a top layer then return image merged with top layer
def process_image(base_file):
    """Full pipeline: load, downscale to fit 1200px, overlay logo and
    mustaches on a transparent layer, composite, and return
    (processed_image, mustache_count)."""
    image = Image.open(base_file).convert('RGB')
    if image.size[0] > 1200 or image.size[1] > 1200:
        # scale down uniformly so the larger dimension becomes 1200
        ratio = min(1200./image.size[0], 1200./image.size[1])
        image = image.resize((int(image.size[0] * ratio), int(image.size[1] * ratio)))
    # transparent overlay receiving logo + mustaches
    top_layer = Image.new('RGBA', image.size, (0,0,0,0))
    add_logo(image, top_layer)
    count = add_mustaches(image, top_layer)
    # the overlay doubles as its own alpha mask for compositing
    processed = Image.composite(top_layer, image, top_layer)
    return processed, count
## Aux. functions - image saving ##
# Derive a file name unused in save_dir: keep appending '-2' to the stem
# until no existing directory entry starts with it.
def get_valid_save_filename(current_filename, save_dir):
    parts = current_filename.split('.')
    stem = '.'.join(parts[:-1])
    extension = parts[-1]
    existing = os.listdir(save_dir)
    while any(entry.startswith(stem) for entry in existing):
        stem += '-2'
    return stem + '.' + extension
# Save image file into the right folder
def save_image(uuid, image, filename, extension):
    """Save *image* under the per-user directory named by *uuid*, avoiding
    name collisions; returns the file name actually used.

    NOTE(review): the parameter `uuid` shadows the imported uuid module
    inside this function.
    """
    save_dir = process_dir + 'processed/' + uuid + '/'
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    save_filename = get_valid_save_filename(filename, save_dir)
    # `extension` is the PIL format name taken from the upload content type
    image.save(save_dir + save_filename, extension)
    return save_filename
## Main functions - image related ##
# POST handler for image uploading / processing, GET handler for image downloading
@app.route('/image', methods=['GET', 'POST'])
def image_handling():
    """POST: process the uploaded picture, save it for the session user and
    return {filename, count} while refreshing the session cookie.
    GET: stream back a previously processed image by relative path."""
    if request.method == 'POST':
        file = request.files['file']
        image, count = process_image(file)
        # PIL format name derived from the MIME type, e.g. image/png -> png
        extension = file.content_type.split('/')[1]
        filename = file.filename
        cookie = get_cookie_data(request)
        save_filename = save_image(cookie['uuid'], image, filename, extension)
        # running total of mustaches added across the session
        total = count + cookie['count']
        response = make_response(jsonify(filename=save_filename, count=total))
        set_cookie(response, cookie['uuid'], total)
        return response
    elif request.method == 'GET':
        # NOTE(review): filepath is client-supplied; send_from_directory is
        # expected to block directory traversal — confirm for the Flask
        # version in use.
        filepath = request.args.get('filepath')
        return send_from_directory(process_dir + 'processed/', filepath)
# GET handler for images listing
@app.route('/images/list', methods=['GET'])
def list_images():
    """Return the session user's processed images, newest first."""
    cookie = get_cookie_data(request)
    user_dir = process_dir + 'processed/' + cookie['uuid'] + '/'
    img_list = []
    if os.path.isdir(user_dir):
        img_list = os.listdir(user_dir)
        # sort by creation time, descending (the * -1 inverts the order)
        img_list.sort(key=lambda x: os.path.getctime(os.path.join(user_dir, x)) * -1)
    return jsonify(list=img_list)
## Main functions - info related ##
# GET handlers returning public IP, postal address from GPS point and instance meta-data
@app.route('/infos/<param>', methods=['GET'])
def get_infos(param):
    """Dispatch on *param*: 'ip' echoes the caller's address, 'location'
    reverse-geocodes lat/lon query args, 'instance' queries the EC2
    metadata endpoint (169.254.169.254)."""
    if param == 'ip':
        return jsonify(public_ip=request.remote_addr)
    elif param == 'location':
        latitude = request.args.get('latitude')
        longitude = request.args.get('longitude')
        location = geolocator.reverse(latitude + "," + longitude)
        # Heuristic reshaping of the Nominatim address into a short form;
        # only applied when the comma-separated address has 8 or 9 parts.
        splits = location.address.split(', ')
        if 8 <= len(splits) <= 9:
            x = len(splits) - 9
            address = " ".join((splits[1 + x] + " -", splits[7 + x], splits[2 + x] + ",", splits[8 + x]))
        else:
            address = location.address
        return jsonify(address=address)
    elif param == 'instance':
        az = urllib2.urlopen('http://169.254.169.254/latest/meta-data/placement/availability-zone').read()
        instance = urllib2.urlopen('http://169.254.169.254/latest/meta-data/instance-id').read()
        return jsonify(az=az, instance=instance)
# POST handler storing client infos into local file
@app.route('/infos', methods=['POST'])
def send_infos():
    """Dump the JSON request body into logs/<uuid>/<utc timestamp>.json."""
    cookie = get_cookie_data(request)
    infos = request.json
    save_dir = current_dir + '/logs/' + cookie['uuid'] + '/'
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    # NOTE(review): the ':' in the timestamp makes the name invalid on
    # Windows file systems; fine on POSIX.
    filename = time.strftime("%Y-%m-%d_%H%M:%S", time.gmtime())
    with open(save_dir + filename + '.json', 'w') as outfile:
        json.dump(infos, outfile)
    return ''
## Aux. functions - managing cookie ##
# Check if cookie contain valid signed values
def is_valid_cookie(cookie):
    """Return the unsigned cookie payload, or '' when the signature check
    fails (both falsy-on-failure and payload-on-success for callers)."""
    try:
        unsigned = signer.unsign(cookie)
        return unsigned
    # NOTE(review): bare except also swallows unrelated errors; itsdangerous
    # raises BadSignature, which would be the precise thing to catch.
    except:
        return ''
# Generate a new uuid that is not used already
def get_available_uuid():
    """Generate a uuid1 that does not collide with any existing per-user
    directory under processed/."""
    processed_dir = process_dir + 'processed/'
    if not os.path.isdir(processed_dir):
        os.makedirs(processed_dir)
    uuid_unavailable = os.listdir(processed_dir);
    # loop until an unused identifier is drawn (collisions are unlikely)
    while 42:
        new_uuid = str(uuid.uuid1())
        if new_uuid not in uuid_unavailable:
            return new_uuid
# Get cookie values
def get_cookie_data(request):
    """Extract {'uuid', 'count'} from the signed 'Session' cookie.

    NOTE(review): assumes the cookie was already validated by get_cookie();
    an invalid/absent cookie makes is_valid_cookie return '' and this
    function raise IndexError on cookie_raw[1].
    """
    cookie = request.cookies.get('Session')
    # payload format is "<uuid>.<count>"
    cookie_raw = is_valid_cookie(cookie).split('.')
    cookie_data = {'uuid':cookie_raw[0], 'count':int(cookie_raw[1])}
    return cookie_data
# Generate cookie that expire in 2 weeks
def set_cookie(response, uuid, count):
    """Attach the signed "<uuid>.<count>" session cookie to *response*,
    expiring 14 days from now (HTTP cookie date format, GMT)."""
    signed = signer.sign(uuid + '.' + str(count))
    expires_date = time.time() + 14 * 24 * 3600
    expires_date = time.strftime("%a, %d-%b-%Y %T GMT", time.gmtime(expires_date))
    response.set_cookie('Session', signed, expires=expires_date)
## Main functions - cookie related ##
# Check if cookie from request is valid, if not generate a new one
@app.route('/cookie', methods=['GET'])
def get_cookie():
    """Session bootstrap: reuse a valid 'Session' cookie or mint a fresh
    uuid with a zero counter, then re-sign and return it."""
    cookie = request.cookies.get('Session')
    if not cookie or not is_valid_cookie(cookie):
        # NOTE(review): this local `uuid` shadows the imported uuid module
        # within the function body.
        uuid = get_available_uuid()
        count = 0
    else:
        valid_data = get_cookie_data(request)
        uuid = valid_data['uuid']
        count = valid_data['count']
    response = make_response(jsonify(uuid=uuid, count=count))
    set_cookie(response, uuid, count)
    return response
if __name__ == '__main__':
    # development entry point; in production run behind a WSGI server
    app.run()
| calinerd/AWS | FULL_LABS/MustacheMe/SourceCode/src/MustacheMe/MustacheMeProcessor/api-static/serv.py | Python | unlicense | 10,036 |
#!/usr/bin/python
import sys
import os
# Parse command-line arguments: input file, 1-based key column index, file
# listing the target keys, and output file.  Four user arguments are
# required; argv[0] is the script name, so argv must hold 5 entries.
# BUGFIX: the check used to be `>= 4`, which let sys.argv[4] raise an
# IndexError when exactly four entries were present; the usage text also
# omitted the mandatory output_filename argument.
if len(sys.argv) >= 5 :
    filename = sys.argv[1]
    row_i = int(sys.argv[2])-1
    target_ls_filename = sys.argv[3]
    output_filename = sys.argv[4]
else:
    print("usage: python selectrow.py filename row_i target_ls_filename output_filename")
    print("or ./selectrow.py filename row_i target_ls_filename output_filename")
    sys.exit(1)
################################################################################
# Index the input file: group every (stripped) line of the TSV under the
# value found in the selected column (row_i is the 0-based column index).
# Improvements: dict.setdefault replaces the deprecated has_key + double
# lookup, and the local no longer shadows the `file` builtin.
input_file = open(filename,'r')
dt = {}
for line in input_file:
    fields = line.strip().split('\t')
    dt.setdefault(fields[row_i], []).append(line.strip())
input_file.close()
################################################################################
# For every id listed in the target file, write all matching input lines to
# the output; ids with no match are echoed to stdout, ids with several
# matches are reported with their count (Python 2 print statements).
output = open(output_filename,'w')
target_ls_file = open(target_ls_filename, 'r')
for line in target_ls_file:
    id = line.strip()
    if not dt.has_key(id):
        # no line of the input had this id in the selected column
        print id
        continue
    if len(dt[id])>1:
        # duplicate keys: report how many lines share this id
        print id + '\t' + str(len(dt[id]))
    for item in dt[id]:
        output.write( item + '\n')
output.close()
target_ls_file.close()
| jason-weirather/IDP | bin/selectrow.py | Python | apache-2.0 | 1,076 |
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
class BaseSolutionComparator(object, metaclass=abc.ABCMeta):
    """Abstract interface for comparing two candidate solutions."""
    @abc.abstractmethod
    def compare(self, sol1, sol2):
        """Compare *sol1* with *sol2*.

        Semantics (ordering convention, return value) are defined by the
        concrete subclasses; this base only fixes the call signature.
        """
        raise NotImplementedError()
| stackforge/watcher | watcher/decision_engine/solution/solution_comparator.py | Python | apache-2.0 | 832 |
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.map_message_type import *
# Wire-protocol identifiers for the map EXECUTEONALLKEYS operation.
REQUEST_TYPE = MAP_EXECUTEONALLKEYS
# Frame type id expected in the server's response (per the client protocol).
RESPONSE_TYPE = 117
# This operation is not safe to retry automatically.
RETRYABLE = False
def calculate_size(name, entry_processor):
    """Calculate the request payload size in bytes."""
    return calculate_size_str(name) + calculate_size_data(entry_processor)
def encode_request(name, entry_processor):
    """Encode the request parameters into a ClientMessage frame."""
    message = ClientMessage(payload_size=calculate_size(name, entry_processor))
    message.set_message_type(REQUEST_TYPE)
    message.set_retryable(RETRYABLE)
    # Payload fields must be appended in protocol order: name, then processor.
    message.append_str(name)
    message.append_data(entry_processor)
    message.update_frame_length()
    return message
def decode_response(client_message, to_object=None):
    """Decode the server response from *client_message*.

    Reads a size-prefixed list of (key-data, value-data) pairs and wraps it
    in an ImmutableLazyDataList so entries are deserialized on demand.
    Returns a dict with a single 'response' key, as the callers expect.
    """
    response_size = client_message.read_int()
    # range() replaces the Py2-only xrange(); the two data reads per entry
    # are evaluated left to right, preserving the wire order (key, value).
    response = [
        (client_message.read_data(), client_message.read_data())
        for _ in range(response_size)
    ]
    return dict(response=ImmutableLazyDataList(response, to_object))
| cangencer/hazelcast-python-client | hazelcast/protocol/codec/map_execute_on_all_keys_codec.py | Python | apache-2.0 | 1,451 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=invalid-name
"""
Hangouts Chat bot that responds to events and messages from a room
synchronously. The bot formats the response using cards,
inserting widgets based upon the user's original input.
"""
import logging
from flask import Flask, render_template, request, json
app = Flask(__name__)
INTERACTIVE_TEXT_BUTTON_ACTION = "doTextButtonAction"
INTERACTIVE_IMAGE_BUTTON_ACTION = "doImageButtonAction"
INTERACTIVE_BUTTON_PARAMETER_KEY = "param_key"
BOT_HEADER = 'Card Bot Python'
@app.route('/', methods=['POST'])
def home_post():
    """Respond to POST requests to this endpoint.

    All requests sent to this endpoint from Hangouts Chat are POST
    requests.
    """
    event = request.get_json()
    event_type = event['type']

    # Removal posts no message back; just log it and acknowledge.
    if event_type == 'REMOVED_FROM_SPACE':
        logging.info('Bot removed from %s', event['space']['name'])
        return 'OK'

    resp = None
    if event_type == 'ADDED_TO_SPACE':
        space_type = event['space']['type']
        if space_type == 'ROOM':
            resp = {'text': 'Thanks for adding me to {}!'.format(event['space']['name'])}
        elif space_type == 'DM':
            resp = {'text': 'Thanks for adding me to a DM, {}!'.format(event['user']['displayName'])}
    elif event_type == 'MESSAGE':
        resp = create_card_response(event['message']['text'])
    elif event_type == 'CARD_CLICKED':
        resp = respond_to_interactive_card_click(
            event['action']['actionMethodName'],
            event['action']['parameters'])

    logging.info(resp)
    return json.jsonify(resp)
@app.route('/', methods=['GET'])
def home_get():
    """Respond to GET requests to this endpoint.

    This function responds to requests with a simple HTML landing page for this
    App Engine instance.
    """
    return render_template('home.html')
def create_card_response(event_message):
    """Creates a card response based on the message sent in Hangouts Chat.

    Each recognized word in the (lowercased) message adds the matching widget
    to the card; the word 'header' adds a card header. Unknown words are
    ignored.

    See the reference for JSON keys and format for cards:
    https://developers.google.com/hangouts/chat/reference/message-formats/cards

    Args:
      event_message: the user's message to the bot
    """
    def interactive_on_click(action_name):
        # onClick payload routing the click back to this bot, carrying the
        # user's original (non-lowercased) message as a parameter.
        return {
            'action': {
                'actionMethodName': action_name,
                'parameters': [{
                    'key': INTERACTIVE_BUTTON_PARAMETER_KEY,
                    'value': event_message
                }]
            }
        }

    def link_on_click():
        # onClick payload that simply opens a documentation link.
        return {
            'openLink': {
                'url': 'https://developers.google.com'
            }
        }

    # One factory per recognized keyword; each call builds a fresh widget dict.
    widget_builders = {
        'textparagraph': lambda: {
            'textParagraph': {
                'text': '<b>This</b> is a <i>text paragraph</i>.'
            }
        },
        'keyvalue': lambda: {
            'keyValue': {
                'topLabel': 'KeyValue Widget',
                'content': 'This is a KeyValue widget',
                'bottomLabel': 'The bottom label',
                'icon': 'STAR'
            }
        },
        'interactivetextbutton': lambda: {
            'buttons': [{
                'textButton': {
                    'text': 'INTERACTIVE BUTTON',
                    'onClick': interactive_on_click(INTERACTIVE_TEXT_BUTTON_ACTION)
                }
            }]
        },
        'interactiveimagebutton': lambda: {
            'buttons': [{
                'imageButton': {
                    'icon': 'EVENT_SEAT',
                    'onClick': interactive_on_click(INTERACTIVE_IMAGE_BUTTON_ACTION)
                }
            }]
        },
        'textbutton': lambda: {
            'buttons': [{
                'textButton': {
                    'text': 'TEXT BUTTON',
                    'onClick': link_on_click()
                }
            }]
        },
        'imagebutton': lambda: {
            'buttons': [{
                'imageButton': {
                    'icon': 'EVENT_SEAT',
                    'onClick': link_on_click()
                }
            }]
        },
        'image': lambda: {
            'image': {
                'imageUrl': 'https://goo.gl/Bpa3Y5',
                'onClick': link_on_click()
            }
        },
    }

    header = None
    widgets = []
    for word in event_message.lower().split():
        if word == 'header':
            header = {
                'header': {
                    'title': BOT_HEADER,
                    'subtitle': 'Card header',
                    'imageUrl': 'https://goo.gl/5obRKj',
                    'imageStyle': 'IMAGE'
                }
            }
        elif word in widget_builders:
            widgets.append(widget_builders[word]())

    cards = []
    if header is not None:
        cards.append(header)
    cards.append({'sections': [{'widgets': widgets}]})
    return {'cards': cards}
def respond_to_interactive_card_click(action_name, custom_params):
    """Creates a response for when the user clicks on an interactive card.

    See the guide for creating interactive cards
    https://developers.google.com/hangouts/chat/how-tos/cards-onclick

    Args:
      action_name: the name of the custom action defined in the original bot response
      custom_params: the parameters defined in the original bot response
    """
    if action_name == INTERACTIVE_TEXT_BUTTON_ACTION:
        message = 'You clicked a text button'
    else:
        message = 'You clicked an image button'

    first_param = custom_params[0]
    original_message = (
        first_param['value']
        if first_param['key'] == INTERACTIVE_BUTTON_PARAMETER_KEY
        else '<i>Cannot determine original message</i>'
    )

    # If you want to respond to the same room but with a new message,
    # change the following value to NEW_MESSAGE.
    action_response = 'UPDATE_MESSAGE'

    header_card = {
        'header': {
            'title': BOT_HEADER,
            'subtitle': 'Interactive card clicked',
            'imageUrl': 'https://goo.gl/5obRKj',
            'imageStyle': 'IMAGE'
        }
    }
    body_card = {
        'sections': [{
            'widgets': [
                {
                    'textParagraph': {
                        'text': message
                    }
                },
                {
                    'keyValue': {
                        'topLabel': 'Original message',
                        'content': original_message
                    }
                }
            ]
        }]
    }

    return {
        'actionResponse': {
            'type': action_response
        },
        'cards': [header_card, body_card]
    }
if __name__ == '__main__':
    # This is used when running locally. Gunicorn is used to run the
    # application on Google App Engine. See entrypoint in app.yaml.
    # Binds to localhost only; debug mode enables the reloader and debugger.
    app.run(host='127.0.0.1', port=8080, debug=True)
| gsuitedevs/hangouts-chat-samples | python/card-bot/main.py | Python | apache-2.0 | 9,379 |
def testHasMasterPrimary(txnPoolNodeSet):
masterPrimaryCount = 0
for node in txnPoolNodeSet:
masterPrimaryCount += int(node.monitor.hasMasterPrimary)
assert masterPrimaryCount == 1
| evernym/zeno | plenum/test/monitoring/test_monitor_attributes.py | Python | apache-2.0 | 201 |
__author__ = 'Ostico <ostico@gmail.com>'
import sys
import os
import unittest
from pyorient.exceptions import *
from pyorient import OrientSocket
from pyorient.messages.database import *
from pyorient.messages.commands import *
from pyorient.messages.cluster import *
from pyorient.messages.records import *
from pyorient.messages.connection import *
from pyorient.constants import DB_TYPE_DOCUMENT, QUERY_SYNC, \
STORAGE_TYPE_PLOCAL, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY
# Silence the client's debug tracing for this test module.
os.environ['DEBUG'] = "0"
os.environ['DEBUG_VERBOSE'] = "0"
# Make the package importable whether the tests are run from the repository
# root or from inside the tests directory.
if os.path.realpath( '../' ) not in sys.path:
    sys.path.insert( 0, os.path.realpath( '../' ) )
if os.path.realpath( '.' ) not in sys.path:
    sys.path.insert( 0, os.path.realpath( '.' ) )
class RawMessages_2_TestCase(unittest.TestCase):
    """Command test case exercising raw record/cluster protocol messages.

    These are integration tests: they require an OrientDB server reachable on
    localhost:2424 with the GratefulDeadConcerts demo database installed.
    """

    def test_record_object(self):
        """A freshly constructed OrientRecord carries no identity yet."""
        x = OrientRecord()
        assert x._rid is None
        assert x._version is None
        assert x._class is None

    def test_record_load(self):
        """Open a database and load a known record, checking its fields."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1

        msg = DbOpenMessage( connection )
        db_name = "GratefulDeadConcerts"
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0

        def _test_callback(record):
            # `record is not []` compared identity with a fresh list and was
            # always true; compare by value instead.
            assert record != []
            assert record._rid is not None  # assert no exception

        req_msg = RecordLoadMessage( connection )
        res = req_msg.prepare( [ "#11:0", "*:2", _test_callback ] ) \
            .send().fetch_response()

        assert res._rid == "#11:0"
        assert res._class == 'followed_by'
        assert res._in != 0
        assert res._out != 0

    def test_record_count_with_no_opened_db(self):
        """Counting records without an open database must raise."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1

        conn_msg = ConnectMessage( connection )
        session_id = conn_msg.prepare( ("root", "root") )\
            .send().fetch_response()
        assert session_id == connection.session_id
        assert session_id != -1

        try:
            count_msg = DbCountRecordsMessage( connection )
            count_msg.prepare().send().fetch_response()
            assert False  # we expect an exception because we need a db opened
        except PyOrientDatabaseException:
            assert True

    def test_record_count(self):
        """Record count of an opened database is a positive number."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1

        msg = DbOpenMessage( connection )
        db_name = "GratefulDeadConcerts"
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0

        session_id = connection.session_id
        assert session_id != -1

        count_msg = DbCountRecordsMessage( connection )
        res = count_msg.prepare().send().fetch_response()
        # `res is not 0` was an identity check, always true here; use `!=`.
        assert res != 0
        assert res > 0

    def test_record_create_update(self):
        """Create a record, update it, and read the update back."""
        connection = OrientSocket( "localhost", 2424 )

        conn_msg = ConnectMessage( connection )
        assert connection.protocol != -1

        session_id = conn_msg.prepare( ("root", "root") ) \
            .send().fetch_response()
        assert session_id == connection.session_id
        assert session_id != -1

        db_name = "my_little_test"

        msg = DbExistsMessage( connection )
        exists = msg.prepare( [db_name] ).send().fetch_response()
        print("Before %r" % exists)
        try:
            ( DbDropMessage( connection ) ).prepare([db_name]) \
                .send().fetch_response()
            assert True
        except PyOrientCommandException as e:
            print(str(e))
        finally:
            ( DbCreateMessage( connection ) ).prepare(
                (db_name, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY)
            ).send().fetch_response()

        msg = DbOpenMessage( connection )
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_GRAPH, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0

        try:
            create_class = CommandMessage(connection)
            cluster = create_class.prepare((QUERY_CMD, "create class my_class "
                                                       "extends V"))\
                .send().fetch_response()[0]
        except PyOrientCommandException:
            # class my_class already exists
            # NOTE(review): in that case `cluster` below is left unbound —
            # this test relies on a clean in-memory database.
            pass

        # classes are not allowed in record create/update/load
        rec = { '@my_class': { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' } }
        rec_position = ( RecordCreateMessage(connection) )\
            .prepare( ( cluster, rec ) )\
            .send().fetch_response()
        print("New Rec Position: %s" % rec_position._rid)
        assert rec_position._rid is not None

        rec = { '@my_class': { 'alloggio': 'albergo', 'lavoro': 'ufficio', 'vacanza': 'montagna' } }
        update_success = ( RecordUpdateMessage(connection) )\
            .prepare( ( cluster, rec_position._rid, rec ) )\
            .send().fetch_response()
        assert update_success[0] != 0

        if connection.protocol <= 21:
            # Fix: `return unittest.skip(...)` returned a decorator object,
            # silently *passing* the test; skipTest() actually skips it.
            self.skipTest("Protocol {!r} does not works well".format(
                connection.protocol))

        res = ( CommandMessage( connection ) )\
            .prepare( [ QUERY_SYNC, "select from " + rec_position._rid ] )\
            .send().fetch_response()

        # res = [ ( RecordLoadMessage(connection) ).prepare(
        #     [ rec_position._rid ]
        # ).send().fetch_response() ]

        print("%r" % res[0]._rid)
        print("%r" % res[0]._class)
        print("%r" % res[0]._version)
        print("%r" % res[0].alloggio)
        print("%r" % res[0].lavoro)
        print("%r" % res[0].vacanza)

        assert res[0]._rid == '#11:0'
        # assert res[0]._class == 'my_class'
        assert res[0]._version >= 0
        assert res[0].alloggio == 'albergo'
        assert res[0].lavoro == 'ufficio'
        assert res[0].vacanza == 'montagna'

        # reconnect as root, then at the end drop the test database
        ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
            .send().fetch_response()
        ( DbDropMessage( connection ) ).prepare([db_name]) \
            .send().fetch_response()

    def test_record_delete(self):
        """Create a record, delete it, and verify a bogus delete fails."""
        connection = OrientSocket( "localhost", 2424 )

        conn_msg = ConnectMessage( connection )
        assert connection.protocol != -1

        session_id = conn_msg.prepare( ("root", "root") ) \
            .send().fetch_response()
        print("Sid: %s" % session_id)
        assert session_id == connection.session_id
        assert session_id != -1

        db_name = "my_little_test"

        msg = DbExistsMessage( connection )
        exists = msg.prepare( [db_name] ).send().fetch_response()
        print("Before %r" % exists)
        try:
            ( DbDropMessage( connection ) ).prepare([db_name]) \
                .send().fetch_response()
            assert True
        except PyOrientCommandException as e:
            print(str(e))
        finally:
            ( DbCreateMessage( connection ) ).prepare(
                (db_name, DB_TYPE_DOCUMENT, STORAGE_TYPE_MEMORY)
            ).send().fetch_response()

        msg = DbOpenMessage( connection )
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0

        rec = { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' }
        rec_position = ( RecordCreateMessage(connection) )\
            .prepare( ( 1, rec ) )\
            .send().fetch_response()
        print("New Rec Position: %s" % rec_position._rid)
        assert rec_position._rid is not None

        ######################## Check Success
        res = ( CommandMessage( connection ) )\
            .prepare( [ QUERY_SYNC, "select from " + str(rec_position._rid) ] )\
            .send().fetch_response()
        import re
        assert re.match( '#1:[0-9]', res[0]._rid )
        assert res[0]._class is None
        assert res[0]._version >= 0
        assert res[0].alloggio == 'casa'
        assert res[0].lavoro == 'ufficio'
        assert res[0].vacanza == 'mare'

        ######################## Delete Rid
        del_msg = (RecordDeleteMessage(connection))
        deletion = del_msg.prepare( ( 1, rec_position._rid ) )\
            .send().fetch_response()
        assert deletion is True

        # now try a failure in deletion for wrong rid
        del_msg = (RecordDeleteMessage(connection))
        deletion = del_msg.prepare( ( 1, 11111 ) )\
            .send().fetch_response()
        assert deletion is False

        # reconnect as root, then at the end drop the test database
        ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
            .send().fetch_response()
        ( DbDropMessage( connection ) ).prepare([db_name]) \
            .send().fetch_response()

    def test_data_cluster_count(self):
        """Counting over the same clusters must agree across call styles."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1

        msg = DbOpenMessage( connection )
        db_name = "GratefulDeadConcerts"
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        print(cluster_info)
        assert len(cluster_info) != 0
        assert connection.session_id != -1

        count_msg = DataClusterCountMessage( connection )
        res1 = count_msg.set_count_tombstones(1)\
            .prepare( [ (0,1,2,3,4,5) ] ).send().fetch_response()
        # `is not 0` was an identity check, always true here; use `!=`.
        assert res1 != 0
        assert res1 > 0

        count_msg = DataClusterCountMessage( connection )
        res2 = count_msg.set_count_tombstones(1)\
            .prepare( [ (0,1,2,3,4,5), 1 ] ).send().fetch_response()
        assert res2 != 0
        assert res2 > 0

        count_msg = DataClusterCountMessage( connection )
        res3 = count_msg.set_count_tombstones(1).set_cluster_ids( (0,1,2,3,4,5) )\
            .prepare().send().fetch_response()
        assert res3 != 0
        assert res3 > 0

        assert res1 == res2
        assert res3 == res2
        assert res3 == res1

    def test_query_async(self):
        """An async query delivers records via the callback, not the result."""
        connection = OrientSocket( 'localhost', 2424 )
        open_msg = DbOpenMessage(connection)
        open_msg.set_db_name('GratefulDeadConcerts')\
            .set_user('admin').set_pass('admin').prepare()\
            .send().fetch_response()

        def _test_callback(record):
            assert record != []
            assert record._rid is not None  # assert no exception

        try_select_async = CommandMessage(connection)
        # Fix: the original chained `.prepare()\` straight into the next
        # assignment via a stray line continuation, which is a syntax error;
        # prepare() is now a statement of its own.
        try_select_async.set_command_type(QUERY_ASYNC)\
            .set_query("select from followed_by")\
            .set_limit(50)\
            .set_fetch_plan("*:0")\
            .set_callback( _test_callback )\
            .prepare()

        response = try_select_async.send().fetch_response()
        assert response is None

    def test_wrong_data_range(self):
        """Requesting a data range for a nonexistent cluster id must fail."""
        connection = OrientSocket( 'localhost', 2424 )

        db_name = "GratefulDeadConcerts"
        db = DbOpenMessage(connection)
        cluster_info = db.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()

        datarange = DataClusterDataRangeMessage(connection)
        try:
            value = datarange.prepare(32767).send().fetch_response()
        except PyOrientCommandException as e:
            print(repr(str(e)))
            assert "IndexOutOfBoundsException" in str(e)

    def test_data_range(self):
        """Every cluster of the demo database reports a usable data range."""
        connection = OrientSocket( 'localhost', 2424 )

        db_name = "GratefulDeadConcerts"
        db = DbOpenMessage(connection)
        _, clusters, _ = db.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()

        clusters.sort(key=lambda cluster: cluster.id)
        for cluster in clusters:
            # os.environ['DEBUG'] = '0'  # silence debug
            datarange = DataClusterDataRangeMessage(connection)
            value = datarange.prepare(cluster.id).send().fetch_response()
            print("Cluster Name: %s, ID: %u: %s "\
                  % (cluster.name, cluster.id, value))
            # `is not []` was always true; compare by value.
            assert value != []
            assert value is not None
# x = RawMessages_2_TestCase('test_wrong_data_range').run()
| mogui/pyorient | tests/test_raw_messages_2.py | Python | apache-2.0 | 12,897 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
import time
from enable.component_editor import ComponentEditor
from traits.api import Instance
from traitsui.api import View, UItem
# ============= standard library imports ========================
from numpy import array
# ============= local library imports ==========================
from pychron.lasers.scanner import Scanner
from pychron.lasers.tasks.editors.laser_editor import LaserEditor
from pychron.managers.data_managers.csv_data_manager import CSVDataManager
from pychron.paths import paths
from six.moves import range
class PyrometerCalibrationScanner(Scanner):
_eq_tol = 1.5
_eq_std = 5
def _write_calibration(self, data):
dm = self.csv_data_manager
dm.write_to_frame(data)
def start_control_hook(self, ydict):
self.csv_data_manager = dm = CSVDataManager()
p = os.path.join(paths.data_dir, "pyrometer_calibration")
dm.new_frame(directory=p)
def _maintain_setpoint(self, t, d):
if d == "equilibrate":
py, tc = self._equilibrate(t)
self._write_calibration((t, py, tc))
else:
super(PyrometerCalibrationScanner, self)._maintain_setpoint(t, d)
def _equilibrate(self, ctemp):
# ctemp=self._current_setpoint
# ctemp = self.manager.map_temperature(temp)
py = self.manager.get_device("pyrometer")
tc = self.manager.get_device("temperature_monitor")
temps = []
n = 10
tol = self._eq_tol
std = self._eq_std
while self._scanning:
sti = time.time()
# py_t = py.get()
ref_t = py.temperature
temps.append(ref_t)
# ttemps.append(tc_t)
ns = array(temps[-n:])
# ts = array(ttemps[-n:])
if abs(ns.mean() - ctemp) < tol and ns.std() < std:
break
elapsed = time.time() - sti
time.sleep(max(0.0001, min(1, 1 - elapsed)))
nn = 30
ptemps = []
ctemps = []
for _ in range(nn):
if not self._scanning:
break
sti = time.time()
py_t = py.temperature
tc_t = tc.process_value
ptemps.append(py_t)
ctemps.append(tc_t)
elapsed = time.time() - sti
time.sleep(max(0.0001, min(1, 1 - elapsed)))
return array(ptemps).mean(), array(ctemps).mean()
class PyrometerCalibrationEditor(LaserEditor):
scanner = Instance(Scanner)
def stop(self):
self.scanner.stop()
def _scan_pyrometer(self):
d = self._pyrometer
return d.read_temperature(verbose=False)
def _scan_thermocouple(self):
d = self._thermocouple
return d.read_temperature(verbose=False)
def _do_execute(self):
p = os.path.join(paths.scripts_dir, "pyrometer_calibration.yaml")
s = PyrometerCalibrationScanner(control_path=p, manager=self._laser_manager)
s.setup("pyrometer_calibration", "scan")
self._pyrometer = self._laser_manager.get_device("pyrometer")
self._thermocouple = self._laser_manager.get_device("temperature_monitor")
s.new_function(self._scan_pyrometer, name="pyrometer")
s.new_function(self._scan_thermocouple, name="thermocouple")
# s.new_static_value('Setpoint', 10, plotid=1)
g = s.make_graph()
self.component = g.plotcontainer
if s.execute():
s.do_scan()
self.scanner = s
return True
def traits_view(self):
v = View(UItem("component", editor=ComponentEditor()))
return v
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/lasers/tasks/editors/pyrometer_calibration_editor.py | Python | apache-2.0 | 4,570 |
from JumpScale.portal.macrolib import div_base
def main(j, args, params, *other_args):
    # Delegate to the shared div_base macro, emitting a self-closing
    # <input type="email"> whose `pattern` restricts values to an
    # email-address shape for client-side validation.
    return div_base.macro(j, args, params, self_closing=True, tag='input',
                          additional_tag_params={'type': 'email',
                                                 'pattern': r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)+$"})
def match(j, args, params, tags, tasklet):
    # This macro applies unconditionally to every page.
    return True
| Jumpscale/jumpscale_portal8 | apps/portalbase/macros/page/email/1_main.py | Python | apache-2.0 | 436 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke Xcode compiler toolchain"""
from __future__ import absolute_import as _abs
import os
import sys
import subprocess
import json
from .._ffi.base import py_str
from . import util
def xcrun(cmd):
    """Run xcrun and return the output.

    Parameters
    ----------
    cmd : list of str
        The command sequence.

    Returns
    -------
    out : str
        The output string.
    """
    proc = subprocess.Popen(
        ["xcrun"] + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    out, _ = proc.communicate()
    return out.strip()
def codesign(lib):
    """Codesign the shared libary

    This is an required step for library to be loaded in
    the app.

    Parameters
    ----------
    lib : The path to the library.
    """
    if "TVM_IOS_CODESIGN" not in os.environ:
        raise RuntimeError("Require environment variable TVM_IOS_CODESIGN  to be the signature")
    cmd = ["codesign", "--force", "--sign", os.environ["TVM_IOS_CODESIGN"], lib]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError("Codesign error:\n" + py_str(out))
def create_dylib(output, objects, arch, sdk="macosx"):
    """Create dynamic library.

    Parameters
    ----------
    output : str
        The target shared library.

    objects : str or list of str
        Object file path(s) to link.

    arch : str
        Target major architectures

    sdk : str
        The sdk to be used.
    """
    # Locate the compiler and sysroot for the requested SDK via xcrun.
    compiler = xcrun(["-sdk", sdk, "-find", "clang"])
    sysroot = xcrun(["-sdk", sdk, "--show-sdk-path"])
    cmd = [
        compiler,
        "-dynamiclib",
        "-arch", arch,
        "-isysroot", sysroot,
        "-o", output,
    ]
    if isinstance(objects, str):
        cmd.append(objects)
    else:
        cmd.extend(objects)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError("Compilation error:\n" + py_str(out))


# assign so as default output format
create_dylib.output_format = "dylib"
def compile_metal(code, path_target=None, sdk="macosx"):
    """Compile metal with CLI tool from env.

    Parameters
    ----------
    code : str
        The metal shader source code.

    path_target : str, optional
        Output file.

    sdk : str, optional
        The target platform SDK.

    Return
    ------
    metallib : bytearray or None
        The bytearray of the metallib, or None if compilation failed.
    """
    temp = util.tempdir()
    temp_code = temp.relpath("my_lib.metal")
    temp_ir = temp.relpath("my_lib.air")
    temp_target = temp.relpath("my_lib.metallib")

    with open(temp_code, "w") as out_file:
        out_file.write(code)
    file_target = path_target if path_target else temp_target

    # See:
    # - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview  # pylint: disable=line-too-long
    #
    #   xcrun -sdk macosx metal -c MyLibrary.metal -o MyLibrary.air
    #   xcrun -sdk macosx metallib MyLibrary.air -o MyLibrary.metallib
    cmd1 = ["xcrun", "-sdk", sdk, "metal", "-O3"]
    cmd1 += ["-c", temp_code, "-o", temp_ir]
    cmd2 = ["xcrun", "-sdk", sdk, "metallib"]
    cmd2 += [temp_ir, "-o", file_target]
    proc = subprocess.Popen(
        " ".join(cmd1) + ";" + " ".join(cmd2),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        sys.stderr.write("Compilation error:\n")
        sys.stderr.write(py_str(out))
        sys.stderr.flush()
        libbin = None
    else:
        # Fix: read via a context manager so the file handle is closed
        # promptly (the original leaked an open file object).
        with open(file_target, "rb") as lib_file:
            libbin = bytearray(lib_file.read())
    return libbin
def compile_coreml(model, model_name="main", out_dir="."):
    """Compile coreml model and return the compiled model path."""
    mlmodel_path = os.path.join(out_dir, model_name + ".mlmodel")
    mlmodelc_path = os.path.join(out_dir, model_name + ".mlmodelc")
    # Use the description field to send info to CoreML runtime
    model.short_description = json.dumps(
        {
            "inputs": list(model.input_description),
            "outputs": list(model.output_description),
        }
    )
    model.save(mlmodel_path)

    res = xcrun(["coremlcompiler", "compile", mlmodel_path, out_dir])
    if not os.path.isdir(mlmodelc_path):
        raise RuntimeError("Compile failed: %s" % res)
    return mlmodelc_path
class XCodeRPCServer(object):
    """Wrapper for RPC server

    Parameters
    ----------
    cmd : list of str
        The command to run
    lock: FileLock
        Lock on the path
    """
    def __init__(self, cmd, lock):
        # The server process is launched immediately; the lock is held for
        # the lifetime of the process and released in join().
        self.proc = subprocess.Popen(cmd)
        self.lock = lock

    def join(self):
        """Wait server to finish and release its resource"""
        self.proc.wait()
        self.lock.release()
def popen_test_rpc(host, port, key, destination, libs=None, options=None):
    """Launch rpc server via xcodebuild test through another process.

    Parameters
    ----------
    host : str
        The address of RPC proxy host.

    port : int
        The port of RPC proxy host

    key : str
        The key of the RPC server

    destination : str
        Destination device of deployment, as in xcodebuild

    libs : list of str
        List of files to be packed into app/Frameworks/tvm
        These can be dylibs that can be loaed remoted by RPC.

    options : list of str
        Additional options to xcodebuild

    Returns
    -------
    proc : XCodeRPCServer
        The test rpc server process.
        Don't do wait() on proc, since it can terminate normally.
    """
    if "TVM_IOS_RPC_ROOT" in os.environ:
        rpc_root = os.environ["TVM_IOS_RPC_ROOT"]
    else:
        curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
        rpc_root = os.path.join(curr_path, "../../../apps/ios_rpc")
    proj_path = os.path.realpath(os.path.join(rpc_root, "tvmrpc.xcodeproj"))
    if not os.path.exists(proj_path):
        # Fix: the original applied "%" to a literal without a placeholder
        # (and "+"-concatenated the pieces), raising TypeError instead of
        # this intended RuntimeError.
        raise RuntimeError(
            "Cannot find tvmrpc.xcodeproj in %s,"
            " please set env TVM_IOS_RPC_ROOT correctly" % rpc_root
        )

    # Lock the path so only one file can run
    lock = util.filelock(os.path.join(rpc_root, "ios_rpc.lock"))

    with open(os.path.join(rpc_root, "rpc_config.txt"), "w") as fo:
        fo.write("%s %d %s\n" % (host, port, key))
        libs = libs if libs else []
        for file_name in libs:
            fo.write("%s\n" % file_name)

    cmd = [
        "xcrun",
        "xcodebuild",
        "-scheme",
        "tvmrpc",
        "-project",
        proj_path,
        "-destination",
        destination,
    ]
    if options:
        cmd += options
    cmd += ["test"]

    return XCodeRPCServer(cmd, lock)
| sxjscience/tvm | python/tvm/contrib/xcode.py | Python | apache-2.0 | 7,815 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
### ETL DAG Tutorial Documentation
This ETL DAG is compatible with Airflow 1.10.x (specifically tested with 1.10.12) and is referenced
as part of the documentation that goes along with the Airflow Functional DAG tutorial located
[here](https://airflow.apache.org/tutorial_decorated_flows.html)
"""
# [START tutorial]
# [START import_module]
import json
from datetime import datetime
from textwrap import dedent
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to operate!
from airflow.operators.python import PythonOperator
# [END import_module]
# [START instantiate_dag]
with DAG(
    'tutorial_etl_dag',
    # [START default_args]
    # These args will get passed on to each operator
    # You can override them on a per-task basis during operator initialization
    default_args={'retries': 2},
    # [END default_args]
    description='ETL DAG tutorial',
    # No automatic schedule: the DAG runs only when triggered.
    schedule_interval=None,
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=['example'],
) as dag:
    # [END instantiate_dag]
    # [START documentation]
    dag.doc_md = __doc__
    # [END documentation]
    # [START extract_function]
    def extract(**kwargs):
        # "Extract" step: the order data is a hard-coded JSON string that
        # simulates reading from a source system; it is handed on via XCom.
        ti = kwargs['ti']
        data_string = '{"1001": 301.27, "1002": 433.21, "1003": 502.22}'
        ti.xcom_push('order_data', data_string)
    # [END extract_function]
    # [START transform_function]
    def transform(**kwargs):
        # "Transform" step: pull the extracted JSON from XCom and total the
        # order values, pushing the result back as JSON.
        ti = kwargs['ti']
        extract_data_string = ti.xcom_pull(task_ids='extract', key='order_data')
        order_data = json.loads(extract_data_string)
        total_order_value = 0
        for value in order_data.values():
            total_order_value += value
        total_value = {"total_order_value": total_order_value}
        total_value_json_string = json.dumps(total_value)
        ti.xcom_push('total_order_value', total_value_json_string)
    # [END transform_function]
    # [START load_function]
    def load(**kwargs):
        # "Load" step: pull the computed total from XCom and print it in
        # place of writing to a real destination.
        ti = kwargs['ti']
        total_value_string = ti.xcom_pull(task_ids='transform', key='total_order_value')
        total_order_value = json.loads(total_value_string)
        print(total_order_value)
    # [END load_function]
    # [START main_flow]
    extract_task = PythonOperator(
        task_id='extract',
        python_callable=extract,
    )
    extract_task.doc_md = dedent(
        """\
    #### Extract task
    A simple Extract task to get data ready for the rest of the data pipeline.
    In this case, getting data is simulated by reading from a hardcoded JSON string.
    This data is then put into xcom, so that it can be processed by the next task.
    """
    )
    transform_task = PythonOperator(
        task_id='transform',
        python_callable=transform,
    )
    transform_task.doc_md = dedent(
        """\
    #### Transform task
    A simple Transform task which takes in the collection of order data from xcom
    and computes the total order value.
    This computed value is then put into xcom, so that it can be processed by the next task.
    """
    )
    load_task = PythonOperator(
        task_id='load',
        python_callable=load,
    )
    load_task.doc_md = dedent(
        """\
    #### Load task
    A simple Load task which takes in the result of the Transform task, by reading it
    from xcom and instead of saving it to end user review, just prints it out.
    """
    )
    extract_task >> transform_task >> load_task
    # [END main_flow]
| Acehaidrey/incubator-airflow | airflow/example_dags/tutorial_etl_dag.py | Python | apache-2.0 | 4,302 |
from django.apps import AppConfig
class AccountConfig(AppConfig):
    """Django app configuration for the EVE W-Space account module."""

    name = 'account'
    verbose_name = "EVE W-Space account module"

    def ready(self):
        """Register registries provided by this app.

        The registry modules are imported here instead of at module import
        time: Django guarantees the app registry is fully populated when
        ``ready()`` runs, whereas importing them when ``apps.py`` itself is
        imported can raise ``AppRegistryNotReady`` or trigger side effects
        before the framework is set up.
        """
        import account.profile_section_registry  # noqa: F401
        import account.group_admin_section_registry  # noqa: F401
        import account.user_admin_section_registry  # noqa: F401
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
from neutron_lib.db import standard_attr
import sqlalchemy as sa
class ProvisioningBlock(model_base.BASEV2):
    """DB row recording that an entity blocks provisioning of a resource.

    Keyed by (standard_attr_id, entity): the row's presence means the given
    entity has not yet finished provisioning the object identified by
    ``standard_attr_id``. Rows are removed with their object via the
    ``ON DELETE CASCADE`` foreign key.
    """

    # the standard attr id of the thing we want to block
    standard_attr_id = (
        sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
                  sa.ForeignKey(standard_attr.StandardAttribute.id,
                                ondelete="CASCADE"),
                  primary_key=True))
    # the entity that wants to block the status change (e.g. L2 Agent)
    entity = sa.Column(sa.String(255), nullable=False, primary_key=True)
| mahak/neutron | neutron/db/models/provisioning_block.py | Python | apache-2.0 | 1,178 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_delete
from pants.util.process_handler import subprocess
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class BinaryCreateIntegrationTest(PantsRunIntegrationTest):
  """Integration tests for the `binary` and `bundle` goals.

  Each test builds a target with pants, runs the resulting jar on a real JVM,
  and asserts on its stdout/stderr.
  """

  def test_autovalue_classfiles(self):
    self.build_and_run(
      pants_args=['binary', 'examples/src/java/org/pantsbuild/example/autovalue'],
      rel_out_path='dist',
      java_args=['-jar', 'autovalue.jar'],
      expected_output='Hello Autovalue!'
    )

  def test_manifest_entries(self):
    self.build_and_run(
      pants_args=['binary',
                  'testprojects/src/java/org/pantsbuild/testproject/manifest:manifest-with-source'],
      rel_out_path='dist',
      java_args=['-cp', 'manifest-with-source.jar', 'org.pantsbuild.testproject.manifest.Manifest'],
      expected_output='Hello World! Version: 1.2.3'
    )

  def test_manifest_entries_no_source(self):
    self.build_and_run(
      pants_args=['binary',
                  'testprojects/src/java/org/pantsbuild/testproject/manifest:manifest-no-source'],
      rel_out_path='dist',
      java_args=['-cp', 'manifest-no-source.jar', 'org.pantsbuild.testproject.manifest.Manifest'],
      expected_output='Hello World! Version: 4.5.6',
    )

  def test_manifest_entries_bundle(self):
    # package level manifest entry, in this case, `Implementation-Version`, no longer work
    # because package files are not included in the bundle jar, instead they are referenced
    # through its manifest's Class-Path.
    self.build_and_run(
      pants_args=['bundle',
                  'testprojects/src/java/org/pantsbuild/testproject/manifest:manifest-app'],
      rel_out_path=os.path.join('dist', ('testprojects.src.java.org.pantsbuild.testproject'
                                         '.manifest.manifest-app-bundle')),
      java_args=['-cp', 'manifest-no-source.jar', 'org.pantsbuild.testproject.manifest.Manifest'],
      expected_output='Hello World! Version: null',
    )

    # If we still want to get package level manifest entries, we need to include packages files
    # in the bundle jar through `--deployjar`. However use that with caution because the monolithic
    # jar may have multiple packages.
    self.build_and_run(
      pants_args=['bundle',
                  'testprojects/src/java/org/pantsbuild/testproject/manifest:manifest-app',
                  '--bundle-jvm-deployjar'],
      rel_out_path=os.path.join('dist', ('testprojects.src.java.org.pantsbuild.testproject'
                                         '.manifest.manifest-app-bundle')),
      java_args=['-cp', 'manifest-no-source.jar', 'org.pantsbuild.testproject.manifest.Manifest'],
      expected_output='Hello World! Version: 4.5.6',
    )

  def test_agent_dependency(self):
    directory = "testprojects/src/java/org/pantsbuild/testproject/manifest"
    target = "{}:manifest-with-agent".format(directory)
    with self.temporary_workdir() as workdir:
      pants_run = self.run_pants_with_workdir(["binary", target], workdir=workdir)
      self.assert_success(pants_run)
      jar = "dist/manifest-with-agent.jar"
      with open_zip(jar, mode='r') as j:
        with j.open("META-INF/MANIFEST.MF") as jar_entry:
          # Split each manifest header on the first ': ' only, so values that
          # themselves contain ': ' still parse into (name, value) pairs.
          entries = {tuple(line.strip().split(": ", 1))
                     for line in jar_entry.readlines() if line.strip()}
          self.assertIn(('Agent-Class', 'org.pantsbuild.testproject.manifest.Agent'), entries)

  def test_deploy_excludes(self):
    jar_filename = os.path.join('dist', 'deployexcludes.jar')
    safe_delete(jar_filename)
    command = [
      '--no-compile-zinc-capture-classpath',
      'binary',
      'testprojects/src/java/org/pantsbuild/testproject/deployexcludes',
    ]
    with self.pants_results(command) as pants_run:
      self.assert_success(pants_run)
      # The resulting binary should not contain any guava classes
      with open_zip(jar_filename) as jar_file:
        self.assertEqual({'META-INF/',
                          'META-INF/MANIFEST.MF',
                          'org/',
                          'org/pantsbuild/',
                          'org/pantsbuild/testproject/',
                          'org/pantsbuild/testproject/deployexcludes/',
                          'org/pantsbuild/testproject/deployexcludes/DeployExcludesMain.class'},
                         set(jar_file.namelist()))

      # This jar should not run by itself, missing symbols
      self.run_java(java_args=['-jar', jar_filename],
                    expected_returncode=1,
                    expected_output='java.lang.NoClassDefFoundError: '
                                    'com/google/common/collect/ImmutableSortedSet')

      # But adding back the deploy_excluded symbols should result in a clean run.
      classpath = [jar_filename,
                   os.path.join(pants_run.workdir,
                                'ivy/jars/com.google.guava/guava/jars/guava-18.0.jar')]
      self.run_java(java_args=['-cp', os.pathsep.join(classpath),
                               'org.pantsbuild.testproject.deployexcludes.DeployExcludesMain'],
                    expected_output='DeployExcludes Hello World')

  def build_and_run(self, pants_args, rel_out_path, java_args, expected_output):
    """Run pants with `pants_args`, then run java in `rel_out_path` and check output."""
    self.assert_success(self.run_pants(['clean-all']))
    with self.pants_results(pants_args, {}) as pants_run:
      self.assert_success(pants_run)

    out_path = os.path.join(get_buildroot(), rel_out_path)
    self.run_java(java_args=java_args, expected_output=expected_output, cwd=out_path)

  def run_java(self, java_args, expected_returncode=0, expected_output=None, cwd=None):
    """Invoke `java` with `java_args` and assert on exit code and output.

    `expected_output` is matched against stdout on success, stderr on failure.
    """
    command = ['java'] + java_args
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=cwd)
    stdout, stderr = process.communicate()
    self.assertEqual(expected_returncode, process.returncode,
                     ('Expected exit code {} from command `{}` but got {}:\n'
                      'stdout:\n{}\n'
                      'stderr:\n{}'
                      .format(expected_returncode,
                              ' '.join(command),
                              process.returncode,
                              stdout,
                              stderr)))
    self.assertIn(expected_output, stdout if expected_returncode == 0 else stderr)
| UnrememberMe/pants | tests/python/pants_test/backend/jvm/tasks/test_binary_create_integration.py | Python | apache-2.0 | 6,836 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import collections
from .google import Google
from .github import GitHub
from .linkedin import LinkedIn
# Ordered registry mapping provider name -> provider class; iteration order
# is the registration order below.
idMap = collections.OrderedDict()


def addProvider(provider):
    """Register an OAuth provider class in the ordered provider registry."""
    idMap[provider.getProviderName()] = provider


for _provider in (Google, GitHub, LinkedIn):
    addProvider(_provider)
| essamjoubori/girder | plugins/oauth/server/providers/__init__.py | Python | apache-2.0 | 1,071 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
from openstack import resource
from openstack import utils
class ServerGroup(resource.Resource):
    """Compute (Nova) server group resource.

    Handles the API change at microversion 2.64, where the list-valued
    ``policies`` field was replaced by a single ``policy`` plus ``rules``.
    """

    resource_key = 'server_group'
    resources_key = 'server_groups'
    base_path = '/os-server-groups'

    _query_mapping = resource.QueryParameters("all_projects")

    _max_microversion = '2.64'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_delete = True
    allow_list = True

    # Properties
    #: A name identifying the server group
    name = resource.Body('name')
    #: The list of policies supported by the server group (till 2.63)
    policies = resource.Body('policies')
    #: The policy field represents the name of the policy (from 2.64)
    policy = resource.Body('policy')
    #: The list of members in the server group
    member_ids = resource.Body('members')
    #: The metadata associated with the server group
    metadata = resource.Body('metadata')
    #: The project ID who owns the server group.
    project_id = resource.Body('project_id')
    #: The rules field, which is a dict, can be applied to the policy
    rules = resource.Body('rules', type=list, list_type=dict)
    #: The user ID who owns the server group
    user_id = resource.Body('user_id')

    def _get_microversion_for(self, session, action):
        """Get microversion to use for the given action.

        The base version uses :meth:`_get_microversion_for_list`.
        Subclasses can override this method if more complex logic is needed.

        :param session: :class`keystoneauth1.adapter.Adapter`
        :param action: One of "fetch", "commit", "create", "delete", "patch".
            Unused in the base implementation.
        :return: microversion as string or ``None``
        """
        if action not in ('fetch', 'commit', 'create', 'delete', 'patch'):
            raise ValueError('Invalid action: %s' % action)

        microversion = self._get_microversion_for_list(session)
        if action == 'create':
            # `policy` and `rules` are added with mv=2.64. In it also
            # `policies` are removed.
            if utils.supports_microversion(session, '2.64'):
                # New-style API: translate legacy `policies` into `policy`
                # and drop `policies` from the request body.
                if self.policies:
                    if not self.policy and isinstance(self.policies, list):
                        self.policy = self.policies[0]
                    self._body.clean(only={'policies'})
                microversion = self._max_microversion
            else:
                # Old-style API: `rules` cannot be expressed at all, and
                # `policy` must be sent as a one-element `policies` list.
                if self.rules:
                    message = ("API version %s is required to set rules, but "
                               "it is not available.") % 2.64
                    raise exceptions.NotSupported(message)
                if self.policy:
                    if not self.policies:
                        self.policies = [self.policy]
                    self._body.clean(only={'policy'})
        return microversion
| openstack/python-openstacksdk | openstack/compute/v2/server_group.py | Python | apache-2.0 | 3,440 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
  """Gradients for a Switch op is calculated using a Merge op.

  If the switch is a loop switch, it will be visited twice. We create
  the merge on the first visit, and update the other input of the merge
  on the second visit. A next_iteration is also added on second visit.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = op._get_control_flow_context()
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if isinstance(op_ctxt, WhileContext):
    merge_grad = grad_ctxt.grad_state.switch_map.get(op)
    if merge_grad is not None:
      # This is the second time this Switch is visited. It comes from
      # the non-exit branch of the Switch, so update the second input
      # to the Merge.
      # TODO(yuanbyu): Perform shape inference with this new input.
      if grad[1] is not None:
        # pylint: disable=protected-access
        control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1])
        # pylint: enable=protected-access
      return None, None
    elif grad[0] is not None:
      # This is the first time this Switch is visited. It comes from
      # the Exit branch, which is grad[0]. grad[1] is empty at this point.
      # Use grad[0] for both inputs to merge for now, but update the second
      # input of merge when we see this Switch the second time.
      merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
      grad_ctxt.grad_state.switch_map[op] = merge_grad
      return merge_grad, None
    else:
      # This is the first time this Switch is visited. It comes from the
      # Identity branch. Such a Switch has `None` gradient for the Exit branch,
      # meaning the output is not differentiable.
      return None, None
  elif isinstance(op_ctxt, CondContext):
    zero_grad = grad[1 - op_ctxt.branch]
    # At this point, we have created zero_grad guarded by the right switch.
    # Unfortunately, we may still get None here for not trainable data types.
    if zero_grad is None:
      return None, None
    return merge(grad, name="cond_grad")[0], None
  else:
    # Plain Switch outside any control-flow context: route each branch's
    # gradient through a Switch on the same predicate and merge the results.
    false_grad = switch(grad[0], op.inputs[1])[0]
    true_grad = switch(grad[1], op.inputs[1])[1]
    return merge([false_grad, true_grad])[0], None


# Switch and RefSwitch share the same gradient function.
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
  """Gradients for a Merge op are calculated using a Switch op.

  Three cases are handled below: the Merge belongs to a while loop, to a
  cond, or to no control-flow context at all.
  """
  input_op = op.inputs[0].op
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = control_flow_ops._GetOutputContext(input_op)
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if isinstance(op_ctxt, WhileContext):
    # pylint: disable=protected-access
    return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
    # pylint: enable=protected-access
  elif isinstance(op_ctxt, CondContext):
    pred = op_ctxt.pred
    if grad_ctxt and grad_ctxt.grad_state:
      # This Merge node is part of a cond within a loop.
      # The backprop needs to have the value of this predicate for every
      # iteration. So we must have its values accumulated in the forward, and
      # use the accumulated values as the predicate for this backprop switch.
      grad_state = grad_ctxt.grad_state
      real_pred = grad_state.history_map.get(pred.name)
      if real_pred is None:
        # Remember the value of pred for every iteration.
        grad_ctxt = grad_state.grad_context
        grad_ctxt.Exit()
        history_pred = grad_state.AddForwardAccumulator(pred)
        grad_ctxt.Enter()

        # Add the stack pop op. If pred.op is in a (outer) CondContext,
        # the stack pop will be guarded with a switch.
        real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)
        grad_state.history_map[pred.name] = real_pred
      pred = real_pred
    # pylint: disable=protected-access
    return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
    # pylint: enable=protected-access
  else:
    # No control-flow context: emit one Switch per input, selected by the
    # value index output of the forward Merge (op.outputs[1]).
    num_inputs = len(op.inputs)
    cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
    # pylint: disable=protected-access
    return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
            for i in xrange(num_inputs)]
    # pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
  """Gradient for RefMerge: delegates to the Merge gradient."""
  return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
  """Gradients for an exit op are calculated using an Enter op."""
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if not grad_ctxt.back_prop:
    # The flag `back_prop` is set by users to suppress gradient
    # computation for this loop. If the attribute `back_prop` is false,
    # no gradient computation.
    return None

  # pylint: disable=protected-access
  if op._get_control_flow_context().grad_state:
    raise TypeError("Second-order gradient for while loops not supported.")
  # pylint: enable=protected-access
  if isinstance(grad, ops.Tensor):
    grad_ctxt.AddName(grad.name)
  else:
    if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(grad))
    # Composite gradients: register each component tensor with the context.
    grad_ctxt.AddName(grad.values.name)
    grad_ctxt.AddName(grad.indices.name)
    dense_shape = grad.dense_shape
    if dense_shape is not None:
      grad_ctxt.AddName(dense_shape.name)
  grad_ctxt.Enter()
  # pylint: disable=protected-access
  result = control_flow_ops._Enter(
      grad, grad_ctxt.name, is_constant=False,
      parallel_iterations=grad_ctxt.parallel_iterations,
      name="b_exit")
  # pylint: enable=protected-access
  grad_ctxt.loop_enters.append(result)
  grad_ctxt.Exit()
  return result


# Exit and RefExit share the same gradient function.
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
  """A forward next_iteration is translated into a backprop identity.

  Note that the backprop next_iteration is added in switch grad.
  """
  return grad


@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
  """Gradient for RefNextIteration: delegates to the NextIteration gradient."""
  return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
  """Gradients for an Enter are calculated using an Exit op.

  For loop variables, grad is the gradient so just add an exit.
  For loop invariants, we need to add an accumulator loop.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if not grad_ctxt.back_prop:
    # Skip gradient computation, if the attribute `back_prop` is false.
    return grad
  if grad_ctxt.grad_state is None:
    # Pass the gradient through if we are not in a gradient while context.
    return grad
  if op.get_attr("is_constant"):
    # Add a gradient accumulator for each loop invariant.
    if isinstance(grad, ops.Tensor):
      result = grad_ctxt.AddBackpropAccumulator(op, grad)
    elif isinstance(grad, ops.IndexedSlices):
      result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)
    else:
      # TODO(yuanbyu, lukasr): Add support for SparseTensor.
      raise TypeError("Type %s not supported" % type(grad))
  else:
    # Loop variable: mirror the forward Enter with a backprop Exit.
    # NOTE: `exit` here is control_flow_ops.exit (wildcard import), not the
    # builtin.
    result = exit(grad)
    grad_ctxt.loop_exits.append(result)
    grad_ctxt.ExitResult([result])
  return result


@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
  """Gradient for RefEnter: delegates to the Enter gradient."""
  return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
  """Stop backprop for the predicate of a while loop."""
  return None
| guschmue/tensorflow | tensorflow/python/ops/control_flow_grad.py | Python | apache-2.0 | 9,075 |
"""Abstract base class for kernel clients"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import abc
#-----------------------------------------------------------------------------
# Main kernel client class
#-----------------------------------------------------------------------------
class KernelClientABC(object):
    """KernelManager ABC.

    The docstrings for this class can be found in the base implementation:

    `IPython.kernel.client.KernelClient`
    """

    # NOTE(review): `__metaclass__` is Python 2 syntax; under Python 3 this
    # assignment has no effect and the class would not be abstract — confirm
    # the supported Python versions before modernizing.
    __metaclass__ = abc.ABCMeta

    @abc.abstractproperty
    def kernel(self):
        pass

    # Channel class hooks: concrete clients supply the class used to
    # construct each channel (see the base implementation for the contract).
    @abc.abstractproperty
    def shell_channel_class(self):
        pass

    @abc.abstractproperty
    def iopub_channel_class(self):
        pass

    @abc.abstractproperty
    def hb_channel_class(self):
        pass

    @abc.abstractproperty
    def stdin_channel_class(self):
        pass

    #--------------------------------------------------------------------------
    # Channel management methods
    #--------------------------------------------------------------------------

    @abc.abstractmethod
    def start_channels(self, shell=True, iopub=True, stdin=True, hb=True):
        pass

    @abc.abstractmethod
    def stop_channels(self):
        pass

    @abc.abstractproperty
    def channels_running(self):
        pass

    # Accessors for the individual channel instances.
    @abc.abstractproperty
    def shell_channel(self):
        pass

    @abc.abstractproperty
    def iopub_channel(self):
        pass

    @abc.abstractproperty
    def stdin_channel(self):
        pass

    @abc.abstractproperty
    def hb_channel(self):
        pass
| noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/kernel/clientabc.py | Python | apache-2.0 | 2,081 |
def _status(status, msg):
    """Build a standard API status payload."""
    return {'status': status, 'msg': msg}


def _error(msg):
    """Build an error payload with the given message."""
    return _status('error', msg)


def _success(msg):
    """Build a success payload with the given message."""
    return _status('success', msg)


# json error messages for oauth, api parameters, getting, updating,
# uploading, and deleting images
oauth_error_json = _error("could not verify oauth credentials")
upload_image_blob_error_json = _error("failure to upload image to azure storage")
upload_image_db_error_json = _error("failure to upload metadata to documentdb")
delete_image_blob_error_json = _error("failure to delete image from azure storage")
delete_image_db_error_json = _error("failure to delete image from documentdb")
get_image_error_json = _error("failure to get image links from documentdb")
update_tags_error_json = _error("failure to update tags in documentdb")
api_parameters_error_json = _error("api request missing header or body parameters")

# json success messages for each api method
upload_image_success_json = _success("image uploaded")
delete_image_success_json = _success("image deleted")
update_tags_success_json = _success("tags updated")
| rjhunter8285/nsc-cloudproject-s22016 | prototype/api/FlaskApp/FlaskApp/azure_components/static/app_json.py | Python | apache-2.0 | 1,143 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from importlib import import_module
import logging
import os
import pkgutil
from horizon.utils import file_discovery
from openstack_dashboard import theme_settings
def import_submodules(module):
    """Import all submodules of *module* and return them keyed by short name.

    :param module: an imported package (must have ``__path__``).
    :returns: dict mapping the unqualified submodule name to the module
        object. Submodules that fail to import are logged and skipped so
        that one broken plugin file does not prevent the rest from loading.
    """
    submodules = {}
    for _loader, name, _ispkg in pkgutil.iter_modules(module.__path__,
                                                      module.__name__ + '.'):
        try:
            submodule = import_module(name)
        except ImportError as e:
            # FIXME: Make the errors non-fatal (do we want that?).
            logging.warning("Error importing %s", name)
            logging.exception(e)
        else:
            # Key by the last component of the dotted name.
            submodules[name.rsplit('.', 1)[1]] = submodule
    return submodules
def import_dashboard_config(modules):
    """Import configuration from all the modules and merge it.

    Returns a list of (key, config-dict) pairs sorted by the name of the
    file the configuration was loaded from, so later files override
    earlier ones when merged by the caller.
    """
    merged = collections.defaultdict(dict)
    for module in modules:
        for submodule in import_submodules(module).values():
            if hasattr(submodule, 'DASHBOARD'):
                merged[submodule.DASHBOARD].update(submodule.__dict__)
                continue
            if any(hasattr(submodule, attr)
                   for attr in ('PANEL', 'PANEL_GROUP', 'FEATURE')):
                # If enabled and local.enabled contain a file of the same
                # name, the one loaded later (i.e., local.enabled) wins.
                key = submodule.__name__.rsplit('.', 1)[1]
                merged[key] = submodule.__dict__
            else:
                logging.warning("Skipping %s because it doesn't have DASHBOARD"
                                ", PANEL, PANEL_GROUP, or FEATURE defined.",
                                submodule.__name__)
    return sorted(merged.items(),
                  key=lambda item: item[1]['__name__'].rsplit('.', 1)[1])
def update_dashboards(modules, horizon_config, installed_apps):
    """Imports dashboard and panel configuration from modules and applies it.

    The submodules from specified modules are imported, and the configuration
    for the specific dashboards is merged, with the later modules overriding
    settings from the former. Then the configuration is applied to
    horizon_config and installed_apps, in alphabetical order of files from
    which the configurations were imported.

    For example, given this setup:

        | foo/__init__.py
        | foo/_10_baz.py
        | foo/_20_qux.py

        | bar/__init__.py
        | bar/_30_baz_.py

    and being called with ``modules=[foo, bar]``, we will first have the
    configuration from ``_10_baz`` and ``_30_baz`` merged, then the
    configurations will be applied in order ``qux``, ``baz`` (``baz`` is
    second, because the most recent file which contributed to it, ``_30_baz``,
    comes after ``_20_qux``).

    Panel specific configurations are stored in horizon_config. Dashboards
    from both plugin-based and openstack_dashboard must be registered before
    the panel configuration can be applied. Making changes to the panel is
    deferred until the horizon autodiscover is completed, configurations are
    applied in alphabetical order of files where it was imported.
    """
    config_dashboards = horizon_config.get('dashboards', [])
    if config_dashboards or horizon_config.get('default_dashboard'):
        logging.warning(
            '"dashboards" and "default_dashboard" in (local_)settings is '
            'DEPRECATED now and may be unsupported in some future release. '
            'The preferred way to specify the order of dashboards and the '
            'default dashboard is the pluggable dashboard mechanism (in %s).',
            ', '.join([os.path.abspath(module.__path__[0])
                       for module in modules])
        )

    # Accumulators for each kind of configuration contributed by the
    # enabled-files; applied to horizon_config/installed_apps at the end.
    enabled_dashboards = []
    disabled_dashboards = []
    exceptions = horizon_config.get('exceptions', {})
    apps = []
    angular_modules = []
    js_files = []
    js_spec_files = []
    scss_files = []
    panel_customization = []
    update_horizon_config = {}
    for key, config in import_dashboard_config(modules):
        if config.get('DISABLED', False):
            if config.get('DASHBOARD'):
                disabled_dashboards.append(config.get('DASHBOARD'))
            continue

        _apps = config.get('ADD_INSTALLED_APPS', [])
        apps.extend(_apps)

        if config.get('AUTO_DISCOVER_STATIC_FILES', False):
            for _app in _apps:
                module = import_module(_app)
                base_path = os.path.join(module.__path__[0], 'static/')
                file_discovery.populate_horizon_config(horizon_config,
                                                       base_path)

        # Exception categories are merged (as de-duplicated tuples), not
        # replaced, so multiple plugins can contribute to one category.
        add_exceptions = config.get('ADD_EXCEPTIONS', {}).items()
        for category, exc_list in add_exceptions:
            exceptions[category] = tuple(set(exceptions.get(category, ())
                                             + exc_list))

        angular_modules.extend(config.get('ADD_ANGULAR_MODULES', []))
        # avoid pulling in dashboard javascript dependencies multiple times
        existing = set(js_files)
        js_files.extend([f for f in config.get('ADD_JS_FILES', [])
                         if f not in existing])
        js_spec_files.extend(config.get('ADD_JS_SPEC_FILES', []))
        scss_files.extend(config.get('ADD_SCSS_FILES', []))
        update_horizon_config.update(
            config.get('UPDATE_HORIZON_CONFIG', {}))
        if config.get('DASHBOARD'):
            dashboard = key
            enabled_dashboards.append(dashboard)
            if config.get('DEFAULT', False):
                horizon_config['default_dashboard'] = dashboard
        elif config.get('PANEL') or config.get('PANEL_GROUP'):
            config.pop("__builtins__", None)
            panel_customization.append(config)
    # Preserve the dashboard order specified in settings
    dashboards = ([d for d in config_dashboards
                   if d not in disabled_dashboards] +
                  [d for d in enabled_dashboards
                   if d not in config_dashboards])

    horizon_config['panel_customization'] = panel_customization
    horizon_config['dashboards'] = tuple(dashboards)
    horizon_config.setdefault('exceptions', {}).update(exceptions)
    horizon_config.update(update_horizon_config)
    horizon_config.setdefault('angular_modules', []).extend(angular_modules)
    horizon_config.setdefault('js_files', []).extend(js_files)
    horizon_config.setdefault('js_spec_files', []).extend(js_spec_files)
    horizon_config.setdefault('scss_files', []).extend(scss_files)

    # apps contains reference to applications declared in the enabled folder
    # basically a list of applications that are internal and external plugins
    # installed_apps contains reference to applications declared in settings
    # such as django.contrib.*, django_pyscss, compressor, horizon, etc...
    # for translation, we are only interested in the list of external plugins
    # so we save the reference to it before we append to installed_apps
    horizon_config.setdefault('plugins', []).extend(apps)
    installed_apps[0:0] = apps
# Order matters, list the xstatic module name and the entry point file(s) for
# that module (this is often defined as the "main" in bower.json, and
# as the xstatic module MAIN variable in the very few compliant xstatic
# modules). If the xstatic module does define a MAIN then set the files
# list to None.
# This list is to be used as the base list which is potentially added to in
# local_settings.py before being passed to get_xstatic_dirs()
BASE_XSTATIC_MODULES = [
    ('xstatic.pkg.jquery', ['jquery.js']),
    ('xstatic.pkg.jquery_migrate', ['jquery-migrate.js']),
    ('xstatic.pkg.angular', [
        'angular.js',
        'angular-cookies.js',
        'angular-sanitize.js',
        'angular-route.js'
    ]),
    ('xstatic.pkg.angular_bootstrap', ['angular-bootstrap.js']),
    ('xstatic.pkg.angular_gettext', None),
    ('xstatic.pkg.angular_lrdragndrop', None),
    ('xstatic.pkg.angular_smart_table', None),
    ('xstatic.pkg.angular_fileupload', ['ng-file-upload-all.js']),
    ('xstatic.pkg.d3', ['d3.js']),
    ('xstatic.pkg.jquery_quicksearch', ['jquery.quicksearch.js']),
    ('xstatic.pkg.jquery_tablesorter', ['jquery.tablesorter.js']),
    ('xstatic.pkg.jquery_ui', ['jquery-ui.js']),
    ('xstatic.pkg.bootstrap_scss', ['js/bootstrap.js']),
    ('xstatic.pkg.bootstrap_datepicker', ['bootstrap-datepicker.js']),
    ('xstatic.pkg.hogan', ['hogan.js']),
    ('xstatic.pkg.rickshaw', ['rickshaw.js']),
    ('xstatic.pkg.jsencrypt', None),
    ('xstatic.pkg.objectpath', ['ObjectPath.js']),
    ('xstatic.pkg.tv4', ['tv4.js']),
    ('xstatic.pkg.angular_schema_form', ['schema-form.js']),

    # @imported in scss files directly, so no JS entry points
    ('xstatic.pkg.font_awesome', []),
    ('xstatic.pkg.bootswatch', []),
    ('xstatic.pkg.roboto_fontface', []),
    ('xstatic.pkg.mdi', []),

    # testing only, not included in application
    ('xstatic.pkg.jasmine', []),
    ('xstatic.pkg.termjs', []),
]
def get_xstatic_dirs(XSTATIC_MODULES, HORIZON_CONFIG):
    """Discover static file configuration of the xstatic modules.

    For each (module name, files) pair in ``XSTATIC_MODULES`` the entry
    point files are resolved -- the xstatic package's own ``MAIN``
    variable wins when present -- and the package contents are mapped
    into the ``horizon/lib/`` static URL tree.

    The xstatic.pkg.jquery_ui package had its contents moved by
    packagers, so it is handled as a special case.

    The per-module JavaScript entry points are recorded (in order) in
    ``HORIZON_CONFIG['xstatic_lib_files']`` for linking in the HTML.

    :returns: list of (URL prefix, filesystem dir) tuples suitable for
        ``STATICFILES_DIRS``
    """
    static_dirs = []
    lib_files = HORIZON_CONFIG['xstatic_lib_files'] = []
    for module_name, entry_points in XSTATIC_MODULES:
        xstatic_module = import_module(module_name)
        if (module_name == 'xstatic.pkg.jquery_ui'
                and xstatic_module.VERSION.startswith('1.10.')):
            # jquery-ui 1.10.x releases already ship an 'ui' directory,
            # so the entry point lives one level deeper.
            entry_points = ['ui/' + entry_points[0]]
        static_dirs.append(
            ('horizon/lib/' + xstatic_module.NAME, xstatic_module.BASE_DIR))
        # Prefer the entry points declared by the xstatic package itself.
        if hasattr(xstatic_module, 'MAIN'):
            entry_points = xstatic_module.MAIN
            if not isinstance(entry_points, list):
                entry_points = [entry_points]
        # Only JavaScript is <script>-linked here; css and other assets
        # are included explicitly in style/themes as appropriate.
        lib_files.extend(
            'horizon/lib/' + xstatic_module.NAME + '/' + entry_point
            for entry_point in entry_points
            if entry_point.endswith('.js'))
    return static_dirs
def find_static_files(
        HORIZON_CONFIG,
        AVAILABLE_THEMES,
        THEME_COLLECTION_DIR,
        ROOT_PATH):
    """Discover static files and record them in ``HORIZON_CONFIG``.

    Scans the horizon tree, the openstack_dashboard tree, and every
    available theme, mutating ``HORIZON_CONFIG`` in place with the
    discovered JS files and theme template overrides (used later by
    template tags).
    """
    # Local imports: these packages are inspected for their on-disk
    # location -- presumably imported here rather than at module level to
    # avoid circular imports at settings load time (TODO confirm).
    import horizon
    import openstack_dashboard
    os_dashboard_home_dir = openstack_dashboard.__path__[0]
    horizon_home_dir = horizon.__path__[0]
    # note the path must end in a '/' or the resultant file paths will have a
    # leading "/"
    file_discovery.populate_horizon_config(
        HORIZON_CONFIG,
        os.path.join(horizon_home_dir, 'static/')
    )
    # filter out non-angular javascript code and lib
    HORIZON_CONFIG['js_files'] = ([f for f in HORIZON_CONFIG['js_files']
                                   if not f.startswith('horizon/')])
    # note the path must end in a '/' or the resultant file paths will have a
    # leading "/"
    file_discovery.populate_horizon_config(
        HORIZON_CONFIG,
        os.path.join(os_dashboard_home_dir, 'static/'),
        sub_path='app/'
    )
    # Discover theme static resources, and in particular any
    # static HTML (client-side) that the theme overrides
    theme_static_files = {}
    theme_info = theme_settings.get_theme_static_dirs(
        AVAILABLE_THEMES,
        THEME_COLLECTION_DIR,
        ROOT_PATH)
    for url, path in theme_info:
        discovered_files = {}
        # discover static files provided by the theme
        file_discovery.populate_horizon_config(
            discovered_files,
            path
        )
        # Get the theme name from the theme url
        theme_name = url.split('/')[-1]
        # build a dictionary of this theme's static HTML templates.
        # For each overridden template, strip off the '/templates/' part of the
        # theme filename then use that name as the key, and the location in the
        # theme directory as the value. This allows the quick lookup of
        # theme path for any file overridden by a theme template
        template_overrides = {}
        for theme_file in discovered_files['external_templates']:
            # Example:
            # external_templates_dict[
            #     'framework/widgets/help-panel/help-panel.html'
            # ] = 'themes/material/templates/framework/widgets/\
            #     help-panel/help-panel.html'
            # templates_part (the prefix before '/templates/') is
            # intentionally discarded; only the suffix keys the override.
            (templates_part, override_path) = theme_file.split('/templates/')
            template_overrides[override_path] = 'themes/' + \
                theme_name + theme_file
        discovered_files['template_overrides'] = template_overrides
        # Save all of the discovered file info for this theme in our
        # 'theme_files' object using the theme name as the key
        theme_static_files[theme_name] = discovered_files
    # Add the theme file info to the horizon config for use by template tags
    HORIZON_CONFIG['theme_static_files'] = theme_static_files
| BiznetGIO/horizon | openstack_dashboard/utils/settings.py | Python | apache-2.0 | 14,525 |
"""Let's Encrypt client crypto utility functions.
.. todo:: Make the transition to use PSS rather than PKCS1_v1_5 when the server
is capable of handling the signatures.
"""
import logging
import os
import OpenSSL
import zope.component
from acme import crypto_util as acme_crypto_util
from acme import jose
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
logger = logging.getLogger(__name__)
# High level functions
def init_save_key(key_size, key_dir, keyname="key-letsencrypt.pem"):
    """Initialize and save an RSA private key.

    The key is generated and written to ``key_dir`` in PEM format.

    .. note:: *keyname* is only the attempted filename; the actual name
        may differ if a file already exists at that path.

    :param int key_size: RSA key size in bits
    :param str key_dir: directory in which the key is saved
    :param str keyname: preferred filename for the key

    :returns: the saved key
    :rtype: :class:`letsencrypt.le_util.Key`

    :raises ValueError: If unable to generate the key given key_size.

    """
    try:
        pem = make_key(key_size)
    except ValueError as err:
        logger.exception(err)
        raise err
    config = zope.component.getUtility(interfaces.IConfig)
    # Ensure the destination directory exists with safe permissions,
    # then pick a unique filename so an existing key is never clobbered.
    le_util.make_or_verify_dir(key_dir, 0o700, os.geteuid(),
                               config.strict_permissions)
    key_file, key_path = le_util.unique_file(
        os.path.join(key_dir, keyname), 0o600)
    key_file.write(pem)
    key_file.close()
    logger.info("Generating key (%d bits): %s", key_size, key_path)
    return le_util.Key(key_path, pem)
def init_save_csr(privkey, names, path, csrname="csr-letsencrypt.pem"):
    """Initialize a CSR with the given private key and save it.

    :param privkey: Key to include in the CSR
    :type privkey: :class:`letsencrypt.le_util.Key`

    :param set names: `str` names to include in the CSR

    :param str path: Certificate save directory.

    :returns: CSR
    :rtype: :class:`letsencrypt.le_util.CSR`

    """
    pem, der = make_csr(privkey.pem, names)
    config = zope.component.getUtility(interfaces.IConfig)
    # Ensure the destination directory exists, then pick a unique
    # filename so an existing CSR is never overwritten.
    le_util.make_or_verify_dir(path, 0o755, os.geteuid(),
                               config.strict_permissions)
    csr_file, csr_filename = le_util.unique_file(
        os.path.join(path, csrname), 0o644)
    csr_file.write(pem)
    csr_file.close()
    logger.info("Creating CSR: %s", csr_filename)
    return le_util.CSR(csr_filename, der, "der")
# Lower level functions
def make_csr(key_str, domains):
    """Generate a CSR.

    :param str key_str: PEM-encoded RSA key.
    :param list domains: Domains included in the certificate.

    .. todo:: Detect duplicates in `domains`? Using a set doesn't
        preserve order...

    :returns: new CSR in PEM and DER form containing all domains
    :rtype: tuple

    """
    assert domains, "Must provide one or more hostnames for the CSR."
    key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_str)
    request = OpenSSL.crypto.X509Req()
    # The first domain doubles as the subject common name.
    request.get_subject().CN = domains[0]
    # TODO: what to put into req.get_subject()?
    # TODO: put SAN if len(domains) > 1
    san_value = ", ".join("DNS:%s" % domain for domain in domains)
    request.add_extensions([
        OpenSSL.crypto.X509Extension(
            "subjectAltName", critical=False, value=san_value),
    ])
    request.set_pubkey(key)
    request.sign(key, "sha256")
    dump = OpenSSL.crypto.dump_certificate_request
    return (dump(OpenSSL.crypto.FILETYPE_PEM, request),
            dump(OpenSSL.crypto.FILETYPE_ASN1, request))
# WARNING: the csr and private key file are possible attack vectors for TOCTOU
# We should either...
# A. Do more checks to verify that the CSR is trusted/valid
# B. Audit the parsing code for vulnerabilities
def valid_csr(csr):
    """Validate CSR.

    Check if `csr` is a valid CSR with a self-consistent signature.

    :param str csr: CSR in PEM.

    :returns: Validity of CSR.
    :rtype: bool

    """
    try:
        request = OpenSSL.crypto.load_certificate_request(
            OpenSSL.crypto.FILETYPE_PEM, csr)
        return request.verify(request.get_pubkey())
    except OpenSSL.crypto.Error as error:
        # Unparseable or unverifiable input is simply "not valid".
        logger.debug(error, exc_info=True)
        return False
def csr_matches_pubkey(csr, privkey):
    """Does private key correspond to the subject public key in the CSR?

    :param str csr: CSR in PEM.
    :param str privkey: Private key file contents (PEM)

    :returns: Correspondence of private key to CSR subject public key.
    :rtype: bool

    """
    request = OpenSSL.crypto.load_certificate_request(
        OpenSSL.crypto.FILETYPE_PEM, csr)
    key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey)
    try:
        # Verifying the CSR signature against the key checks the match.
        return request.verify(key)
    except OpenSSL.crypto.Error as error:
        logger.debug(error, exc_info=True)
        return False
def make_key(bits):
    """Generate PEM encoded RSA key.

    :param int bits: Number of bits, at least 1024.

    :returns: new RSA key in PEM form with specified number of bits
    :rtype: str

    :raises ValueError: if ``bits`` is less than 1024

    """
    # Raise a real exception instead of using ``assert``: callers (e.g.
    # init_save_key) document and catch ValueError from this function,
    # and asserts are stripped entirely when Python runs with -O.
    if bits < 1024:
        raise ValueError(
            "Unsupported RSA key length: %d (minimum is 1024 bits)" % bits)
    key = OpenSSL.crypto.PKey()
    key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
    return OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
def valid_privkey(privkey):
    """Is valid RSA private key?

    :param str privkey: Private key file contents in PEM

    :returns: Validity of private key.
    :rtype: bool

    """
    try:
        key = OpenSSL.crypto.load_privatekey(
            OpenSSL.crypto.FILETYPE_PEM, privkey)
        # check() also stays inside the try: it may raise for
        # non-RSA or otherwise unsupported key types.
        return key.check()
    except (TypeError, OpenSSL.crypto.Error):
        return False
def pyopenssl_load_certificate(data):
    """Load PEM/DER certificate.

    Tries PEM first, then DER, returning the loaded certificate together
    with the file type that succeeded.

    :raises errors.Error: if neither encoding parses

    """
    failures = []
    for filetype in (OpenSSL.crypto.FILETYPE_PEM,
                     OpenSSL.crypto.FILETYPE_ASN1):
        try:
            return OpenSSL.crypto.load_certificate(filetype, data), filetype
        except OpenSSL.crypto.Error as error:  # TODO: other errors?
            failures.append(error)
    raise errors.Error("Unable to load: {0}".format(",".join(
        str(error) for error in failures)))
def _get_sans_from_cert_or_req(cert_or_req_str, load_func,
                               typ=OpenSSL.crypto.FILETYPE_PEM):
    """Load a cert/CSR with *load_func* and return its SAN list."""
    try:
        loaded = load_func(typ, cert_or_req_str)
    except OpenSSL.crypto.Error as error:
        logger.exception(error)
        raise
    # Delegate SAN extraction to acme's (private) helper.
    # pylint: disable=protected-access
    return acme_crypto_util._pyopenssl_cert_or_req_san(loaded)
def get_sans_from_cert(cert, typ=OpenSSL.crypto.FILETYPE_PEM):
    """Get a list of Subject Alternative Names from a certificate.

    :param str cert: Certificate (encoded).
    :param typ: `OpenSSL.crypto.FILETYPE_PEM` or `OpenSSL.crypto.FILETYPE_ASN1`

    :returns: A list of Subject Alternative Names.
    :rtype: list

    """
    loader = OpenSSL.crypto.load_certificate
    return _get_sans_from_cert_or_req(cert, loader, typ)
def get_sans_from_csr(csr, typ=OpenSSL.crypto.FILETYPE_PEM):
    """Get a list of Subject Alternative Names from a CSR.

    :param str csr: CSR (encoded).
    :param typ: `OpenSSL.crypto.FILETYPE_PEM` or `OpenSSL.crypto.FILETYPE_ASN1`

    :returns: A list of Subject Alternative Names.
    :rtype: list

    """
    loader = OpenSSL.crypto.load_certificate_request
    return _get_sans_from_cert_or_req(csr, loader, typ)
def dump_pyopenssl_chain(chain, filetype=OpenSSL.crypto.FILETYPE_PEM):
    """Dump certificate chain into a bundle.

    :param list chain: List of `OpenSSL.crypto.X509` (or wrapped in
        `acme.jose.ComparableX509`).

    """
    # XXX: an empty chain yields an empty string, which shuts up
    # RenewableCert but might not be the best solution...
    def _unwrap(cert):
        """Return the raw X509, unwrapping ComparableX509 if needed."""
        if isinstance(cert, jose.ComparableX509):
            # pylint: disable=protected-access
            return cert._wrapped
        return cert

    # Assumes OpenSSL.crypto.dump_certificate includes a trailing
    # newline, so plain concatenation yields a valid bundle.
    return "".join(
        OpenSSL.crypto.dump_certificate(filetype, _unwrap(cert))
        for cert in chain)
| g1franc/lets-encrypt-preview | letsencrypt/crypto_util.py | Python | apache-2.0 | 8,163 |
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from compose.config.config import ConfigDetails
from compose.config.config import ConfigFile
from compose.config.config import load
def build_config(contents, **kwargs):
    """Parse raw *contents* into a Compose config via ``load``."""
    details = build_config_details(contents, **kwargs)
    return load(details)
def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
    """Wrap raw config *contents* in a single-file ConfigDetails."""
    config_file = ConfigFile(filename, contents)
    return ConfigDetails(working_dir, [config_file])
def create_host_file(client, filename):
    """Recreate a local file at the same path inside a Docker host.

    Reads *filename* from the local filesystem, then runs a short-lived
    busybox container (with the file's directory bind-mounted) that
    echoes the content back into place on the host.

    :param client: docker API client used to create/start/remove the
        container
    :param filename: absolute path of the file to copy to the host

    :raises Exception: if the container exits non-zero
    """
    dirname = os.path.dirname(filename)
    with open(filename, 'r') as fh:
        content = fh.read()
    # NOTE(review): the content is interpolated into a double-quoted
    # shell string, so content containing '"', '$' or backslashes would
    # presumably break or be expanded by the shell -- fine for the simple
    # fixture files this helper is used with, but verify before reuse.
    container = client.create_container(
        'busybox:latest',
        ['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
        volumes={dirname: {}},
        host_config=client.create_host_config(
            binds={dirname: {'bind': dirname, 'ro': False}},
            network_mode='none',
        ),
    )
    try:
        client.start(container)
        exitcode = client.wait(container)
        if exitcode != 0:
            output = client.logs(container)
            raise Exception(
                "Container exited with code {}:\n{}".format(exitcode, output))
    finally:
        # Always clean up the helper container, even on failure.
        client.remove_container(container, force=True)
| sdurrheimer/compose | tests/helpers.py | Python | apache-2.0 | 1,309 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.streams import StreamsEosTestDriverService, StreamsEosTestJobRunnerService, \
StreamsComplexEosTestJobRunnerService, StreamsEosTestVerifyRunnerService, StreamsComplexEosTestVerifyRunnerService
class StreamsEosTest(KafkaTest):
    """
    Test of Kafka Streams exactly-once semantics
    """
    def __init__(self, test_context):
        # 3 brokers with replication factor 2 so the cluster tolerates
        # broker bounces while clients are started/stopped/aborted.
        super(StreamsEosTest, self).__init__(test_context, num_zk=1, num_brokers=3, topics={
            'data': {'partitions': 5, 'replication-factor': 2},
            'echo': {'partitions': 5, 'replication-factor': 2},
            'min': {'partitions': 5, 'replication-factor': 2},
            'sum': {'partitions': 5, 'replication-factor': 2},
            'repartition': {'partitions': 5, 'replication-factor': 2},
            'max': {'partitions': 5, 'replication-factor': 2},
            'cnt': {'partitions': 5, 'replication-factor': 2}
        })
        self.driver = StreamsEosTestDriverService(test_context, self.kafka)
        self.test_context = test_context
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_v2")
    def test_rebalance_simple(self, processing_guarantee):
        # Three identical "simple" job runners plus the matching verifier.
        self.run_rebalance(StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsEosTestVerifyRunnerService(self.test_context, self.kafka))
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_v2")
    def test_rebalance_complex(self, processing_guarantee):
        # Same scenario as test_rebalance_simple but with the "complex"
        # topology runners/verifier.
        self.run_rebalance(StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                           StreamsComplexEosTestVerifyRunnerService(self.test_context, self.kafka))
    def run_rebalance(self, processor1, processor2, processor3, verifier):
        """
        Starts and stops two test clients a few times.
        Ensure that all records are delivered exactly-once.
        """
        self.driver.start()
        self.add_streams(processor1)
        # Disable state-dir cleanup so processor1 must recover from its
        # existing local state across the restarts below.
        processor1.clean_node_enabled = False
        self.add_streams2(processor1, processor2)
        self.add_streams3(processor1, processor2, processor3)
        self.stop_streams3(processor2, processor3, processor1)
        self.add_streams3(processor2, processor3, processor1)
        self.stop_streams3(processor1, processor3, processor2)
        self.stop_streams2(processor1, processor3)
        self.stop_streams(processor1)
        processor1.clean_node_enabled = True
        self.driver.stop()
        # The verifier re-reads the output topics and checks that every
        # record was delivered exactly once.
        verifier.start()
        verifier.wait()
        verifier.node.account.ssh("grep ALL-RECORDS-DELIVERED %s" % verifier.STDOUT_FILE, allow_fail=False)
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_v2")
    def test_failure_and_recovery(self, processing_guarantee):
        self.run_failure_and_recovery(StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsEosTestVerifyRunnerService(self.test_context, self.kafka))
    @cluster(num_nodes=9)
    @parametrize(processing_guarantee="exactly_once")
    @parametrize(processing_guarantee="exactly_once_v2")
    def test_failure_and_recovery_complex(self, processing_guarantee):
        self.run_failure_and_recovery(StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsComplexEosTestJobRunnerService(self.test_context, self.kafka, processing_guarantee),
                                      StreamsComplexEosTestVerifyRunnerService(self.test_context, self.kafka))
    def run_failure_and_recovery(self, processor1, processor2, processor3, verifier):
        """
        Starts two test clients, then abort (kill -9) and restart them a few times.
        Ensure that all records are delivered exactly-once.
        """
        self.driver.start()
        self.add_streams(processor1)
        # Keep processor1's local state across the kill/restart cycles.
        processor1.clean_node_enabled = False
        self.add_streams2(processor1, processor2)
        self.add_streams3(processor1, processor2, processor3)
        self.abort_streams(processor2, processor3, processor1)
        self.add_streams3(processor2, processor3, processor1)
        self.abort_streams(processor2, processor3, processor1)
        self.add_streams3(processor2, processor3, processor1)
        self.abort_streams(processor1, processor3, processor2)
        self.stop_streams2(processor1, processor3)
        self.stop_streams(processor1)
        processor1.clean_node_enabled = True
        self.driver.stop()
        verifier.start()
        verifier.wait()
        verifier.node.account.ssh("grep ALL-RECORDS-DELIVERED %s" % verifier.STDOUT_FILE, allow_fail=False)
    def add_streams(self, processor):
        """Start *processor* and wait until it reaches RUNNING and processes data."""
        with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor:
            processor.start()
            self.wait_for_startup(monitor, processor)
    def add_streams2(self, running_processor, processor_to_be_started):
        """Start a second client and wait until the running one re-stabilizes."""
        with running_processor.node.account.monitor_log(running_processor.STDOUT_FILE) as monitor:
            self.add_streams(processor_to_be_started)
            self.wait_for_startup(monitor, running_processor)
    def add_streams3(self, running_processor1, running_processor2, processor_to_be_started):
        """Start a third client and wait until both running ones re-stabilize."""
        with running_processor1.node.account.monitor_log(running_processor1.STDOUT_FILE) as monitor:
            self.add_streams2(running_processor2, processor_to_be_started)
            self.wait_for_startup(monitor, running_processor1)
    def stop_streams(self, processor_to_be_stopped):
        """Cleanly stop a client and wait for its shutdown state transition."""
        with processor_to_be_stopped.node.account.monitor_log(processor_to_be_stopped.STDOUT_FILE) as monitor2:
            processor_to_be_stopped.stop()
            self.wait_for(monitor2, processor_to_be_stopped, "StateChange: PENDING_SHUTDOWN -> NOT_RUNNING")
    def stop_streams2(self, keep_alive_processor, processor_to_be_stopped):
        """Stop one client and wait until the surviving one re-stabilizes."""
        with keep_alive_processor.node.account.monitor_log(keep_alive_processor.STDOUT_FILE) as monitor:
            self.stop_streams(processor_to_be_stopped)
            self.wait_for_startup(monitor, keep_alive_processor)
    def stop_streams3(self, keep_alive_processor1, keep_alive_processor2, processor_to_be_stopped):
        """Stop one client and wait until both surviving ones re-stabilize."""
        with keep_alive_processor1.node.account.monitor_log(keep_alive_processor1.STDOUT_FILE) as monitor:
            self.stop_streams2(keep_alive_processor2, processor_to_be_stopped)
            self.wait_for_startup(monitor, keep_alive_processor1)
    def abort_streams(self, keep_alive_processor1, keep_alive_processor2, processor_to_be_aborted):
        """Hard-kill a client (no clean shutdown) and wait for the survivors to rebalance."""
        with keep_alive_processor1.node.account.monitor_log(keep_alive_processor1.STDOUT_FILE) as monitor1:
            with keep_alive_processor2.node.account.monitor_log(keep_alive_processor2.STDOUT_FILE) as monitor2:
                # stop_nodes(False) skips the clean shutdown (kill -9).
                processor_to_be_aborted.stop_nodes(False)
                self.wait_for_startup(monitor2, keep_alive_processor2)
            self.wait_for_startup(monitor1, keep_alive_processor1)
    def wait_for_startup(self, monitor, processor):
        """Wait until *processor* reports RUNNING and has processed records."""
        self.wait_for(monitor, processor, "StateChange: REBALANCING -> RUNNING")
        self.wait_for(monitor, processor, "processed [0-9]* records from topic")
    def wait_for(self, monitor, processor, output):
        # *output* is a regex matched against the processor's stdout log.
        monitor.wait_until(output,
                           timeout_sec=480,
                           err_msg=("Never saw output '%s' on " % output) + str(processor.node.account))
| TiVo/kafka | tests/kafkatest/tests/streams/streams_eos_test.py | Python | apache-2.0 | 9,430 |
import math
import gzip
import paddle.v2 as paddle
import paddle.v2.evaluator as evaluator
import conll03
import itertools
# init dataset: CoNLL-03 NER corpus files and pretrained word embeddings
# (paths are relative to the working directory). NOTE(review): these run
# at import time and read files from disk.
train_data_file = 'data/train'
test_data_file = 'data/test'
vocab_file = 'data/vocab.txt'
target_file = 'data/target.txt'
emb_file = 'data/wordVectors.txt'
train_data_reader = conll03.train(train_data_file, vocab_file, target_file)
test_data_reader = conll03.test(test_data_file, vocab_file, target_file)
word_dict, label_dict = conll03.get_dict(vocab_file, target_file)
word_vector_values = conll03.get_embedding(emb_file)
# init hyper-params
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
# 'mark' is a binary capitalization feature, hence 2 values.
mark_dict_len = 2
word_dim = 50
mark_dim = 5
hidden_dim = 300
mix_hidden_lr = 1e-3
default_std = 1 / math.sqrt(hidden_dim) / 3.0
# Word embedding table: initialized from the pretrained vectors and kept
# static (is_static=True means it is not updated during training).
emb_para = paddle.attr.Param(
    name='emb', initial_std=math.sqrt(1. / word_dim), is_static=True)
std_0 = paddle.attr.Param(initial_std=0.)
std_default = paddle.attr.Param(initial_std=default_std)
def d_type(size):
    # Helper: integer sequence data type of the given cardinality.
    return paddle.data_type.integer_value_sequence(size)
def ner_net(is_train):
    """Build the NER network topology.

    Word + capitalization-mark embeddings feed a hidden layer followed by
    two stacked bidirectional recurrent layers, mixed into a final hidden
    layer with a CRF on top.

    :param is_train: if True, return (crf_cost, crf_decoding, target) for
        training/evaluation; otherwise return only the CRF decoding layer
        for inference.
    """
    word = paddle.layer.data(name='word', type=d_type(word_dict_len))
    mark = paddle.layer.data(name='mark', type=d_type(mark_dict_len))
    word_embedding = paddle.layer.mixed(
        name='word_embedding',
        size=word_dim,
        input=paddle.layer.table_projection(input=word, param_attr=emb_para))
    mark_embedding = paddle.layer.mixed(
        name='mark_embedding',
        size=mark_dim,
        input=paddle.layer.table_projection(input=mark, param_attr=std_0))
    # Concatenate word and capitalization embeddings into one feature vector.
    emb_layers = [word_embedding, mark_embedding]
    word_caps_vector = paddle.layer.concat(
        name='word_caps_vector', input=emb_layers)
    hidden_1 = paddle.layer.mixed(
        name='hidden1',
        size=hidden_dim,
        act=paddle.activation.Tanh(),
        bias_attr=std_default,
        input=[
            paddle.layer.full_matrix_projection(
                input=word_caps_vector, param_attr=std_default)
        ])
    rnn_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=0.1)
    hidden_para_attr = paddle.attr.Param(
        initial_std=default_std, learning_rate=mix_hidden_lr)
    # First recurrent pair: forward (rnn1-1) and backward (rnn1-2, reverse=1).
    rnn_1_1 = paddle.layer.recurrent(
        name='rnn1-1',
        input=hidden_1,
        act=paddle.activation.Relu(),
        bias_attr=std_0,
        param_attr=rnn_para_attr)
    rnn_1_2 = paddle.layer.recurrent(
        name='rnn1-2',
        input=hidden_1,
        act=paddle.activation.Relu(),
        reverse=1,
        bias_attr=std_0,
        param_attr=rnn_para_attr)
    # Second layer mixes the first hidden layer with each RNN direction.
    hidden_2_1 = paddle.layer.mixed(
        name='hidden2-1',
        size=hidden_dim,
        bias_attr=std_default,
        act=paddle.activation.STanh(),
        input=[
            paddle.layer.full_matrix_projection(
                input=hidden_1, param_attr=hidden_para_attr),
            paddle.layer.full_matrix_projection(
                input=rnn_1_1, param_attr=rnn_para_attr)
        ])
    hidden_2_2 = paddle.layer.mixed(
        name='hidden2-2',
        size=hidden_dim,
        bias_attr=std_default,
        act=paddle.activation.STanh(),
        input=[
            paddle.layer.full_matrix_projection(
                input=hidden_1, param_attr=hidden_para_attr),
            paddle.layer.full_matrix_projection(
                input=rnn_1_2, param_attr=rnn_para_attr)
        ])
    # Second recurrent pair: directions are swapped relative to layer 1.
    rnn_2_1 = paddle.layer.recurrent(
        name='rnn2-1',
        input=hidden_2_1,
        act=paddle.activation.Relu(),
        reverse=1,
        bias_attr=std_0,
        param_attr=rnn_para_attr)
    rnn_2_2 = paddle.layer.recurrent(
        name='rnn2-2',
        input=hidden_2_2,
        act=paddle.activation.Relu(),
        bias_attr=std_0,
        param_attr=rnn_para_attr)
    # Final hidden layer combines both second-layer hidden states and RNNs.
    hidden_3 = paddle.layer.mixed(
        name='hidden3',
        size=hidden_dim,
        bias_attr=std_default,
        act=paddle.activation.STanh(),
        input=[
            paddle.layer.full_matrix_projection(
                input=hidden_2_1, param_attr=hidden_para_attr),
            paddle.layer.full_matrix_projection(
                input=rnn_2_1,
                param_attr=rnn_para_attr), paddle.layer.full_matrix_projection(
                input=hidden_2_2, param_attr=hidden_para_attr),
            paddle.layer.full_matrix_projection(
                input=rnn_2_2, param_attr=rnn_para_attr)
        ])
    # Per-token emission scores over the label vocabulary (input to the CRF).
    output = paddle.layer.mixed(
        name='output',
        size=label_dict_len,
        bias_attr=False,
        input=[
            paddle.layer.full_matrix_projection(
                input=hidden_3, param_attr=std_default)
        ])
    if is_train:
        target = paddle.layer.data(name='target', type=d_type(label_dict_len))
        # Training cost and decoding share the same CRF weights ('crfw').
        crf_cost = paddle.layer.crf(
            size=label_dict_len,
            input=output,
            label=target,
            param_attr=paddle.attr.Param(
                name='crfw',
                initial_std=default_std,
                learning_rate=mix_hidden_lr))
        crf_dec = paddle.layer.crf_decoding(
            size=label_dict_len,
            input=output,
            label=target,
            param_attr=paddle.attr.Param(name='crfw'))
        return crf_cost, crf_dec, target
    else:
        predict = paddle.layer.crf_decoding(
            size=label_dict_len,
            input=output,
            param_attr=paddle.attr.Param(name='crfw'))
        return predict
def ner_net_train(data_reader=train_data_reader, num_passes=1):
    """Train the NER network.

    Saves parameters to ``params_pass_<N>.tar.gz`` after each pass and
    returns the trained parameters.

    :param data_reader: reader yielding (word, mark, target) samples
    :param num_passes: number of training passes over the data
    """
    # define network topology
    crf_cost, crf_dec, target = ner_net(is_train=True)
    evaluator.sum(name='error', input=crf_dec)
    # Chunk evaluator computes precision/recall/F1 over IOB spans.
    evaluator.chunk(
        name='ner_chunk',
        input=crf_dec,
        label=target,
        chunk_scheme='IOB',
        num_chunk_types=(label_dict_len - 1) / 2)
    # create parameters; the 'emb' table is seeded with pretrained vectors
    parameters = paddle.parameters.create(crf_cost)
    parameters.set('emb', word_vector_values)
    # create optimizer
    optimizer = paddle.optimizer.Momentum(
        momentum=0,
        learning_rate=2e-4,
        regularization=paddle.optimizer.L2Regularization(rate=8e-4),
        gradient_clipping_threshold=25,
        model_average=paddle.optimizer.ModelAverage(
            average_window=0.5, max_average_window=10000), )
    trainer = paddle.trainer.SGD(
        cost=crf_cost,
        parameters=parameters,
        update_equation=optimizer,
        extra_layers=crf_dec)
    reader = paddle.batch(
        paddle.reader.shuffle(data_reader, buf_size=8192), batch_size=64)
    # Map data-layer names to positions in each reader sample.
    feeding = {'word': 0, 'mark': 1, 'target': 2}
    def event_handler(event):
        # Periodic logging, periodic evaluation, and per-pass checkpointing.
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "Pass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
            if event.batch_id % 1000 == 0:
                result = trainer.test(reader=reader, feeding=feeding)
                print "\nTest with Pass %d, Batch %d, %s" % (
                    event.pass_id, event.batch_id, result.metrics)
        if isinstance(event, paddle.event.EndPass):
            # save parameters
            with gzip.open('params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
                parameters.to_tar(f)
            result = trainer.test(reader=reader, feeding=feeding)
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
    trainer.train(
        reader=reader,
        event_handler=event_handler,
        num_passes=num_passes,
        feeding=feeding)
    return parameters
def ner_net_infer(data_reader=test_data_reader, model_file='ner_model.tar.gz'):
    """Run inference with a saved model and print word/label pairs.

    Only the first 10 samples from *data_reader* are used (demo output).

    :param data_reader: reader yielding (word_ids, mark_ids, ..., words)
    :param model_file: gzipped tar of trained parameters
    """
    test_data = []
    test_sentences = []
    for item in data_reader():
        # item[0]/item[1] are the word and mark id sequences fed to the
        # network; item[-1] holds the original tokens for display.
        test_data.append([item[0], item[1]])
        test_sentences.append(item[-1])
        if len(test_data) == 10:
            break
    predict = ner_net(is_train=False)
    lab_ids = paddle.infer(
        output_layer=predict,
        parameters=paddle.parameters.Parameters.from_tar(gzip.open(model_file)),
        input=test_data,
        field='id')
    # Flatten the sentences so tokens line up with the flat label ids.
    flat_data = [word for word in itertools.chain.from_iterable(test_sentences)]
    # Invert the label dict to map ids back to tag names.
    labels_reverse = {}
    for (k, v) in label_dict.items():
        labels_reverse[v] = k
    pre_lab = [labels_reverse[lab_id] for lab_id in lab_ids]
    for word, label in zip(flat_data, pre_lab):
        print word, label
if __name__ == '__main__':
    # Demo entry point: train for one pass on CPU, then run inference
    # with the checkpoint saved after pass 0.
    paddle.init(use_gpu=False, trainer_count=1)
    ner_net_train(data_reader=train_data_reader, num_passes=1)
    ner_net_infer(
        data_reader=test_data_reader, model_file='params_pass_0.tar.gz')
| zhaopu7/models | sequence_tagging_for_ner/ner.py | Python | apache-2.0 | 8,636 |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow_federated as tff
from utils.datasets import infinite_emnist
def _compute_dataset_length(dataset):
return dataset.reduce(0, lambda x, _: x + 1)
class InfiniteEmnistTest(tf.test.TestCase):
  """Tests for infinite_emnist.get_infinite on synthetic EMNIST data."""

  def test_element_type_structure_preserved(self):
    # Expanding the client set must not change the element spec.
    raw_client_data = tff.simulation.datasets.emnist.get_synthetic()
    inf_client_data = infinite_emnist.get_infinite(raw_client_data, 5)
    self.assertEqual(raw_client_data.element_type_structure,
                     inf_client_data.element_type_structure)

  def test_pseudo_client_count(self):
    # One synthetic client expanded to the requested number of pseudo-clients.
    raw_client_data = tff.simulation.datasets.emnist.get_synthetic()
    self.assertLen(raw_client_data.client_ids, 1)
    inf_client_data = infinite_emnist.get_infinite(raw_client_data, 10)
    self.assertLen(inf_client_data.client_ids, 10)

  def test_first_pseudo_client_preserves_original(self):
    # The first pseudo-client must be byte-for-byte the original client.
    raw_client_data = tff.simulation.datasets.emnist.get_synthetic()
    inf_client_data = infinite_emnist.get_infinite(raw_client_data, 5)
    raw_dataset = raw_client_data.dataset_computation(
        raw_client_data.client_ids[0])
    inf_dataset = inf_client_data.dataset_computation(
        inf_client_data.client_ids[0])
    length1 = _compute_dataset_length(raw_dataset)
    length2 = _compute_dataset_length(inf_dataset)
    self.assertEqual(length1, length2)
    for raw_batch, inf_batch in zip(raw_dataset, inf_dataset):
      self.assertAllClose(raw_batch, inf_batch)

  def test_transform_modifies_data(self):
    # Each pseudo-client beyond the first gets a distinct transformation,
    # so same-length datasets should differ batch-by-batch.
    data = infinite_emnist.get_infinite(
        tff.simulation.datasets.emnist.get_synthetic(), 3)
    datasets = [data.dataset_computation(id) for id in data.client_ids]
    lengths = [_compute_dataset_length(datasets[i]) for i in [0, 1, 2]]
    self.assertEqual(lengths[0], lengths[1])
    self.assertEqual(lengths[1], lengths[2])
    for batch0, batch1, batch2 in zip(datasets[0], datasets[1], datasets[2]):
      self.assertNotAllClose(batch0, batch1)
      self.assertNotAllClose(batch1, batch2)

  def test_dataset_computation_equals_create_tf_dataset(self):
    # Both dataset-access APIs must yield (near) identical pixel data.
    synth_data = tff.simulation.datasets.emnist.get_synthetic()
    data = infinite_emnist.get_infinite(synth_data, 3)
    for client_id in data.client_ids:
      comp_dataset = data.dataset_computation(client_id)
      create_tf_dataset = data.create_tf_dataset_for_client(client_id)
      for batch1, batch2 in zip(comp_dataset, create_tf_dataset):
        # For some reason it appears tf.quantization.quantize_and_dequantize
        # sometimes (very rarely-- on one pixel for this test) gives results
        # that differ by a single bit between the serialized and the
        # non-serialized versions. Hence we use atol just larger than 1 bit.
        self.assertAllClose(batch1['pixels'], batch2['pixels'], atol=1.5 / 255)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| google-research/public-data-in-dpfl | utils/datasets/infinite_emnist_test.py | Python | apache-2.0 | 3,434 |
# sqlalchemy/events.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event
from . import exc
from . import util
from .engine import Connectable
from .engine import Dialect
from .engine import Engine
from .pool import Pool
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
    """
    Define event listeners for schema objects,
    that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
    subclasses, including :class:`_schema.MetaData`, :class:`_schema.Table`,
    :class:`_schema.Column`.
    :class:`_schema.MetaData` and :class:`_schema.Table` support events
    specifically regarding when CREATE and DROP
    DDL is emitted to the database.
    Attachment events are also provided to customize
    behavior whenever a child schema element is associated
    with a parent, such as, when a :class:`_schema.Column` is associated
    with its :class:`_schema.Table`, when a
    :class:`_schema.ForeignKeyConstraint`
    is associated with a :class:`_schema.Table`, etc.
    Example using the ``after_create`` event::
        from sqlalchemy import event
        from sqlalchemy import Table, Column, MetaData, Integer
        m = MetaData()
        some_table = Table('some_table', m, Column('data', Integer))
        def after_create(target, connection, **kw):
            connection.execute("ALTER TABLE %s SET name=foo_%s" %
                                    (target.name, target.name))
        event.listen(some_table, "after_create", after_create)
    DDL events integrate closely with the
    :class:`.DDL` class and the :class:`.DDLElement` hierarchy
    of DDL clause constructs, which are themselves appropriate
    as listener callables::
        from sqlalchemy import DDL
        event.listen(
            some_table,
            "after_create",
            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
        )
    The methods here define the name of an event as well
    as the names of members that are passed to listener
    functions.
    For all :class:`.DDLEvent` events, the ``propagate=True`` keyword argument
    will ensure that a given event handler is propagated to copies of the
    object, which are made when using the :meth:`_schema.Table.tometadata`
    method::
        from sqlalchemy import DDL
        event.listen(
            some_table,
            "after_create",
            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
            propagate=True
        )
        new_table = some_table.tometadata(new_metadata)
    The above :class:`.DDL` object will also be associated with the
    :class:`_schema.Table` object represented by ``new_table``.
    .. seealso::
        :ref:`event_toplevel`
        :class:`.DDLElement`
        :class:`.DDL`
        :ref:`schema_ddl_sequences`
    """
    _target_class_doc = "SomeSchemaClassOrObject"
    _dispatch_target = SchemaEventTarget
    def before_create(self, target, connection, **kw):
        r"""Called before CREATE statements are emitted.
        :param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
         object which is the target of the event.
        :param connection: the :class:`_engine.Connection` where the
         CREATE statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        :func:`.event.listen` also accepts the ``propagate=True``
        modifier for this event; when True, the listener function will
        be established for any copies made of the target object,
        i.e. those copies that are generated when
        :meth:`_schema.Table.tometadata` is used.
        """
    def after_create(self, target, connection, **kw):
        r"""Called after CREATE statements are emitted.
        :param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
         object which is the target of the event.
        :param connection: the :class:`_engine.Connection` where the
         CREATE statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        :func:`.event.listen` also accepts the ``propagate=True``
        modifier for this event; when True, the listener function will
        be established for any copies made of the target object,
        i.e. those copies that are generated when
        :meth:`_schema.Table.tometadata` is used.
        """
    def before_drop(self, target, connection, **kw):
        r"""Called before DROP statements are emitted.
        :param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
         object which is the target of the event.
        :param connection: the :class:`_engine.Connection` where the
         DROP statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        :func:`.event.listen` also accepts the ``propagate=True``
        modifier for this event; when True, the listener function will
        be established for any copies made of the target object,
        i.e. those copies that are generated when
        :meth:`_schema.Table.tometadata` is used.
        """
    def after_drop(self, target, connection, **kw):
        r"""Called after DROP statements are emitted.
        :param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
         object which is the target of the event.
        :param connection: the :class:`_engine.Connection` where the
         DROP statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        :func:`.event.listen` also accepts the ``propagate=True``
        modifier for this event; when True, the listener function will
        be established for any copies made of the target object,
        i.e. those copies that are generated when
        :meth:`_schema.Table.tometadata` is used.
        """
    def before_parent_attach(self, target, parent):
        """Called before a :class:`.SchemaItem` is associated with
        a parent :class:`.SchemaItem`.
        :param target: the target object
        :param parent: the parent to which the target is being attached.
        :func:`.event.listen` also accepts the ``propagate=True``
        modifier for this event; when True, the listener function will
        be established for any copies made of the target object,
        i.e. those copies that are generated when
        :meth:`_schema.Table.tometadata` is used.
        """
    def after_parent_attach(self, target, parent):
        """Called after a :class:`.SchemaItem` is associated with
        a parent :class:`.SchemaItem`.
        :param target: the target object
        :param parent: the parent to which the target is being attached.
        :func:`.event.listen` also accepts the ``propagate=True``
        modifier for this event; when True, the listener function will
        be established for any copies made of the target object,
        i.e. those copies that are generated when
        :meth:`_schema.Table.tometadata` is used.
        """
    def column_reflect(self, inspector, table, column_info):
        """Called for each unit of 'column info' retrieved when
        a :class:`_schema.Table` is being reflected.
        The dictionary of column information as returned by the
        dialect is passed, and can be modified.  The dictionary
        is that returned in each element of the list returned
        by :meth:`.reflection.Inspector.get_columns`:
            * ``name`` - the column's name
            * ``type`` - the type of this column, which should be an instance
              of :class:`~sqlalchemy.types.TypeEngine`
            * ``nullable`` - boolean flag if the column is NULL or NOT NULL
            * ``default`` - the column's server default value.  This is
              normally specified as a plain string SQL expression, however the
              event can pass a :class:`.FetchedValue`, :class:`.DefaultClause`,
              or :func:`_expression.text` object as well.
              .. versionchanged:: 1.1.6
                 The :meth:`.DDLEvents.column_reflect` event allows a non
                 string :class:`.FetchedValue`,
                 :func:`_expression.text`, or derived object to be
                 specified as the value of ``default`` in the column
                 dictionary.
            * ``attrs``  - dict containing optional column attributes
        The event is called before any action is taken against
        this dictionary, and the contents can be modified.
        The :class:`_schema.Column` specific arguments ``info``, ``key``,
        and  ``quote`` can also be added to the dictionary and
        will be passed to the constructor of :class:`_schema.Column`.
        Note that this event is only meaningful if either
        associated with the :class:`_schema.Table` class across the
        board, e.g.::
            from sqlalchemy.schema import Table
            from sqlalchemy import event
            def listen_for_reflect(inspector, table, column_info):
                "receive a column_reflect event"
                # ...
            event.listen(
                    Table,
                    'column_reflect',
                    listen_for_reflect)
        ...or with a specific :class:`_schema.Table` instance using
        the ``listeners`` argument::
            def listen_for_reflect(inspector, table, column_info):
                "receive a column_reflect event"
                # ...
            t = Table(
                'sometable',
                autoload=True,
                listeners=[
                    ('column_reflect', listen_for_reflect)
                ])
        This is because the reflection process initiated by ``autoload=True``
        completes within the scope of the constructor for
        :class:`_schema.Table`.
        :func:`.event.listen` also accepts the ``propagate=True``
        modifier for this event; when True, the listener function will
        be established for any copies made of the target object,
        i.e. those copies that are generated when
        :meth:`_schema.Table.tometadata` is used.
        """
class PoolEvents(event.Events):
    """Available events for :class:`_pool.Pool`.
    The methods here define the name of an event as well
    as the names of members that are passed to listener
    functions.
    e.g.::
        from sqlalchemy import event
        def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
            "handle an on checkout event"
        event.listen(Pool, 'checkout', my_on_checkout)
    In addition to accepting the :class:`_pool.Pool` class and
    :class:`_pool.Pool` instances, :class:`_events.PoolEvents` also accepts
    :class:`_engine.Engine` objects and the :class:`_engine.Engine` class as
    targets, which will be resolved to the ``.pool`` attribute of the
    given engine or the :class:`_pool.Pool` class::
        engine = create_engine("postgresql://scott:tiger@localhost/test")
        # will associate with engine.pool
        event.listen(engine, 'checkout', my_on_checkout)
    """
    _target_class_doc = "SomeEngineOrPool"
    _dispatch_target = Pool
    @classmethod
    def _accept_with(cls, target):
        # Resolve Engine targets to their underlying pool so that pool
        # events may be listened for via an Engine class or instance.
        if isinstance(target, type):
            if issubclass(target, Engine):
                # Engine class -> associate with the Pool class itself;
                # classes that are neither Engine nor Pool subclasses fall
                # through and implicitly return None (target not accepted).
                return Pool
            elif issubclass(target, Pool):
                return target
        elif isinstance(target, Engine):
            # Engine instance -> its actual pool instance.
            return target.pool
        else:
            return target
    def connect(self, dbapi_connection, connection_record):
        """Called at the moment a particular DBAPI connection is first
        created for a given :class:`_pool.Pool`.
        This event allows one to capture the point directly after which
        the DBAPI module-level ``.connect()`` method has been used in order
        to produce a new DBAPI connection.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        """
    def first_connect(self, dbapi_connection, connection_record):
        """Called exactly once for the first time a DBAPI connection is
        checked out from a particular :class:`_pool.Pool`.
        The rationale for :meth:`_events.PoolEvents.first_connect`
        is to determine
        information about a particular series of database connections based
        on the settings used for all connections.  Since a particular
        :class:`_pool.Pool`
        refers to a single "creator" function (which in terms
        of a :class:`_engine.Engine`
        refers to the URL and connection options used),
        it is typically valid to make observations about a single connection
        that can be safely assumed to be valid about all subsequent
        connections, such as the database version, the server and client
        encoding settings, collation settings, and many others.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        """
    def checkout(self, dbapi_connection, connection_record, connection_proxy):
        """Called when a connection is retrieved from the Pool.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        :param connection_proxy: the :class:`._ConnectionFairy` object which
         will proxy the public interface of the DBAPI connection for the
         lifespan of the checkout.
        If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
        connection will be disposed and a fresh connection retrieved.
        Processing of all checkout listeners will abort and restart
        using the new connection.
        .. seealso:: :meth:`_events.ConnectionEvents.engine_connect`
           - a similar event
           which occurs upon creation of a new :class:`_engine.Connection`.
        """
    def checkin(self, dbapi_connection, connection_record):
        """Called when a connection returns to the pool.
        Note that the connection may be closed, and may be None if the
        connection has been invalidated.  ``checkin`` will not be called
        for detached connections.  (They do not return to the pool.)
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        """
    def reset(self, dbapi_connection, connection_record):
        """Called before the "reset" action occurs for a pooled connection.
        This event represents
        when the ``rollback()`` method is called on the DBAPI connection
        before it is returned to the pool.  The behavior of "reset" can
        be controlled, including disabled, using the ``reset_on_return``
        pool argument.
        The :meth:`_events.PoolEvents.reset` event is usually followed by the
        :meth:`_events.PoolEvents.checkin` event, except in those
        cases where the connection is discarded immediately after reset.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        .. seealso::
            :meth:`_events.ConnectionEvents.rollback`
            :meth:`_events.ConnectionEvents.commit`
        """
    def invalidate(self, dbapi_connection, connection_record, exception):
        """Called when a DBAPI connection is to be "invalidated".
        This event is called any time the :meth:`._ConnectionRecord.invalidate`
        method is invoked, either from API usage or via "auto-invalidation",
        without the ``soft`` flag.
        The event occurs before a final attempt to call ``.close()`` on the
        connection occurs.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        :param exception: the exception object corresponding to the reason
         for this invalidation, if any.  May be ``None``.
        .. versionadded:: 0.9.2 Added support for connection invalidation
           listening.
        .. seealso::
            :ref:`pool_connection_invalidation`
        """
    def soft_invalidate(self, dbapi_connection, connection_record, exception):
        """Called when a DBAPI connection is to be "soft invalidated".
        This event is called any time the :meth:`._ConnectionRecord.invalidate`
        method is invoked with the ``soft`` flag.
        Soft invalidation refers to when the connection record that tracks
        this connection will force a reconnect after the current connection
        is checked in.   It does not actively close the dbapi_connection
        at the point at which it is called.
        .. versionadded:: 1.0.3
        """
    def close(self, dbapi_connection, connection_record):
        """Called when a DBAPI connection is closed.
        The event is emitted before the close occurs.
        The close of a connection can fail; typically this is because
        the connection is already closed.  If the close operation fails,
        the connection is discarded.
        The :meth:`.close` event corresponds to a connection that's still
        associated with the pool. To intercept close events for detached
        connections use :meth:`.close_detached`.
        .. versionadded:: 1.1
        """
    def detach(self, dbapi_connection, connection_record):
        """Called when a DBAPI connection is "detached" from a pool.
        This event is emitted after the detach occurs.  The connection
        is no longer associated with the given connection record.
        .. versionadded:: 1.1
        """
    def close_detached(self, dbapi_connection):
        """Called when a detached DBAPI connection is closed.
        The event is emitted before the close occurs.
        The close of a connection can fail; typically this is because
        the connection is already closed.  If the close operation fails,
        the connection is discarded.
        .. versionadded:: 1.1
        """
class ConnectionEvents(event.Events):
"""Available events for :class:`.Connectable`, which includes
:class:`_engine.Connection` and :class:`_engine.Engine`.
The methods here define the name of an event as well as the names of
members that are passed to listener functions.
An event listener can be associated with any :class:`.Connectable`
class or instance, such as an :class:`_engine.Engine`, e.g.::
from sqlalchemy import event, create_engine
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
log.info("Received statement: %s", statement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
event.listen(engine, "before_cursor_execute", before_cursor_execute)
or with a specific :class:`_engine.Connection`::
with engine.begin() as conn:
@event.listens_for(conn, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters,
context, executemany):
log.info("Received statement: %s", statement)
When the methods are called with a `statement` parameter, such as in
:meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and
:meth:`.dbapi_error`, the statement is the exact SQL string that was
prepared for transmission to the DBAPI ``cursor`` in the connection's
:class:`.Dialect`.
The :meth:`.before_execute` and :meth:`.before_cursor_execute`
events can also be established with the ``retval=True`` flag, which
allows modification of the statement and parameters to be sent
to the database. The :meth:`.before_cursor_execute` event is
particularly useful here to add ad-hoc string transformations, such
as comments, to all executions::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def comment_sql_calls(conn, cursor, statement, parameters,
context, executemany):
statement = statement + " -- some comment"
return statement, parameters
.. note:: :class:`_events.ConnectionEvents` can be established on any
combination of :class:`_engine.Engine`, :class:`_engine.Connection`,
as well
as instances of each of those classes. Events across all
four scopes will fire off for a given instance of
:class:`_engine.Connection`. However, for performance reasons, the
:class:`_engine.Connection` object determines at instantiation time
whether or not its parent :class:`_engine.Engine` has event listeners
established. Event listeners added to the :class:`_engine.Engine`
class or to an instance of :class:`_engine.Engine`
*after* the instantiation
of a dependent :class:`_engine.Connection` instance will usually
*not* be available on that :class:`_engine.Connection` instance.
The newly
added listeners will instead take effect for
:class:`_engine.Connection`
instances created subsequent to those event listeners being
established on the parent :class:`_engine.Engine` class or instance.
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Connectable
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = (
event_key.dispatch_target,
event_key.identifier,
event_key._listen_fn,
)
target._has_events = True
if not retval:
if identifier == "before_execute":
orig_fn = fn
def wrap_before_execute(
conn, clauseelement, multiparams, params
):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap_before_execute
elif identifier == "before_cursor_execute":
orig_fn = fn
def wrap_before_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
orig_fn(
conn,
cursor,
statement,
parameters,
context,
executemany,
)
return statement, parameters
fn = wrap_before_cursor_execute
elif retval and identifier not in (
"before_execute",
"before_cursor_execute",
"handle_error",
):
raise exc.ArgumentError(
"Only the 'before_execute', "
"'before_cursor_execute' and 'handle_error' engine "
"event listeners accept the 'retval=True' "
"argument."
)
event_key.with_wrapper(fn).base_listen()
def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events, receiving uncompiled
SQL constructs and other objects prior to rendering into SQL.
This event is good for debugging SQL compilation issues as well
as early manipulation of the parameters being sent to the database,
as the parameter lists will be in a consistent format here.
This event can be optionally established with the ``retval=True``
flag. The ``clauseelement``, ``multiparams``, and ``params``
arguments should be returned as a three-tuple in this case::
@event.listens_for(Engine, "before_execute", retval=True)
def before_execute(conn, clauseelement, multiparams, params):
# do something with clauseelement, multiparams, params
return clauseelement, multiparams, params
:param conn: :class:`_engine.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to
:meth:`_engine.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
.. seealso::
:meth:`.before_cursor_execute`
"""
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events after execute.
:param conn: :class:`_engine.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to
:meth:`_engine.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
:param result: :class:`_engine.ResultProxy` generated by the execution
.
"""
    def before_cursor_execute(
        self, conn, cursor, statement, parameters, context, executemany
    ):
        """Intercept low-level cursor execute() events before execution,
        receiving the string SQL statement and DBAPI-specific parameter list to
        be invoked against a cursor.
        This event is a good choice for logging as well as late modifications
        to the SQL string.  It's less ideal for parameter modifications except
        for those which are specific to a target backend.
        This event can be optionally established with the ``retval=True``
        flag.  The ``statement`` and ``parameters`` arguments should be
        returned as a two-tuple in this case::
            @event.listens_for(Engine, "before_cursor_execute", retval=True)
            def before_cursor_execute(conn, cursor, statement,
                            parameters, context, executemany):
                # do something with statement, parameters
                return statement, parameters
        When ``retval=True`` is not used, the listener's return value is
        ignored and the original ``statement`` and ``parameters`` are used.
        See the example at :class:`_events.ConnectionEvents`.
        :param conn: :class:`_engine.Connection` object
        :param cursor: DBAPI cursor object
        :param statement: string SQL statement, as to be passed to the DBAPI
        :param parameters: Dictionary, tuple, or list of parameters being
         passed to the ``execute()`` or ``executemany()`` method of the
         DBAPI ``cursor``.  In some cases may be ``None``.
        :param context: :class:`.ExecutionContext` object in use.  May
         be ``None``.
        :param executemany: boolean, if ``True``, this is an ``executemany()``
         call, if ``False``, this is an ``execute()`` call.
        .. seealso::
            :meth:`.before_execute`
            :meth:`.after_cursor_execute`
        """
    def after_cursor_execute(
        self, conn, cursor, statement, parameters, context, executemany
    ):
        """Intercept low-level cursor execute() events after execution.
        :param conn: :class:`_engine.Connection` object
        :param cursor: DBAPI cursor object.  Will have results pending
         if the statement was a SELECT, but these should not be consumed
         as they will be needed by the :class:`_engine.ResultProxy`.
        :param statement: string SQL statement, as passed to the DBAPI
        :param parameters: Dictionary, tuple, or list of parameters being
         passed to the ``execute()`` or ``executemany()`` method of the
         DBAPI ``cursor``.  In some cases may be ``None``.
        :param context: :class:`.ExecutionContext` object in use.  May
         be ``None``.
        :param executemany: boolean, if ``True``, this is an ``executemany()``
         call, if ``False``, this is an ``execute()`` call.
        .. note:: This event does not accept the ``retval=True`` flag; only
           the :meth:`.before_execute`, :meth:`.before_cursor_execute` and
           :meth:`.handle_error` events do.
        """
    @util.deprecated(
        "0.9",
        "The :meth:`_events.ConnectionEvents.dbapi_error` "
        "event is deprecated and will be removed in a future release. "
        "Please refer to the :meth:`_events.ConnectionEvents.handle_error` "
        "event.",
    )
    def dbapi_error(
        self, conn, cursor, statement, parameters, context, exception
    ):
        """Intercept a raw DBAPI error.
        This event is called with the DBAPI exception instance
        received from the DBAPI itself, *before* SQLAlchemy wraps the
        exception with its own exception wrappers, and before any
        other operations are performed on the DBAPI cursor; the
        existing transaction remains in effect as well as any state
        on the cursor.
        The use case here is to inject low-level exception handling
        into an :class:`_engine.Engine`, typically for logging and
        debugging purposes.
        .. warning::
            Code should **not** modify
            any state or throw any exceptions here as this will
            interfere with SQLAlchemy's cleanup and error handling
            routines.  For exception modification, please refer to the
            new :meth:`_events.ConnectionEvents.handle_error` event.
        Subsequent to this hook, SQLAlchemy may attempt any
        number of operations on the connection/cursor, including
        closing the cursor, rolling back of the transaction in the
        case of connectionless execution, and disposing of the entire
        connection pool if a "disconnect" was detected.   The
        exception is then wrapped in a SQLAlchemy DBAPI exception
        wrapper and re-thrown.
        :param conn: :class:`_engine.Connection` object
        :param cursor: DBAPI cursor object
        :param statement: string SQL statement, as passed to the DBAPI
        :param parameters: Dictionary, tuple, or list of parameters being
         passed to the ``execute()`` or ``executemany()`` method of the
         DBAPI ``cursor``.  In some cases may be ``None``.
        :param context: :class:`.ExecutionContext` object in use.  May
         be ``None``.
        :param exception: The **unwrapped** exception emitted directly from the
         DBAPI.  The class here is specific to the DBAPI module in use.
        """
    def handle_error(self, exception_context):
        r"""Intercept all exceptions processed by the
        :class:`_engine.Connection`.
        This includes all exceptions emitted by the DBAPI as well as
        within SQLAlchemy's statement invocation process, including
        encoding errors and other statement validation errors.  Other areas
        in which the event is invoked include transaction begin and end,
        result row fetching, cursor creation.
        Note that :meth:`.handle_error` may support new kinds of exceptions
        and new calling scenarios at *any time*.  Code which uses this
        event must expect new calling patterns to be present in minor
        releases.
        To support the wide variety of members that correspond to an exception,
        as well as to allow extensibility of the event without backwards
        incompatibility, the sole argument received is an instance of
        :class:`.ExceptionContext`.   This object contains data members
        representing detail about the exception.
        Use cases supported by this hook include:
        * read-only, low-level exception handling for logging and
          debugging purposes
        * exception re-writing
        * Establishing or disabling whether a connection or the owning
          connection pool is invalidated or expired in response to a
          specific exception.
        The hook is called while the cursor from the failed operation
        (if any) is still open and accessible.   Special cleanup operations
        can be called on this cursor; SQLAlchemy will attempt to close
        this cursor subsequent to this hook being invoked.  If the connection
        is in "autocommit" mode, the transaction also remains open within
        the scope of this hook; the rollback of the per-statement transaction
        also occurs after the hook is called.
        For the common case of detecting a "disconnect" situation which
        is not currently handled by the SQLAlchemy dialect, the
        :attr:`.ExceptionContext.is_disconnect` flag can be set to True which
        will cause the exception to be considered as a disconnect situation,
        which typically results in the connection pool being invalidated::
            @event.listens_for(Engine, "handle_error")
            def handle_exception(context):
                if isinstance(context.original_exception, pyodbc.Error):
                    for code in (
                        '08S01', '01002', '08003',
                        '08007', '08S02', '08001', 'HYT00', 'HY010'):
                        if code in str(context.original_exception):
                            context.is_disconnect = True
        A handler function has two options for replacing
        the SQLAlchemy-constructed exception into one that is user
        defined.   It can either raise this new exception directly, in
        which case all further event listeners are bypassed and the
        exception will be raised, after appropriate cleanup has taken
        place::
            @event.listens_for(Engine, "handle_error")
            def handle_exception(context):
                if isinstance(context.original_exception,
                    psycopg2.OperationalError) and \
                    "failed" in str(context.original_exception):
                    raise MySpecialException("failed operation")
        .. warning::  Because the
           :meth:`_events.ConnectionEvents.handle_error`
           event specifically provides for exceptions to be re-thrown as
           the ultimate exception raised by the failed statement,
           **stack traces will be misleading** if the user-defined event
           handler itself fails and throws an unexpected exception;
           the stack trace may not illustrate the actual code line that
           failed!  It is advised to code carefully here and use
           logging and/or inline debugging if unexpected exceptions are
           occurring.
        Alternatively, a "chained" style of event handling can be
        used, by configuring the handler with the ``retval=True``
        modifier and returning the new exception instance from the
        function.  In this case, event handling will continue onto the
        next handler.   The "chained" exception is available using
        :attr:`.ExceptionContext.chained_exception`::
            @event.listens_for(Engine, "handle_error", retval=True)
            def handle_exception(context):
                if context.chained_exception is not None and \
                    "special" in context.chained_exception.message:
                    return MySpecialException("failed",
                        cause=context.chained_exception)
        Handlers that return ``None`` may be used within the chain; when
        a handler returns ``None``, the previous exception instance,
        if any, is maintained as the current exception that is passed onto the
        next handler.
        When a custom exception is raised or returned, SQLAlchemy raises
        this new exception as-is, it is not wrapped by any SQLAlchemy
        object.  If the exception is not a subclass of
        :class:`sqlalchemy.exc.StatementError`,
        certain features may not be available; currently this includes
        the ORM's feature of adding a detail hint about "autoflush" to
        exceptions raised within the autoflush process.
        :param context: an :class:`.ExceptionContext` object.   See this
         class for details on all available members.
        .. versionadded:: 0.9.7 Added the
           :meth:`_events.ConnectionEvents.handle_error` hook.
        .. versionchanged:: 1.1 The :meth:`.handle_error` event will now
           receive all exceptions that inherit from ``BaseException``,
           including ``SystemExit`` and ``KeyboardInterrupt``.  The setting for
           :attr:`.ExceptionContext.is_disconnect` is ``True`` in this case and
           the default for
           :attr:`.ExceptionContext.invalidate_pool_on_disconnect` is
           ``False``.
        .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now
           invoked when an :class:`_engine.Engine` fails during the initial
           call to :meth:`_engine.Engine.connect`, as well as when a
           :class:`_engine.Connection` object encounters an error during a
           reconnect operation.
        .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is
           not fired off when a dialect makes use of the
           ``skip_user_error_events`` execution option.   This is used
           by dialects which intend to catch SQLAlchemy-specific exceptions
           within specific operations, such as when the MySQL dialect detects
           a table not present within the ``has_table()`` dialect method.
           Prior to 1.0.0, code which implements :meth:`.handle_error` needs
           to ensure that exceptions thrown in these scenarios are re-raised
           without modification.
        """
    def engine_connect(self, conn, branch):
        """Intercept the creation of a new :class:`_engine.Connection`.

        This event is called typically as the direct result of calling
        the :meth:`_engine.Engine.connect` method.  It differs from
        :meth:`_events.PoolEvents.connect`, which refers to the actual
        DBAPI-level connection; this event refers only to the production of
        the higher level :class:`_engine.Connection` wrapper around such a
        DBAPI connection, which is available here via
        :attr:`_engine.Connection.connection`.

        Note there can be multiple :meth:`_events.PoolEvents.checkout`
        events within the lifespan of a single :class:`_engine.Connection`
        (if it is invalidated and re-established), and multiple
        :class:`_engine.Connection` objects can wrap the same checked-out
        DBAPI connection when a "branch" is produced.

        :param conn: :class:`_engine.Connection` object.
        :param branch: if True, this is a "branch" of an existing
         :class:`_engine.Connection`.  A branch is generated within the
         course of a statement execution to invoke supplemental statements,
         most typically to pre-execute a SELECT of a default value for the
         purposes of an INSERT statement.

        .. versionadded:: 0.9.0

        .. seealso::

            :ref:`pool_disconnects_pessimistic` - illustrates how to use
            :meth:`_events.ConnectionEvents.engine_connect`
            to transparently ensure pooled connections are connected to the
            database.

            :meth:`_events.PoolEvents.checkout` the lower-level pool
            checkout event for an individual DBAPI connection

            :meth:`_events.ConnectionEvents.set_connection_execution_options`
            - a copy of a :class:`_engine.Connection` is also made when the
            :meth:`_engine.Connection.execution_options` method is called.
        """
    def set_connection_execution_options(self, conn, opts):
        """Intercept when the :meth:`_engine.Connection.execution_options`
        method is called.

        This method is called after the new :class:`_engine.Connection` has
        been produced, with the newly updated execution options collection,
        but before the :class:`.Dialect` has acted upon any of those new
        options.

        Note that this method is not called when a new
        :class:`_engine.Connection` is produced which is inheriting
        execution options from its parent :class:`_engine.Engine`; to
        intercept this condition, use the
        :meth:`_events.ConnectionEvents.engine_connect` event.

        :param conn: The newly copied :class:`_engine.Connection` object
        :param opts: dictionary of options that were passed to the
         :meth:`_engine.Connection.execution_options` method.

        .. versionadded:: 0.9.0

        .. seealso::

            :meth:`_events.ConnectionEvents.set_engine_execution_options`
            - event which is called when
            :meth:`_engine.Engine.execution_options` is called.
        """
    def set_engine_execution_options(self, engine, opts):
        """Intercept when the :meth:`_engine.Engine.execution_options`
        method is called.

        The :meth:`_engine.Engine.execution_options` method produces a
        shallow copy of the :class:`_engine.Engine` which stores the new
        options.  That new :class:`_engine.Engine` is passed here.  A
        particular application of this method is to add a
        :meth:`_events.ConnectionEvents.engine_connect` event handler to the
        given :class:`_engine.Engine` which will perform some
        per-:class:`_engine.Connection` task specific to these execution
        options.

        :param engine: The newly copied :class:`_engine.Engine` object
         (the previous docstring mislabeled this parameter as ``conn``).
        :param opts: dictionary of options that were passed to the
         :meth:`_engine.Engine.execution_options` method.

        .. versionadded:: 0.9.0

        .. seealso::

            :meth:`_events.ConnectionEvents.set_connection_execution_options`
            - event which is called when
            :meth:`_engine.Connection.execution_options` is called.
        """
    def engine_disposed(self, engine):
        """Intercept when the :meth:`_engine.Engine.dispose` method is
        called.

        The :meth:`_engine.Engine.dispose` method instructs the engine to
        "dispose" of its connection pool (e.g. :class:`_pool.Pool`), and
        replaces it with a new one.  Disposing of the old pool has the
        effect that existing checked-in connections are closed.  The new
        pool does not establish any new connections until it is first used.

        This event can be used to indicate that resources related to the
        :class:`_engine.Engine` should also be cleaned up, keeping in mind
        that the :class:`_engine.Engine` can still be used for new requests
        in which case it re-acquires connection resources.

        :param engine: the :class:`_engine.Engine` being disposed.

        .. versionadded:: 1.0.5
        """
    def begin(self, conn):
        """Intercept begin() events, emitted when a new transaction is
        begun on a connection.

        :param conn: :class:`_engine.Connection` object
        """
    def rollback(self, conn):
        """Intercept rollback() events, as initiated by a
        :class:`.Transaction`.

        Note that the :class:`_pool.Pool` also "auto-rolls back" a DBAPI
        connection upon checkin, if the ``reset_on_return`` flag is set to
        its default value of ``'rollback'``.  To intercept this rollback,
        use the :meth:`_events.PoolEvents.reset` hook.

        :param conn: :class:`_engine.Connection` object

        .. seealso::

            :meth:`_events.PoolEvents.reset`
        """
    def commit(self, conn):
        """Intercept commit() events, as initiated by a
        :class:`.Transaction`.

        Note that the :class:`_pool.Pool` may also "auto-commit" a DBAPI
        connection upon checkin, if the ``reset_on_return`` flag is set to
        the value ``'commit'``.  To intercept this commit, use the
        :meth:`_events.PoolEvents.reset` hook.

        :param conn: :class:`_engine.Connection` object
        """
    def savepoint(self, conn, name):
        """Intercept savepoint() events.

        :param conn: :class:`_engine.Connection` object
        :param name: specified name used for the savepoint.
        """
    def rollback_savepoint(self, conn, name, context):
        """Intercept rollback_savepoint() events.

        :param conn: :class:`_engine.Connection` object
        :param name: specified name used for the savepoint.
        :param context: :class:`.ExecutionContext` in use.  May be ``None``.
        """
    def release_savepoint(self, conn, name, context):
        """Intercept release_savepoint() events.

        :param conn: :class:`_engine.Connection` object
        :param name: specified name used for the savepoint.
        :param context: :class:`.ExecutionContext` in use.  May be ``None``.
        """
    def begin_twophase(self, conn, xid):
        """Intercept begin_twophase() events.

        :param conn: :class:`_engine.Connection` object
        :param xid: two-phase XID identifier
        """
    def prepare_twophase(self, conn, xid):
        """Intercept prepare_twophase() events.

        :param conn: :class:`_engine.Connection` object
        :param xid: two-phase XID identifier
        """
    def rollback_twophase(self, conn, xid, is_prepared):
        """Intercept rollback_twophase() events.

        :param conn: :class:`_engine.Connection` object
        :param xid: two-phase XID identifier
        :param is_prepared: boolean, indicates if
         :meth:`.TwoPhaseTransaction.prepare` was called.
        """
    def commit_twophase(self, conn, xid, is_prepared):
        """Intercept commit_twophase() events.

        :param conn: :class:`_engine.Connection` object
        :param xid: two-phase XID identifier
        :param is_prepared: boolean, indicates if
         :meth:`.TwoPhaseTransaction.prepare` was called.
        """
class DialectEvents(event.Events):
    """event interface for execution-replacement functions.

    These events allow direct instrumentation and replacement
    of key dialect functions which interact with the DBAPI.

    .. note::

        :class:`.DialectEvents` hooks should be considered **semi-public**
        and experimental.  These hooks are not for general use and are only
        for those situations where intricate re-statement of DBAPI
        mechanics must be injected onto an existing dialect.  For
        general-use statement-interception events, please use the
        :class:`_events.ConnectionEvents` interface.

    .. seealso::

        :meth:`_events.ConnectionEvents.before_cursor_execute`

        :meth:`_events.ConnectionEvents.before_execute`

        :meth:`_events.ConnectionEvents.after_cursor_execute`

        :meth:`_events.ConnectionEvents.after_execute`

    .. versionadded:: 0.9.4

    """

    _target_class_doc = "SomeEngine"
    _dispatch_target = Dialect

    @classmethod
    def _listen(cls, event_key, retval=False):
        # Flag the dialect so that dispatch of these hooks is attempted at
        # all; the flag is consulted before invoking any handler.
        target = event_key.dispatch_target
        target._has_events = True
        event_key.base_listen()

    @classmethod
    def _accept_with(cls, target):
        # Normalize the listen() target to a Dialect class or instance:
        # an Engine class maps to the Dialect base class, an Engine
        # instance maps to its own dialect, and Dialect classes and other
        # instances pass through unchanged.
        if isinstance(target, type):
            if issubclass(target, Engine):
                return Dialect
            elif issubclass(target, Dialect):
                return target
        elif isinstance(target, Engine):
            return target.dialect
        else:
            return target

    def do_connect(self, dialect, conn_rec, cargs, cparams):
        """Receive connection arguments before a connection is made.

        Return a DBAPI connection to halt further events from invoking;
        the returned connection will be used.

        Alternatively, the event can manipulate the cargs and/or cparams
        collections; cargs will always be a Python list that can be mutated
        in-place and cparams a Python dictionary.  Return None to allow
        control to pass to the next event handler and ultimately to allow
        the dialect to connect normally, given the updated arguments.

        .. versionadded:: 1.0.3
        """

    def do_executemany(self, cursor, statement, parameters, context):
        """Receive a cursor to have executemany() called.

        Return the value True to halt further events from invoking, and to
        indicate that the cursor execution has already taken place within
        the event handler.
        """

    def do_execute_no_params(self, cursor, statement, context):
        """Receive a cursor to have execute() with no parameters called.

        Return the value True to halt further events from invoking, and to
        indicate that the cursor execution has already taken place within
        the event handler.
        """

    def do_execute(self, cursor, statement, parameters, context):
        """Receive a cursor to have execute() called.

        Return the value True to halt further events from invoking, and to
        indicate that the cursor execution has already taken place within
        the event handler.
        """

    def do_setinputsizes(
        self, inputsizes, cursor, statement, parameters, context
    ):
        """Receive the setinputsizes dictionary for possible modification.

        This event is emitted in the case where the dialect makes use of
        the DBAPI ``cursor.setinputsizes()`` method which passes
        information about parameter binding for a particular statement.
        The given ``inputsizes`` dictionary will contain
        :class:`.BindParameter` objects as keys, linked to DBAPI-specific
        type objects as values; for parameters that are not bound, they are
        added to the dictionary with ``None`` as the value, which means the
        parameter will not be included in the ultimate setinputsizes call.
        The event may be used to inspect and/or log the datatypes that are
        being bound, as well as to modify the dictionary in place;
        parameters can be added, modified, or removed.  Callers will
        typically want to inspect the :attr:`.BindParameter.type` attribute
        of the given bind objects in order to make decisions about the
        DBAPI object.

        After the event, the ``inputsizes`` dictionary is converted into an
        appropriate datastructure to be passed to ``cursor.setinputsizes``;
        either a list for a positional bound parameter execution style, or
        a dictionary of string parameter keys to DBAPI type objects for a
        named bound parameter execution style.

        Most dialects **do not use** this method at all; the only built-in
        dialect which uses this hook is the cx_Oracle dialect.  The hook
        here is made available so as to allow customization of how
        datatypes are set up with the cx_Oracle DBAPI.

        .. versionadded:: 1.2.9

        .. seealso::

            :ref:`cx_oracle_setinputsizes`
        """
        pass
| kawamon/hue | desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/events.py | Python | apache-2.0 | 53,052 |
import unittest
import numpy as np
from ..imutils import nan_to_zero, quantile_threshold, interpolate
class ImutilsTest(unittest.TestCase):
    """Unit tests for nan_to_zero, quantile_threshold and interpolate."""

    def test_nan_to_zero_with_ge_zero(self):
        """Non-negative inputs pass through unchanged; the returned mask
        flags the non-zero cells."""
        ids = (
            np.zeros(1),
            # NOTE(review): range(1, 10) is interpreted by np.ones as a
            # *shape*, producing a 9-dimensional array of 9! elements;
            # possibly np.arange(1, 10) was intended -- confirm.
            np.ones(range(1, 10)),
            np.arange(1024 * 1024)
        )
        for id_ in ids:
            before = id_.copy()
            notnull = nan_to_zero(id_)
            # nan_to_zero mutates in place; with no NaNs or negatives the
            # data must be unchanged.
            np.testing.assert_array_equal(before, id_)
            np.testing.assert_array_equal(notnull, before != 0)

    def test_nan_to_zero_with_negatives(self):
        """Negative and NaN values are zeroed in place and excluded from
        the mask."""
        negs = (
            np.array([-1]),
            np.array([np.nan]),
            - np.arange(1, 1024 * 1024 + 1).reshape((1024, 1024)),
            np.linspace(0, -20, 201)
        )
        for neg in negs:
            sh = neg.shape
            expected_notnull = np.zeros(sh).astype(np.bool_)
            actual_notnull = nan_to_zero(neg)
            np.testing.assert_array_equal(neg, np.zeros(sh))
            np.testing.assert_array_equal(actual_notnull, expected_notnull)

    def test_nan_to_zero_with_mixed(self):
        """Mixed arrays keep positives, zeroing only NaNs and negatives."""
        test_cases = (
            (np.array([-1, np.nan, 1e6, -1e6]), np.array([0, 0, 1e6, 0])),
            (np.arange(-2, 7).reshape((3, 3)), np.array([[0, 0, 0], np.arange(1, 4), np.arange(4, 7)])),
        )
        for input_, expected in test_cases:
            nan_to_zero(input_)
            np.testing.assert_array_equal(input_, expected)

    def test_nan_to_zero_with_empty(self):
        """None and plain lists are rejected unmodified; an empty array is
        a harmless no-op."""
        in_ = None
        self.assertRaises(AttributeError, nan_to_zero, in_)
        self.assertIs(in_, None)
        in_ = []
        self.assertRaises(TypeError, nan_to_zero, in_)
        self.assertEqual(in_, [])
        in_ = np.array([])
        notnull = nan_to_zero(in_)
        self.assertSequenceEqual(in_, [])
        self.assertSequenceEqual(notnull, [])

    def test_quantile_threshold_ValueError(self):
        """Quantile values outside the [0, 100] range raise ValueError."""
        test_cases = (
            (np.arange(0), np.arange(0, dtype=np.bool_), -37),
            (np.arange(0), np.arange(0, dtype=np.bool_), -4.4),
            (np.arange(0), np.arange(0, dtype=np.bool_), 101)
        )
        kws = ('im', 'notnull_mask', 'q_val',)
        for args in test_cases:
            kwargs = {kw: val for kw, val in zip(kws, args)}
            self.assertRaises(ValueError, quantile_threshold, **kwargs)

    def test_quantile_threshold_trivial(self):
        """Values above the requested quantile (taken over the masked
        cells) are clipped to it, in place; the threshold is returned."""
        test_cases = (
            ((np.arange(10), np.ones(10, dtype=np.bool_), 100), (np.arange(10), 9)),
            (
                (np.arange(101, dtype=np.float32), np.ones(101, dtype=np.bool_), 100. / 3),
                (np.concatenate((np.arange(34), np.repeat(100. / 3, 67))), 100. / 3),
            ),
            (
                (np.arange(20), np.repeat([True, False], 10), 100),
                (np.concatenate((np.arange(10), np.repeat(9, 10))), 9)
            ),
        )
        kws = ('im', 'notnull_mask', 'q_val',)
        for args, expected in test_cases:
            kwargs = {kw: val for kw, val in zip(kws, args)}
            im_in = args[0]
            im_expected, q_expected = expected
            q_actual = quantile_threshold(**kwargs)
            self.assertAlmostEqual(q_expected, q_actual, delta=1e-7)
            np.testing.assert_array_almost_equal(im_in, im_expected, decimal=6)

    def test_interpolate(self):
        """Cells excluded from the mask are filled from their neighbours;
        masked-in cells are preserved."""
        im_in = np.arange(900, dtype=np.float32).reshape((30, 30))
        im_in[2, 3] = np.nan
        notnull = im_in > 0
        im_out = interpolate(im_in, notnull)
        np.testing.assert_array_almost_equal(im_in[notnull], im_out[notnull])
        # Corner value 0 is masked out but stays 0; the NaN at (2, 3) is
        # reconstructed to its linear-ramp value 2 * 30 + 3 = 63.
        self.assertAlmostEqual(im_out[0, 0], 0)
        self.assertAlmostEqual(im_out[2, 3], 63)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| alexandrovteam/pyImagingMSpec | pyImagingMSpec/test/imutils_test.py | Python | apache-2.0 | 3,746 |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
""" Module for all special column handlers for workflow objects """
import datetime
from ggrc import db
from ggrc import models
from ggrc.converters import errors
from ggrc.converters import get_importables
from ggrc.converters.handlers import handlers
from ggrc_basic_permissions import models as bp_models
from ggrc_workflows import models as wf_models
class FrequencyColumnHandler(handlers.ColumnHandler):
  """Column handler for the workflow "frequency" attribute.

  Translates between the user-facing spelling ("one time") and the
  internal value ("one_time") in both the import and export direction.
  """

  frequency_map = {
      "one time": "one_time"
  }

  def parse_item(self):
    """Parse the raw frequency cell into an internal frequency value.

    An empty cell is reported as a missing column (the default frequency,
    one_time, then applies); an unrecognized value is reported as a wrong
    value.  Returns the internal frequency string, or None when empty.
    """
    if not self.raw_value:
      self.add_error(errors.MISSING_COLUMN, s="",
                     column_names=self.display_name)
      return None
    lowered = self.raw_value.lower()
    frequency = self.frequency_map.get(lowered, lowered)
    if frequency not in self.row_converter.object_class.VALID_FREQUENCIES:
      self.add_error(errors.WRONG_VALUE_ERROR, column_name=self.display_name)
    return frequency

  def get_value(self):
    """Return the user-facing spelling of the stored frequency."""
    stored = getattr(self.row_converter.obj, self.key, self.value)
    for display, internal in self.frequency_map.items():
      if internal == stored:
        return display
    return stored
class WorkflowColumnHandler(handlers.ParentColumnHandler):
  """Handler for the workflow column in task groups."""

  def __init__(self, row_converter, key, **options):
    """Bind this parent-column handler to the Workflow model."""
    self.parent = wf_models.Workflow
    super(WorkflowColumnHandler, self).__init__(row_converter, key, **options)
class TaskGroupColumnHandler(handlers.ParentColumnHandler):
  """Handler for the task group column in task group tasks."""

  def __init__(self, row_converter, key, **options):
    """Bind this parent-column handler to the TaskGroup model."""
    self.parent = wf_models.TaskGroup
    super(TaskGroupColumnHandler, self).__init__(row_converter, key, **options)
class CycleTaskGroupColumnHandler(handlers.ParentColumnHandler):
  """Handler for the cycle task group column in cycle task entries."""

  def __init__(self, row_converter, key, **options):
    """Bind this parent-column handler to the CycleTaskGroup model."""
    self.parent = wf_models.CycleTaskGroup
    super(CycleTaskGroupColumnHandler, self) \
        .__init__(row_converter, key, **options)
class TaskDateColumnHandler(handlers.ColumnHandler):
  """Handler for start and end date columns in task group tasks.

  Depending on the workflow frequency the cell holds a full date
  ("m/d/y"), a single relative day, a "month/day" pair, or a quarter
  label plus day (e.g. "Jan/Apr/Jul/Oct 15").
  """

  quarterly_names = {
      1: "Jan/Apr/Jul/Oct",
      2: "Feb/May/Aug/Nov",
      3: "Mar/Jun/Sep/Dec",
  }

  def parse_item(self):
    """Parse start and end columns for workflow tasks.

    Returns a list [month, day] for the quarter spelling, otherwise the
    "/"-separated parts of the cell converted to ints; returns None on a
    conversion error (an error is also recorded on the row).
    """
    raw_parts = self.raw_value.lower().split(" ")
    try:
      if len(raw_parts) == 2:
        # "Jan/Apr/Jul/Oct 15" style: map the quarter label to its index.
        quarter_name, day = raw_parts
        for month, quarter in self.quarterly_names.items():
          if quarter.lower() == quarter_name:
            return [month, int(day)]
      raw_parts = self.raw_value.split("/")
      # NOTE(review): under Python 2 map() returns a list, which the
      # len(self.value) checks in set_obj_attr() rely on; confirm before
      # any Python 3 port (map() would return an iterator there).
      return map(int, raw_parts)
    except ValueError:
      self.add_error(errors.WRONG_VALUE_ERROR,
                     column_name=self.display_name)
      return

  def get_value(self):
    """Render the stored date attributes for export, per frequency.

    Returns "" whenever the attributes needed for the workflow's
    frequency are not all set.
    """
    start = "start" in self.key
    obj = self.row_converter.obj
    freq = obj.task_group.workflow.frequency
    date = getattr(obj, "start_date" if start else "end_date", None)
    month = getattr(obj, "relative_start_month" if start
                    else "relative_end_month", None)
    day = getattr(obj, "relative_start_day" if start
                  else "relative_end_day", None)
    if freq == "one_time":
      if date is None:
        return ""
      return "{}/{}/{}".format(date.month, date.day, date.year)
    elif freq in ["weekly", "monthly"]:
      if day is None:
        return ""
      return str(day)
    elif freq == "quarterly":
      quarter = self.quarterly_names.get(month, None)
      if None in [day, quarter]:
        return ""
      return "{} {}".format(quarter, day)
    elif freq == "annually":
      if None in [day, month]:
        return ""
      return "{}/{}".format(month, day)
    else:
      return ""
class TaskStartColumnHandler(TaskDateColumnHandler):
  """Handler for the start date column in task group tasks."""

  def set_obj_attr(self):
    """Validate the parsed value and set the matching start attribute.

    one_time -> start_date; weekly/monthly -> relative_start_day;
    quarterly/annually -> relative_start_month + relative_start_day.
    Any shape or range violation records a wrong-value error instead.
    """
    # disable false parentheses warning for 'not (a < b < c)'
    # pylint: disable=C0325
    freq = self.row_converter.obj.task_group.workflow.frequency
    if freq == "one_time":
      if len(self.value) != 3:
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
      month, day, year = self.value
      try:
        self.row_converter.obj.start_date = datetime.date(year, month, day)
      except ValueError:
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
    elif freq in ["weekly", "monthly"]:
      # weekly days are 1-5 (work week); monthly days are 1-31.
      if len(self.value) != 1 or \
          (freq == "weekly" and not (1 <= self.value[0] <= 5)) or \
          (freq == "monthly" and not (1 <= self.value[0] <= 31)):
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
      self.row_converter.obj.relative_start_day = self.value[0]
    elif freq in ["quarterly", "annually"]:
      # value is [month, day]: month 1-3 within a quarter, 1-12 in a year.
      if len(self.value) != 2 or \
          (freq == "quarterly" and not (1 <= self.value[0] <= 3)) or \
          (freq == "annually" and not (1 <= self.value[0] <= 12)) or \
          not (1 <= self.value[1] <= 31):
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
      self.row_converter.obj.relative_start_day = self.value[1]
      self.row_converter.obj.relative_start_month = self.value[0]
    else:
      self.add_error(errors.WRONG_VALUE_ERROR,
                     column_name=self.display_name)
      return
class TaskEndColumnHandler(handlers.ColumnHandler):
  """Handler for the end date column in task group tasks."""

  def set_obj_attr(self):
    """Validate the parsed value and set the matching end attribute.

    Mirrors TaskStartColumnHandler.set_obj_attr but writes end_date /
    relative_end_day / relative_end_month instead.
    """
    # disable false parentheses warning for 'not (a < b < c)'
    # pylint: disable=C0325
    freq = self.row_converter.obj.task_group.workflow.frequency
    if freq == "one_time":
      if len(self.value) != 3:
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
      month, day, year = self.value
      try:
        self.row_converter.obj.end_date = datetime.date(year, month, day)
      except ValueError:
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
    elif freq in ["weekly", "monthly"]:
      # weekly days are 1-5 (work week); monthly days are 1-31.
      if len(self.value) != 1 or \
          (freq == "weekly" and not (1 <= self.value[0] <= 5)) or \
          (freq == "monthly" and not (1 <= self.value[0] <= 31)):
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
      self.row_converter.obj.relative_end_day = self.value[0]
    elif freq in ["quarterly", "annually"]:
      # value is [month, day]: month 1-3 within a quarter, 1-12 in a year.
      if len(self.value) != 2 or \
          (freq == "quarterly" and not (1 <= self.value[0] <= 3)) or \
          (freq == "annually" and not (1 <= self.value[0] <= 12)) or \
          not (1 <= self.value[1] <= 31):
        self.add_error(errors.WRONG_VALUE_ERROR,
                       column_name=self.display_name)
        return
      self.row_converter.obj.relative_end_day = self.value[1]
      self.row_converter.obj.relative_end_month = self.value[0]
    else:
      self.add_error(errors.WRONG_VALUE_ERROR,
                     column_name=self.display_name)
      return
class TaskTypeColumnHandler(handlers.ColumnHandler):
  """Handler for task type column in task group tasks."""

  # user-facing spellings -> internal task type values
  type_map = {
      "rich text": "text",
      "dropdown": "menu",
      "drop down": "menu",
      "checkboxes": "checkbox",
      "checkbox": "checkbox",
  }

  # internal task type values -> canonical user-facing spelling
  reverse_map = {
      "text": "rich text",
      "menu": "dropdown",
      "checkbox": "checkbox"
  }

  def parse_item(self):
    """Parse the task type cell into an internal task type value.

    Accepts both the user-facing and internal spellings; anything else
    falls back to the object's default task type, with a warning about a
    wrong value (non-empty cell) or a missing value (empty cell).
    """
    value = self.type_map.get(self.raw_value.lower())
    if value is None:
      # the cell may already contain the internal spelling
      if self.raw_value.lower() in self.type_map.values():
        value = self.raw_value.lower()
    if value is None:
      value = self.row_converter.obj.default_task_type()
      default_value = self.reverse_map.get(value).title()
      if self.raw_value:
        self.add_warning(errors.WRONG_REQUIRED_VALUE,
                         value=self.raw_value,
                         column_name=self.display_name)
      else:
        self.add_warning(errors.MISSING_VALUE_WARNING,
                         default_value=default_value,
                         column_name=self.display_name)
    return value

  def get_value(self):
    """Get titled user readable value for task type."""
    return self.reverse_map.get(self.row_converter.obj.task_type,
                                "rich text").title()
class WorkflowPersonColumnHandler(handlers.UserColumnHandler):
  """Handler for the list of people mapped to a workflow."""

  def parse_item(self):
    """Parse the cell into a list of Person objects (emails, one per line)."""
    return self.get_users_list()

  def set_obj_attr(self):
    # Intentionally empty: people are linked in insert_object() rather
    # than stored as a plain attribute on the workflow.
    pass

  def get_value(self):
    """Export emails of workflow members that hold no explicit user role
    in the workflow's context."""
    workflow_person = db.session.query(wf_models.WorkflowPerson.person_id)\
        .filter_by(workflow_id=self.row_converter.obj.id,)
    workflow_roles = db.session.query(bp_models.UserRole.person_id)\
        .filter_by(context_id=self.row_converter.obj.context_id)
    users = models.Person.query.filter(
        models.Person.id.in_(workflow_person) &
        models.Person.id.notin_(workflow_roles)
    )
    emails = [user.email for user in users]
    return "\n".join(emails)

  def remove_current_people(self):
    """Delete every existing WorkflowPerson link for this workflow."""
    wf_models.WorkflowPerson.query.filter_by(
        workflow_id=self.row_converter.obj.id).delete()

  def insert_object(self):
    """Replace the workflow's member list with the parsed people."""
    if self.dry_run or not self.value:
      return
    self.remove_current_people()
    for owner in self.value:
      workflow_person = wf_models.WorkflowPerson(
          workflow=self.row_converter.obj,
          person=owner,
          context=self.row_converter.obj.context
      )
      db.session.add(workflow_person)
    # NOTE(review): flipping dry_run here appears intended to guard
    # against a second insert_object() call re-running the delete/insert
    # cycle -- confirm intent before changing.
    self.dry_run = True
class ObjectsColumnHandler(handlers.ColumnHandler):
  """Handler for the mapped-objects column on task groups.

  Cell format is one mapping per line: "<Object type>: <slug>".
  """

  def __init__(self, row_converter, key, **options):
    # importable type name (lowercased) -> model class
    self.mappable = get_importables()
    # slugs of objects created earlier in this same import run, per class
    self.new_slugs = row_converter.block_converter.converter.new_objects
    super(ObjectsColumnHandler, self).__init__(row_converter, key, **options)

  def parse_item(self):
    """Resolve each "Type: slug" line into an existing model object.

    Malformed lines and unknown types produce warnings; unknown slugs
    warn too, unless they belong to objects newly created in this import
    (dry run only).  Returns the list of resolved objects.
    """
    lines = [line.split(":", 1) for line in self.raw_value.splitlines()]
    objects = []
    for line in lines:
      if len(line) != 2:
        self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        continue
      object_class, slug = line
      slug = slug.strip()
      class_ = self.mappable.get(object_class.strip().lower())
      if class_ is None:
        self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        continue
      new_object_slugs = self.new_slugs[class_]
      obj = class_.query.filter(class_.slug == slug).first()
      if obj:
        objects.append(obj)
      elif not (slug in new_object_slugs and self.dry_run):
        self.add_warning(errors.UNKNOWN_OBJECT,
                         object_type=class_._inflector.human_singular.title(),
                         slug=slug)
    return objects

  def set_obj_attr(self):
    # The parsed object list is cached for insert_object(), not written
    # onto the task group directly.
    self.value = self.parse_item()

  def get_value(self):
    """Export the task group's mapped objects, one "Type: slug" per line."""
    task_group_objects = wf_models.TaskGroupObject.query.filter_by(
        task_group_id=self.row_converter.obj.id).all()
    lines = ["{}: {}".format(t.object._inflector.title_singular.title(),
                             t.object.slug)
             for t in task_group_objects if t.object is not None]
    return "\n".join(lines)

  def insert_object(self):
    """Create TaskGroupObject links for objects not already mapped."""
    obj = self.row_converter.obj
    existing = set((t.object_type, t.object_id)
                   for t in obj.task_group_objects)
    for object_ in self.value:
      if (object_.type, object_.id) in existing:
        continue
      tgo = wf_models.TaskGroupObject(
          task_group=obj,
          object=object_,
          context=obj.context,
      )
      db.session.add(tgo)
    db.session.flush()

  def set_value(self):
    pass
class ExportOnlyColumnHandler(handlers.ColumnHandler):
  """Base class for columns that appear in exports but ignore imports.

  Every import-side hook is a deliberate no-op; subclasses override
  get_value() to supply the exported cell content.
  """

  def parse_item(self):
    """No-op: imported cells for this column are ignored."""

  def set_obj_attr(self):
    """No-op: nothing is written onto the object during import."""

  def get_value(self):
    """Return the exported cell content (empty by default)."""
    return ""

  def insert_object(self):
    """No-op: no related rows are created for this column."""

  def set_value(self):
    """No-op: the cached value is never populated."""
class CycleWorkflowColumnHandler(ExportOnlyColumnHandler):
  """Export-only column showing the slug of the cycle's workflow."""

  def get_value(self):
    return self.row_converter.obj.workflow.slug
class CycleColumnHandler(ExportOnlyColumnHandler):
  """Export-only column showing the slug of the object's cycle."""

  def get_value(self):
    return self.row_converter.obj.cycle.slug
class TaskDescriptionColumnHandler(handlers.TextareaColumnHandler):
  """Handler for task descriptions, stored differently per task type.

  Plain "text" tasks keep the cell in obj.description; menu/checkbox
  tasks keep a comma-separated list of response options instead.
  """

  def set_obj_attr(self):
    """Store the parsed value on the attribute matching the task type."""
    if not self.value:
      return
    obj = self.row_converter.obj
    if obj.task_type == "text":
      obj.description = self.value
    else:
      obj.response_options = [part.strip() for part in self.value.split(",")]

  def get_value(self):
    """Render the description (or option list) back into a single cell."""
    obj = self.row_converter.obj
    if obj.task_type == "text":
      return obj.description
    return ", ".join(obj.response_options)
# Registry consumed by the import/export framework: maps column keys of
# workflow-related objects to the handler class responsible for them.
COLUMN_HANDLERS = {
    "cycle": CycleColumnHandler,
    "cycle_task_group": CycleTaskGroupColumnHandler,
    "cycle_workflow": CycleWorkflowColumnHandler,
    "frequency": FrequencyColumnHandler,
    "notify_on_change": handlers.CheckboxColumnHandler,
    "relative_end_date": TaskEndColumnHandler,
    "relative_start_date": TaskStartColumnHandler,
    "task_description": TaskDescriptionColumnHandler,
    "task_group": TaskGroupColumnHandler,
    "task_group_objects": ObjectsColumnHandler,
    "task_type": TaskTypeColumnHandler,
    "workflow": WorkflowColumnHandler,
    "workflow_mapped": WorkflowPersonColumnHandler,
    "finished_date": handlers.DateColumnHandler,
    "verified_date": handlers.DateColumnHandler,
}
| prasannav7/ggrc-core | src/ggrc_workflows/converters/handlers.py | Python | apache-2.0 | 14,283 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import os
import zipfile
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
unicode
except NameError:
unicode = str
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
class TestFirefoxProfile:
    def setup_method(self, method):
        """Per-test setup: launch a non-marionette Firefox and start the
        local web server that serves the static test pages."""
        self.capabilities = {'marionette': False}
        self.driver = webdriver.Firefox(capabilities=self.capabilities)
        self.webserver = SimpleWebServer()
        self.webserver.start()
    def test_that_we_can_accept_a_profile(self):
        """A profile constructed from another profile's path keeps its
        preferences (the welcome URL loads the expected page)."""
        profile1 = webdriver.FirefoxProfile()
        # Suppress the first-run homepage override and point the welcome
        # page at the local test server so the title is deterministic.
        profile1.set_preference("browser.startup.homepage_override.mstone", "")
        profile1.set_preference("startup.homepage_welcome_url", self.webserver.where_is('simpleTest.html'))
        profile1.update_preferences()
        profile2 = webdriver.FirefoxProfile(profile1.path)
        driver = webdriver.Firefox(
            capabilities=self.capabilities,
            firefox_profile=profile2)
        title = driver.title
        driver.quit()
        assert "Hello WebDriver" == title
def test_that_prefs_are_written_in_the_correct_format(self):
# The setup gave us a browser but we dont need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.preference", "hi there")
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith("user.js"):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference",'):
assert line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_unicode_prefs_are_written_in_the_correct_format(self):
# The setup gave us a browser but we dont need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference('sample.preference.2', unicode('hi there'))
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference.2"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith('user.js'):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference.2",'):
assert line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_integer_prefs_are_written_in_the_correct_format(self):
# The setup gave us a browser but we dont need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.int.preference", 12345)
profile.update_preferences()
assert 12345 == profile.default_preferences["sample.int.preference"]
def test_that_boolean_prefs_are_written_in_the_correct_format(self):
# The setup gave us a browser but we dont need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.bool.preference", True)
profile.update_preferences()
assert profile.default_preferences["sample.bool.preference"] is True
    def test_that_we_delete_the_profile(self):
        """The temporary profile directory is removed when the driver
        quits."""
        path = self.driver.firefox_profile.path
        self.driver.quit()
        assert not os.path.exists(path)
    def test_profiles_do_not_share_preferences(self):
        """Mutating one profile's settings must not leak into a profile
        created afterwards."""
        self.profile1 = webdriver.FirefoxProfile()
        self.profile1.accept_untrusted_certs = False
        self.profile2 = webdriver.FirefoxProfile()
        # Default is true. Should remain so.
        assert self.profile2.default_preferences["webdriver_accept_untrusted_certs"] is True
def test_none_proxy_is_set(self):
# The setup gave us a browser but we dont need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = None
try:
self.profile.set_proxy(proxy)
assert False, "exception after passing empty proxy is expected"
except ValueError:
pass
assert "network.proxy.type" not in self.profile.default_preferences
    def test_unspecified_proxy_is_set(self):
        """An empty Proxy() object must not write any proxy preference."""
        # The setup gave us a browser but we dont need it
        self.driver.quit()
        self.profile = webdriver.FirefoxProfile()
        proxy = Proxy()
        self.profile.set_proxy(proxy)
        assert "network.proxy.type" not in self.profile.default_preferences
def test_manual_proxy_is_set_in_profile(self):
    """A MANUAL proxy writes host/port prefs and skips unset protocols."""
    # The browser started by setup is not needed here.
    self.driver.quit()
    self.profile = webdriver.FirefoxProfile()
    proxy = Proxy()
    proxy.no_proxy = 'localhost, foo.localhost'
    proxy.http_proxy = 'some.url:1234'
    proxy.ftp_proxy = None
    proxy.sslProxy = 'some2.url'
    self.profile.set_proxy(proxy)
    assert self.profile.default_preferences["network.proxy.type"] == ProxyType.MANUAL['ff_value']
    assert self.profile.default_preferences["network.proxy.no_proxies_on"] == 'localhost, foo.localhost'
    # host:port strings are split into separate host and port preferences
    assert self.profile.default_preferences["network.proxy.http"] == 'some.url'
    assert self.profile.default_preferences["network.proxy.http_port"] == 1234
    # no port given for ssl -> only the host pref is written
    assert self.profile.default_preferences["network.proxy.ssl"] == 'some2.url'
    assert "network.proxy.ssl_port" not in self.profile.default_preferences
    # ftp proxy left as None -> no ftp prefs at all
    assert "network.proxy.ftp" not in self.profile.default_preferences
def test_pac_proxy_is_set_in_profile(self):
    """A PAC proxy writes the autoconfig URL and the PAC proxy type."""
    # The browser started by setup is not needed here.
    self.driver.quit()
    self.profile = webdriver.FirefoxProfile()
    pac_proxy = Proxy()
    pac_proxy.proxy_autoconfig_url = 'http://some.url:12345/path'
    self.profile.set_proxy(pac_proxy)
    prefs = self.profile.default_preferences
    assert prefs["network.proxy.type"] == ProxyType.PAC['ff_value']
    assert prefs["network.proxy.autoconfig_url"] == 'http://some.url:12345/path'
def test_autodetect_proxy_is_set_in_profile(self):
    """Enabling auto-detection writes the AUTODETECT proxy type."""
    # The browser started by setup is not needed here.
    self.driver.quit()
    self.profile = webdriver.FirefoxProfile()
    auto_proxy = Proxy()
    auto_proxy.auto_detect = True
    self.profile.set_proxy(auto_proxy)
    prefs = self.profile.default_preferences
    assert prefs["network.proxy.type"] == ProxyType.AUTODETECT['ff_value']
def teardown_method(self, method):
    """Best-effort cleanup: quit the browser and stop the web server.

    A bare ``except:`` also swallows SystemExit/KeyboardInterrupt; catch
    Exception instead so process-control signals still propagate.
    """
    try:
        self.driver.quit()
    except Exception:
        pass  # don't care since we may have killed the browser above
    self.webserver.stop()
def _pageURL(self, name):
    """Return the test webserver URL for the named HTML page."""
    return self.webserver.where_is("{0}.html".format(name))
def _loadSimplePage(self):
    """Navigate the driver to the canonical 'simpleTest' page."""
    self._loadPage("simpleTest")
def _loadPage(self, name):
    """Navigate the driver to the named test page on the local webserver."""
    self.driver.get(self._pageURL(name))
def teardown_module(module):
    """Module-level cleanup: make sure the shared browser is shut down.

    Use ``except Exception`` rather than a bare ``except:`` so that
    SystemExit/KeyboardInterrupt are not silently swallowed.
    """
    try:
        TestFirefoxProfile.driver.quit()
    except Exception:
        pass  # Don't Care since we may have killed the browser above
| sag-enorman/selenium | py/test/selenium/webdriver/firefox/ff_profile_tests.py | Python | apache-2.0 | 8,464 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import six
from sahara import conductor as c
from sahara import context
from sahara.i18n import _LI
from sahara.openstack.common import log as logging
from sahara.utils.notification import sender
conductor = c.API
LOG = logging.getLogger(__name__)
NATURAL_SORT_RE = re.compile('([0-9]+)')
def find_dict(iterable, **rules):
    """Search for dict in iterable of dicts using specified key-value rules.

    :param iterable: iterable of dicts to search
    :param rules: key/value pairs that must all be present in a match
    :returns: the first matching dict, or None if nothing matches

    The original accumulated ``ok = ok and ...`` over every rule even after
    a mismatch; ``all()`` short-circuits and needs no six helper.
    """
    for item in iterable:
        # every rule key must be present with exactly the rule's value
        if all(k in item and item[k] == v for k, v in rules.items()):
            return item
    return None
def find(lst, **kwargs):
    """Return the first object in *lst* whose attributes all match *kwargs*.

    Raises AttributeError if a candidate lacks one of the attributes,
    mirroring the original behaviour. Returns None when nothing matches.
    """
    for candidate in lst:
        if all(getattr(candidate, attr) == value
               for attr, value in kwargs.items()):
            return candidate
    return None
def get_by_id(lst, id):
    """Return the object in *lst* whose ``id`` attribute equals *id*, else None."""
    matches = (obj for obj in lst if obj.id == id)
    return next(matches, None)
# Taken from http://stackoverflow.com/questions/4836710/does-
# python-have-a-built-in-function-for-string-natural-sort
def natural_sort_key(s):
    """Return a sort key that orders embedded numbers numerically.

    Splits *s* on digit runs (the capturing group keeps them) and lowers
    the text chunks, so "item2" sorts before "item10".
    """
    return [int(chunk) if chunk.isdigit() else chunk.lower()
            for chunk in re.split('([0-9]+)', s)]
def change_cluster_status(cluster, status, status_description=None):
    """Persist a new status on *cluster*, log it and emit a notification.

    :param cluster: cluster object (may be None, in which case nothing happens)
    :param status: new status string
    :param status_description: optional human-readable detail
    :returns: the refreshed cluster object, or None if *cluster* was None
    """
    if cluster is None:
        return None
    update_dict = {"status": status}
    if status_description:
        update_dict["status_description"] = status_description
    # conductor returns the updated object; use it for logging/notifying
    cluster = conductor.cluster_update(context.ctx(), cluster, update_dict)
    LOG.info(_LI("Cluster status has been changed: id=%(id)s, New status="
                 "%(status)s"), {'id': cluster.id, 'status': cluster.status})
    sender.notify(context.ctx(), cluster.id, cluster.name, cluster.status,
                  "update")
    return cluster
def check_cluster_exists(cluster):
    """Return True if *cluster* is still present in the database."""
    ctx = context.ctx()
    # check if cluster still exists (it might have been removed)
    cluster = conductor.cluster_get(ctx, cluster)
    return cluster is not None
def get_instances(cluster, instances_ids=None):
    """Collect instances of *cluster*, optionally restricted to given ids.

    :param cluster: cluster whose node groups are scanned
    :param instances_ids: optional iterable of instance ids to select;
        raises KeyError if an id is not part of the cluster
    :returns: list of instance objects (insertion order)

    The original wrapped ``six.itervalues`` in a pointless list
    comprehension; ``list(inst_map.values())`` is equivalent on both
    Python 2 and 3, and the ``else`` after ``return`` was redundant.
    """
    inst_map = {}
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            inst_map[instance.id] = instance
    if instances_ids is not None:
        return [inst_map[id] for id in instances_ids]
    return list(inst_map.values())
def clean_cluster_from_empty_ng(cluster):
    """Delete every node group of *cluster* that has a zero instance count."""
    ctx = context.ctx()
    for ng in cluster.node_groups:
        if ng.count == 0:
            conductor.node_group_remove(ctx, ng)
def generate_etc_hosts(cluster):
    """Render an /etc/hosts file covering every instance in *cluster*.

    Each line maps internal IP -> fqdn and short hostname; the loopback
    entry comes first.
    """
    lines = ["127.0.0.1 localhost\n"]
    for node_group in cluster.node_groups:
        lines.extend(
            "%s %s %s\n" % (inst.internal_ip, inst.fqdn(), inst.hostname())
            for inst in node_group.instances)
    return "".join(lines)
def generate_instance_name(cluster_name, node_group_name, index):
    """Build the lower-cased ``<cluster>-<group>-NNN`` instance name."""
    raw = "%s-%s-%03d" % (cluster_name, node_group_name, index)
    return raw.lower()
def generate_auto_security_group_name(node_group):
    """Build the lower-cased auto security group name for *node_group*.

    Uses the first 8 characters of the node group id for uniqueness.
    """
    raw = "%s-%s-%s" % (node_group.cluster.name,
                        node_group.name,
                        node_group.id[:8])
    return raw.lower()
def generate_aa_group_name(cluster_name):
    """Build the lower-cased anti-affinity server group name."""
    return "{0}-aa-group".format(cluster_name).lower()
| citrix-openstack-build/sahara | sahara/utils/general.py | Python | apache-2.0 | 3,962 |
from __future__ import absolute_import
from django.contrib import admin
from django.contrib.admin.models import DELETION
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.utils.html import escape
from admin.common_auth.logs import OSFLogEntry
from admin.common_auth.forms import UserRegistrationForm
from osf.models.user import OSFUser
class PermissionAdmin(admin.ModelAdmin):
    """Admin for auth Permission objects, searchable by name and codename."""
    search_fields = ['name', 'codename']
class CustomUserAdmin(UserAdmin):
    """Admin for OSF users; uses the custom registration form for adds."""
    add_form = UserRegistrationForm
    list_display = ['username', 'given_name', 'is_active']
# Register OSF users and auth permissions with the Django admin site.
admin.site.register(OSFUser, CustomUserAdmin)
admin.site.register(Permission, PermissionAdmin)
class LogEntryAdmin(admin.ModelAdmin):
    """Read-only admin for OSF audit log entries.

    All fields are read-only and add/delete are disabled; changes are only
    viewable (GET) by superusers.
    """
    date_hierarchy = 'action_time'
    # every model field is exposed read-only
    readonly_fields = [f.name for f in OSFLogEntry._meta.get_fields()]
    list_filter = [
        'user',
        'action_flag'
    ]
    search_fields = [
        'object_repr',
        'change_message'
    ]
    list_display = [
        'action_time',
        'user',
        'object_link',
        'object_id',
        'message',
    ]
    def has_add_permission(self, request):
        # log entries are created by the system, never by hand
        return False
    def has_change_permission(self, request, obj=None):
        # superusers may view the change form but not POST edits
        return request.user.is_superuser and request.method != 'POST'
    def has_delete_permission(self, request, obj=None):
        # audit log is append-only
        return False
    def object_link(self, obj):
        """Render a link to the logged object's change page when possible."""
        if obj.action_flag == DELETION:
            # deleted objects no longer have a change page
            link = escape(obj.object_repr)
        elif obj.content_type is None:
            link = escape(obj.object_repr)
        else:
            ct = obj.content_type
            link = u'<a href="%s">%s</a>' % (
                reverse('admin:%s_%s_change' % (ct.app_label, ct.model), args=[obj.object_id]),
                escape(obj.object_repr),
            )
        return link
    # NOTE(review): allow_tags was deprecated in Django 1.9 in favour of
    # format_html/mark_safe -- confirm against the project's Django version.
    object_link.allow_tags = True
    object_link.admin_order_field = 'object_repr'
    object_link.short_description = u'object'
    def queryset(self, request):
        # NOTE(review): ModelAdmin.queryset was renamed get_queryset in
        # Django 1.6; this override may be dead code on newer Django --
        # verify against the project's Django version.
        return super(LogEntryAdmin, self).queryset(request) \
            .prefetch_related('content_type')
# Expose the audit log in the admin site.
admin.site.register(OSFLogEntry, LogEntryAdmin)
| mluo613/osf.io | admin/common_auth/admin.py | Python | apache-2.0 | 2,230 |
#!/usr/bin/env python
"""Report neutron security groups whose owning tenant no longer exists.

Reads OpenStack credentials from os.cfg (cwd), ~/.os.cfg or
/etc/os-maint/os.cfg, lists all keystone tenants, then prints every
security group whose tenant id is unknown ("stray" groups).

NOTE(review): Python 2 only -- uses ConfigParser and print statements.
"""
import sys  # NOTE(review): imported but unused
import ConfigParser
import os
config = ConfigParser.ConfigParser()
# later files override earlier ones
config.read(['os.cfg',
             os.path.expanduser('~/.os.cfg'),
             '/etc/os-maint/os.cfg'])
os_user_name = config.get('OPENSTACK', 'os_user_name')
os_password = config.get('OPENSTACK', 'os_password')
os_tenant_name = config.get('OPENSTACK', 'os_tenant_name')
os_auth_url = config.get('OPENSTACK', 'os_auth_url')
# NOTE(review): os_region_name is read but never passed to either client
os_region_name = config.get('OPENSTACK', 'os_region_name')
broken_n_sgroups = []  # NOTE(review): never populated or used
known_tids = []  # tenant ids that still exist in keystone
from neutronclient.v2_0 import client as neutronclient
nc = neutronclient.Client(username=os_user_name,
                          password=os_password,
                          tenant_name=os_tenant_name,
                          auth_url=os_auth_url)
from keystoneclient.v2_0 import client as kclient
keystone = kclient.Client(username=os_user_name,
                          password=os_password,
                          tenant_name=os_tenant_name,
                          auth_url=os_auth_url
                          )
for tenant in keystone.tenants.list():
    known_tids.append(tenant.id)
security_groups = nc.list_security_groups()
# report (but do not delete) groups owned by vanished tenants
for n_sgroup in security_groups.get('security_groups'):
    tid = n_sgroup.get('tenant_id')
    if tid not in known_tids:
        print "stray sgroup %s (tenant %s DNE)" % (n_sgroup.get('id'), tid)
| drwahl/os-maintenance-tools | bin/neutron_clean_secgroups.py | Python | apache-2.0 | 1,359 |
# Copyright 2011 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Speedway iptables generator. This is a subclass of Iptables lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = 'watson@google.com (Tony Watson)'
from string import Template
from lib import iptables
class Error(Exception):
    """Base error class for the speedway generator module."""
    pass
class Term(iptables.Term):
    """Generate Iptables policy terms."""
    _PLATFORM = 'speedway'
    # Speedway emits no pre-jump line; terms are appended to the filter
    # chain directly via the post-jump template below.
    _PREJUMP_FORMAT = None
    _POSTJUMP_FORMAT = Template('-A $filter -j $term')
class Speedway(iptables.Iptables):
    """Generates filters and terms from provided policy object.

    Output is iptables-restore format: wrapped in '*filter' ... 'COMMIT'.
    """
    _PLATFORM = 'speedway'
    _DEFAULT_PROTOCOL = 'all'
    SUFFIX = '.ipt'
    _RENDER_PREFIX = '*filter'
    _RENDER_SUFFIX = 'COMMIT'
    # e.g. ':INPUT DROP' -- chain default policy lines
    _DEFAULTACTION_FORMAT = ':%s %s'
    _TERM = Term
| ryantierney513/capirca | lib/speedway.py | Python | apache-2.0 | 1,410 |
#!/usr/bin/env python
"""This modules contains regression tests for config API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import config as config_plugin
from grr_response_server.gui.api_plugins import config_test as config_plugin_test
class ApiListGrrBinariesHandlerRegressionTest(
    config_plugin_test.ApiGrrBinaryTestMixin,
    api_regression_test_lib.ApiRegressionTest):
  """Golden-output regression test for the ListGrrBinaries API method."""

  api_method = "ListGrrBinaries"
  handler = config_plugin.ApiListGrrBinariesHandler

  def Run(self):
    # Upload the fixture binaries, then record/compare the API response.
    self.SetUpBinaries()
    self.Check("ListGrrBinaries")
class ApiGetGrrBinaryHandlerRegressionTest(
    config_plugin_test.ApiGrrBinaryTestMixin,
    api_regression_test_lib.ApiRegressionTest):
  """Golden-output regression test for the GetGrrBinary API method."""

  api_method = "GetGrrBinary"
  handler = config_plugin.ApiGetGrrBinaryHandler

  def Run(self):
    self.SetUpBinaries()
    # one check per binary type uploaded by SetUpBinaries
    self.Check(
        "GetGrrBinary",
        args=config_plugin.ApiGetGrrBinaryArgs(type="PYTHON_HACK", path="test"))
    self.Check(
        "GetGrrBinary",
        args=config_plugin.ApiGetGrrBinaryArgs(
            type="EXECUTABLE", path="windows/test.exe"))
class ApiGetGrrBinaryBlobHandlerRegressionTest(
    config_plugin_test.ApiGrrBinaryTestMixin,
    api_regression_test_lib.ApiRegressionTest):
  """Golden-output regression test for the GetGrrBinaryBlob API method."""

  api_method = "GetGrrBinaryBlob"
  handler = config_plugin.ApiGetGrrBinaryBlobHandler

  def Run(self):
    self.SetUpBinaries()
    # one check per binary type uploaded by SetUpBinaries
    self.Check(
        "GetGrrBinaryBlob",
        args=config_plugin.ApiGetGrrBinaryBlobArgs(
            type="PYTHON_HACK", path="test"))
    self.Check(
        "GetGrrBinaryBlob",
        args=config_plugin.ApiGetGrrBinaryBlobArgs(
            type="EXECUTABLE", path="windows/test.exe"))
def main(argv):
  """Entry point: delegate to the API regression test runner."""
  api_regression_test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| dunkhong/grr | grr/server/grr_response_server/gui/api_plugins/config_regression_test.py | Python | apache-2.0 | 1,948 |
# -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide an interface for growth experiments."""
from __future__ import absolute_import
import logging
from pandas import DataFrame
from memote.experimental.experiment import Experiment
__all__ = ("GrowthExperiment",)
LOGGER = logging.getLogger(__name__)
class GrowthExperiment(Experiment):
    """Represent a growth experiment."""

    SCHEMA = "growth.json"

    def __init__(self, **kwargs):
        """
        Initialize a growth experiment.

        Parameters
        ----------
        kwargs
            Keyword arguments forwarded to ``Experiment``.
        """
        super(GrowthExperiment, self).__init__(**kwargs)

    def load(self, dtype_conversion=None):
        """
        Load the data table and corresponding validation schema.

        Parameters
        ----------
        dtype_conversion : dict
            Column names as keys and corresponding type for loading the data.
            Please take a look at the `pandas documentation
            <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
            for detailed explanations.
        """
        if dtype_conversion is None:
            dtype_conversion = {"growth": str}
        super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion)
        # coerce the textual growth column to booleans via the TRUTHY set
        self.data["growth"] = self.data["growth"].isin(self.TRUTHY)

    def evaluate(self, model):
        """Evaluate in silico growth rates."""
        with model:
            if self.medium is not None:
                self.medium.apply(model)
            if self.objective is not None:
                model.objective = self.objective
            model.add_cons_vars(self.constraints)
            growth = list()
            for row in self.data.itertuples(index=False):
                with model:
                    exchange = model.reactions.get_by_id(row.exchange)
                    # exchanges written with reactants consume at negative
                    # flux; otherwise uptake is a positive upper bound
                    if bool(exchange.reactants):
                        exchange.lower_bound = -row.uptake
                    else:
                        exchange.upper_bound = row.uptake
                    growth.append(model.slim_optimize() >= self.minimal_growth_rate)
        return DataFrame({"exchange": self.data["exchange"], "growth": growth})
| opencobra/memote | src/memote/experimental/growth.py | Python | apache-2.0 | 2,835 |
"""Support for RFXtrx devices."""
import asyncio
import binascii
from collections import OrderedDict
import copy
import logging
import RFXtrx as rfxtrxmod
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_DEVICE_ID,
CONF_DEVICES,
CONF_HOST,
CONF_PORT,
DEGREE,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_STOP,
LENGTH_MILLIMETERS,
PERCENTAGE,
POWER_WATT,
PRECIPITATION_MILLIMETERS_PER_HOUR,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
UV_INDEX,
VOLT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_EVENT,
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_DEBUG,
CONF_FIRE_EVENT,
CONF_OFF_DELAY,
CONF_REMOVE_DEVICE,
CONF_SIGNAL_REPETITIONS,
DATA_CLEANUP_CALLBACKS,
DATA_LISTENER,
DATA_RFXOBJECT,
DEVICE_PACKET_TYPE_LIGHTING4,
EVENT_RFXTRX_EVENT,
SERVICE_SEND,
)
DOMAIN = "rfxtrx"
DEFAULT_SIGNAL_REPETITIONS = 1
SIGNAL_EVENT = f"{DOMAIN}_event"
DATA_TYPES = OrderedDict(
[
("Temperature", TEMP_CELSIUS),
("Temperature2", TEMP_CELSIUS),
("Humidity", PERCENTAGE),
("Barometer", PRESSURE_HPA),
("Wind direction", DEGREE),
("Rain rate", PRECIPITATION_MILLIMETERS_PER_HOUR),
("Energy usage", POWER_WATT),
("Total usage", ENERGY_KILO_WATT_HOUR),
("Sound", None),
("Sensor Status", None),
("Counter value", "count"),
("UV", UV_INDEX),
("Humidity status", None),
("Forecast", None),
("Forecast numeric", None),
("Rain total", LENGTH_MILLIMETERS),
("Wind average speed", SPEED_METERS_PER_SECOND),
("Wind gust", SPEED_METERS_PER_SECOND),
("Chill", TEMP_CELSIUS),
("Count", "count"),
("Current Ch. 1", ELECTRICAL_CURRENT_AMPERE),
("Current Ch. 2", ELECTRICAL_CURRENT_AMPERE),
("Current Ch. 3", ELECTRICAL_CURRENT_AMPERE),
("Voltage", VOLT),
("Current", ELECTRICAL_CURRENT_AMPERE),
("Battery numeric", PERCENTAGE),
("Rssi numeric", SIGNAL_STRENGTH_DECIBELS_MILLIWATT),
]
)
_LOGGER = logging.getLogger(__name__)
def _bytearray_string(data):
    """Validate *data* as a hex string and return it as a bytearray."""
    text = cv.string(data)
    try:
        return bytearray.fromhex(text)
    except ValueError as err:
        raise vol.Invalid(
            "Data must be a hex string with multiple of two characters"
        ) from err
def _ensure_device(value):
    """Normalize a device config value, defaulting None to an empty schema."""
    if value is None:
        return DEVICE_DATA_SCHEMA({})
    return DEVICE_DATA_SCHEMA(value)
SERVICE_SEND_SCHEMA = vol.Schema({ATTR_EVENT: _bytearray_string})
DEVICE_DATA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_OFF_DELAY): vol.All(
cv.time_period, cv.positive_timedelta, lambda value: value.total_seconds()
),
vol.Optional(CONF_DATA_BITS): cv.positive_int,
vol.Optional(CONF_COMMAND_ON): cv.byte,
vol.Optional(CONF_COMMAND_OFF): cv.byte,
vol.Optional(CONF_SIGNAL_REPETITIONS, default=1): cv.positive_int,
}
)
BASE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEBUG): cv.boolean,
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): {cv.string: _ensure_device},
},
)
DEVICE_SCHEMA = BASE_SCHEMA.extend({vol.Required(CONF_DEVICE): cv.string})
PORT_SCHEMA = BASE_SCHEMA.extend(
{vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_HOST): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.deprecated(CONF_DEBUG), vol.Any(DEVICE_SCHEMA, PORT_SCHEMA))},
extra=vol.ALLOW_EXTRA,
)
DOMAINS = ["switch", "sensor", "light", "binary_sensor", "cover"]
async def async_setup(hass, config):
    """Set up the RFXtrx component from YAML by importing a config entry."""
    if DOMAIN not in config:
        return True
    data = {
        CONF_HOST: config[DOMAIN].get(CONF_HOST),
        CONF_PORT: config[DOMAIN].get(CONF_PORT),
        CONF_DEVICE: config[DOMAIN].get(CONF_DEVICE),
        CONF_AUTOMATIC_ADD: config[DOMAIN].get(CONF_AUTOMATIC_ADD),
        CONF_DEVICES: config[DOMAIN][CONF_DEVICES],
    }
    # Derive each device's stable device_id from its event code and add it
    # to the data that will end up in the ConfigEntry.
    for event_code, event_config in data[CONF_DEVICES].items():
        event = get_rfx_object(event_code)
        if event is None:
            # unparseable event codes are silently skipped
            continue
        device_id = get_device_id(
            event.device, data_bits=event_config.get(CONF_DATA_BITS)
        )
        event_config[CONF_DEVICE_ID] = device_id
    # Hand the YAML data to the import flow so it becomes a config entry.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=data,
        )
    )
    return True
async def async_setup_entry(hass, entry: config_entries.ConfigEntry):
    """Set up the RFXtrx component from a config entry."""
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][DATA_CLEANUP_CALLBACKS] = []
    try:
        await async_setup_internal(hass, entry)
    except asyncio.TimeoutError:
        # Library currently doesn't support reload
        _LOGGER.error(
            "Connection timeout: failed to receive response from RFXtrx device"
        )
        return False
    # Forward the entry to every supported platform.
    for domain in DOMAINS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, domain)
        )
    return True
async def async_unload_entry(hass, entry: config_entries.ConfigEntry):
    """Unload RFXtrx component: platforms, services, listeners, connection."""
    if not all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in DOMAINS
            ]
        )
    ):
        # abort if any platform refused to unload
        return False
    hass.services.async_remove(DOMAIN, SERVICE_SEND)
    # run dispatcher/flow cleanup callbacks registered during setup
    for cleanup_callback in hass.data[DOMAIN][DATA_CLEANUP_CALLBACKS]:
        cleanup_callback()
    listener = hass.data[DOMAIN][DATA_LISTENER]
    listener()
    rfx_object = hass.data[DOMAIN][DATA_RFXOBJECT]
    # close_connection blocks, so push it to the executor
    await hass.async_add_executor_job(rfx_object.close_connection)
    hass.data.pop(DOMAIN)
    return True
def _create_rfx(config):
    """Construct a rfx object based on config (TCP if a port is set, else serial)."""
    if config[CONF_PORT] is not None:
        # If port is set then we create a TCP connection
        rfx = rfxtrxmod.Connect(
            (config[CONF_HOST], config[CONF_PORT]),
            None,
            transport_protocol=rfxtrxmod.PyNetworkTransport,
        )
    else:
        rfx = rfxtrxmod.Connect(config[CONF_DEVICE], None)
    return rfx
def _get_device_lookup(devices):
    """Map device_id tuples to their event config, skipping bad event codes."""
    lookup = {}
    for event_code, event_config in devices.items():
        event = get_rfx_object(event_code)
        if event is None:
            # unparseable event code -- leave it out of the lookup
            continue
        device_id = get_device_id(
            event.device, data_bits=event_config.get(CONF_DATA_BITS)
        )
        lookup[device_id] = event_config
    return lookup
async def async_setup_internal(hass, entry: config_entries.ConfigEntry):
    """Set up the RFXtrx component: connect, wire event handling, register send."""
    config = entry.data
    # Initialize library; raises asyncio.TimeoutError if the device is silent.
    async with async_timeout.timeout(30):
        rfx_object = await hass.async_add_executor_job(_create_rfx, config)
    # Setup some per device config
    devices = _get_device_lookup(config[CONF_DEVICES])
    # Declare the Handle event
    @callback
    def async_handle_receive(event):
        """Handle received messages from RFXtrx gateway."""
        # Log RFXCOM event
        if not event.device.id_string:
            return
        event_data = {
            "packet_type": event.device.packettype,
            "sub_type": event.device.subtype,
            "type_string": event.device.type_string,
            "id_string": event.device.id_string,
            "data": binascii.hexlify(event.data).decode("ASCII"),
            "values": getattr(event, "values", None),
        }
        _LOGGER.debug("Receive RFXCOM event: %s", event_data)
        data_bits = get_device_data_bits(event.device, devices)
        device_id = get_device_id(event.device, data_bits=data_bits)
        # Register new devices
        if config[CONF_AUTOMATIC_ADD] and device_id not in devices:
            _add_device(event, device_id)
        # Callback to HA registered components.
        hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_EVENT, event, device_id)
        # Signal event to any other listeners
        fire_event = devices.get(device_id, {}).get(CONF_FIRE_EVENT)
        if fire_event:
            hass.bus.async_fire(EVENT_RFXTRX_EVENT, event_data)
    @callback
    def _add_device(event, device_id):
        """Add a device to config entry."""
        config = DEVICE_DATA_SCHEMA({})
        config[CONF_DEVICE_ID] = device_id
        # deep-copy so the entry's stored dict is not mutated in place
        data = entry.data.copy()
        data[CONF_DEVICES] = copy.deepcopy(entry.data[CONF_DEVICES])
        event_code = binascii.hexlify(event.data).decode("ASCII")
        data[CONF_DEVICES][event_code] = config
        hass.config_entries.async_update_entry(entry=entry, data=data)
        devices[device_id] = config
    def _shutdown_rfxtrx(event):
        """Close connection with RFXtrx."""
        rfx_object.close_connection()
    listener = hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_rfxtrx)
    hass.data[DOMAIN][DATA_LISTENER] = listener
    hass.data[DOMAIN][DATA_RFXOBJECT] = rfx_object
    # library callback arrives on its own thread -> marshal into the loop
    rfx_object.event_callback = lambda event: hass.add_job(async_handle_receive, event)
    def send(call):
        """Service handler: transmit a raw event over the transceiver."""
        event = call.data[ATTR_EVENT]
        rfx_object.transport.send(event)
    hass.services.async_register(DOMAIN, SERVICE_SEND, send, schema=SERVICE_SEND_SCHEMA)
def get_rfx_object(packetid):
    """Return the RFXObject with the packetid.

    Returns None when *packetid* is not valid hex or does not parse as a
    known low-level packet.
    """
    try:
        binarypacket = bytearray.fromhex(packetid)
    except ValueError:
        return None
    pkt = rfxtrxmod.lowlevel.parse(binarypacket)
    if pkt is None:
        return None
    # wrap the low-level packet in the matching high-level event type
    if isinstance(pkt, rfxtrxmod.lowlevel.SensorPacket):
        obj = rfxtrxmod.SensorEvent(pkt)
    elif isinstance(pkt, rfxtrxmod.lowlevel.Status):
        obj = rfxtrxmod.StatusEvent(pkt)
    else:
        obj = rfxtrxmod.ControlEvent(pkt)
    # keep the raw bytes for logging / re-transmission
    obj.data = binarypacket
    return obj
def get_pt2262_deviceid(device_id, nb_data_bits):
    """Extract and return the address bits from a Lighting4/PT2262 packet.

    Clears the lowest *nb_data_bits* of the final byte and returns the
    hex-encoded result as bytes; returns None on bad input.
    """
    if nb_data_bits is None:
        return
    try:
        raw = bytearray.fromhex(device_id)
    except ValueError:
        return None
    # zero the data bits, keeping only the address bits of the last byte
    raw[-1] &= 0xFF & ~((1 << nb_data_bits) - 1)
    return binascii.hexlify(raw)
def get_pt2262_cmd(device_id, data_bits):
    """Extract and return the data bits from a Lighting4/PT2262 packet.

    Returns the command as a hex string (e.g. '0xf'), or None when
    *device_id* is not valid hex.
    """
    try:
        raw = bytearray.fromhex(device_id)
    except ValueError:
        return None
    # keep only the lowest data_bits of the final byte
    low_mask = 0xFF & ((1 << data_bits) - 1)
    return hex(raw[-1] & low_mask)
def get_device_data_bits(device, devices):
    """Deduce data bits for device based on a cache of device bits.

    Only Lighting4/PT2262 packets encode data bits; for any other packet
    type this returns None.
    """
    data_bits = None
    if device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
        for device_id, entity_config in devices.items():
            bits = entity_config.get(CONF_DATA_BITS)
            # a configured device matches when masking with its bits
            # reproduces the stored device_id
            if get_device_id(device, bits) == device_id:
                data_bits = bits
                break
    return data_bits
def find_possible_pt2262_device(device_ids, device_id):
    """Look for the device which id matches the given device_id parameter.

    Compares *device_id* against each known id of the same length and, on a
    partial prefix match, logs a suggested PT2262 configuration and returns
    the matching known id. Returns None when nothing matches.
    """
    for dev_id in device_ids:
        if len(dev_id) == len(device_id):
            size = None
            # index of the last position where the two ids still agree
            for i, (char1, char2) in enumerate(zip(dev_id, device_id)):
                if char1 != char2:
                    break
                size = i
            if size is not None:
                # number of trailing hex digits that differ; each hex digit
                # carries 4 data bits in the suggested configuration
                size = len(dev_id) - size - 1
                _LOGGER.info(
                    "rfxtrx: found possible device %s for %s "
                    "with the following configuration:\n"
                    "data_bits=%d\n"
                    "command_on=0x%s\n"
                    "command_off=0x%s\n",
                    device_id,
                    dev_id,
                    size * 4,
                    dev_id[-size:],
                    device_id[-size:],
                )
                return dev_id
    return None
def get_device_id(device, data_bits=None):
    """Calculate a device id for device.

    Returns a (packettype, subtype, id_string) tuple of hex strings; for
    Lighting4 devices with known data bits the id is masked to its address
    bits so commands map onto one logical device.
    """
    id_string = device.id_string
    if data_bits and device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
        masked_id = get_pt2262_deviceid(id_string, data_bits)
        if masked_id:
            id_string = masked_id.decode("ASCII")
    return (f"{device.packettype:x}", f"{device.subtype:x}", id_string)
def connect_auto_add(hass, entry_data, callback_fun):
    """Connect to dispatcher for automatic add.

    Registers *callback_fun* for RFXtrx events only when automatic add is
    enabled, and queues the disconnect for entry unload.
    """
    if entry_data[CONF_AUTOMATIC_ADD]:
        hass.data[DOMAIN][DATA_CLEANUP_CALLBACKS].append(
            hass.helpers.dispatcher.async_dispatcher_connect(SIGNAL_EVENT, callback_fun)
        )
class RfxtrxEntity(RestoreEntity):
    """Represents a Rfxtrx device.

    Contains the common logic for Rfxtrx lights and switches.
    """

    def __init__(self, device, device_id, event=None):
        """Initialize the device.

        :param device: the rfxtrx library device object
        :param device_id: (packettype, subtype, id_string) tuple
        :param event: optional initial event to apply on add
        """
        self._name = f"{device.type_string} {device.id_string}"
        self._device = device
        self._event = event
        self._device_id = device_id
        self._unique_id = "_".join(x for x in self._device_id)

    async def async_added_to_hass(self):
        """Restore RFXtrx device state (ON/OFF)."""
        if self._event:
            self._apply_event(self._event)
        # react to incoming transceiver events
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_EVENT, self._handle_event
            )
        )
        # allow the integration to remove this entity on demand
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                f"{DOMAIN}_{CONF_REMOVE_DEVICE}_{self._device_id}", self.async_remove
            )
        )

    @property
    def should_poll(self):
        """No polling needed for a RFXtrx switch."""
        return False

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the device state attributes (last raw event as hex)."""
        if not self._event:
            return None
        return {ATTR_EVENT: "".join(f"{x:02x}" for x in self._event.data)}

    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return True

    @property
    def unique_id(self):
        """Return unique identifier of remote device."""
        return self._unique_id

    @property
    def device_info(self):
        """Return the device info."""
        return {
            "identifiers": {(DOMAIN, *self._device_id)},
            "name": f"{self._device.type_string} {self._device.id_string}",
            "model": self._device.type_string,
        }

    def _apply_event(self, event):
        """Apply a received event."""
        self._event = event

    @callback
    def _handle_event(self, event, device_id):
        """Handle a reception of data, overridden by other classes."""
class RfxtrxCommandEntity(RfxtrxEntity):
    """Represents a Rfxtrx device that accepts commands.

    Contains the common logic for Rfxtrx lights and switches.
    """

    def __init__(self, device, device_id, signal_repetitions=1, event=None):
        """Initialize a switch or light device."""
        super().__init__(device, device_id, event=event)
        # number of times each command is re-sent for reliability
        self.signal_repetitions = signal_repetitions
        self._state = None

    async def _async_send(self, fun, *args):
        """Send a command *signal_repetitions* times via the transceiver."""
        rfx_object = self.hass.data[DOMAIN][DATA_RFXOBJECT]
        for _ in range(self.signal_repetitions):
            # transport calls block, so run them in the executor
            await self.hass.async_add_executor_job(fun, rfx_object.transport, *args)
| tboyce021/home-assistant | homeassistant/components/rfxtrx/__init__.py | Python | apache-2.0 | 16,421 |
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from mock import Mock
from tests.utils import (
MockHttpResource, DeferredMockCallable, setup_test_homeserver
)
from synapse.api.filtering import Filter
from synapse.events import FrozenEvent
user_localpart = "test_user"
def MockEvent(**kwargs):
    """Build a FrozenEvent, defaulting event_id and type to fake values."""
    kwargs.setdefault("event_id", "fake_event_id")
    kwargs.setdefault("type", "fake_type")
    return FrozenEvent(kwargs)
class FilteringTestCase(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.mock_federation_resource = MockHttpResource()
self.mock_http_client = Mock(spec=[])
self.mock_http_client.put_json = DeferredMockCallable()
hs = yield setup_test_homeserver(
handlers=None,
http_client=self.mock_http_client,
keyring=Mock(),
)
self.filtering = hs.get_filtering()
self.datastore = hs.get_datastore()
def test_definition_types_works_with_literals(self):
    """An event whose type is listed literally in 'types' passes."""
    definition = {"types": ["m.room.message", "org.matrix.foo.bar"]}
    event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar")
    self.assertTrue(Filter(definition).check(event))
def test_definition_types_works_with_wildcards(self):
    """A wildcard entry in 'types' matches events under that prefix."""
    definition = {"types": ["m.*", "org.matrix.foo.bar"]}
    event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar")
    self.assertTrue(Filter(definition).check(event))
def test_definition_types_works_with_unknowns(self):
    """An event type absent from 'types' is rejected."""
    definition = {"types": ["m.room.message", "org.matrix.foo.bar"]}
    event = MockEvent(
        sender="@foo:bar",
        type="now.for.something.completely.different",
        room_id="!foo:bar",
    )
    self.assertFalse(Filter(definition).check(event))
def test_definition_not_types_works_with_literals(self):
    """An event whose type is listed literally in 'not_types' is rejected."""
    definition = {"not_types": ["m.room.message", "org.matrix.foo.bar"]}
    event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar")
    self.assertFalse(Filter(definition).check(event))
def test_definition_not_types_works_with_wildcards(self):
    """A wildcard entry in 'not_types' rejects events under that prefix."""
    definition = {"not_types": ["m.room.message", "org.matrix.*"]}
    event = MockEvent(
        sender="@foo:bar", type="org.matrix.custom.event", room_id="!foo:bar"
    )
    self.assertFalse(Filter(definition).check(event))
def test_definition_not_types_works_with_unknowns(self):
    """An event type matching nothing in 'not_types' passes."""
    definition = {"not_types": ["m.*", "org.*"]}
    event = MockEvent(sender="@foo:bar", type="com.nom.nom.nom", room_id="!foo:bar")
    self.assertTrue(Filter(definition).check(event))
def test_definition_not_types_takes_priority_over_types(self):
    """'not_types' wins when an event type appears in both lists."""
    definition = {
        "not_types": ["m.*", "org.*"],
        "types": ["m.room.message", "m.room.topic"],
    }
    event = MockEvent(sender="@foo:bar", type="m.room.topic", room_id="!foo:bar")
    self.assertFalse(Filter(definition).check(event))
def test_definition_senders_works_with_literals(self):
    """An event from a sender listed in 'senders' passes."""
    definition = {"senders": ["@flibble:wibble"]}
    event = MockEvent(
        sender="@flibble:wibble", type="com.nom.nom.nom", room_id="!foo:bar"
    )
    self.assertTrue(Filter(definition).check(event))
def test_definition_senders_works_with_unknowns(self):
definition = {
"senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@challenger:appears",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_senders_works_with_literals(self):
definition = {
"not_senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@flibble:wibble",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_senders_works_with_unknowns(self):
definition = {
"not_senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@challenger:appears",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_senders_takes_priority_over_senders(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets", "@misspiggy:muppets"]
}
event = MockEvent(
sender="@misspiggy:muppets",
type="m.room.topic",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_rooms_works_with_literals(self):
definition = {
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!secretbase:unknown"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_rooms_works_with_unknowns(self):
definition = {
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_rooms_works_with_literals(self):
definition = {
"not_rooms": ["!anothersecretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_rooms_works_with_unknowns(self):
definition = {
"not_rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_rooms_takes_priority_over_rooms(self):
definition = {
"not_rooms": ["!secretbase:unknown"],
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!secretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_combined_event(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@kermit:muppets", # yup
type="m.room.message", # yup
room_id="!stage:unknown" # yup
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_combined_event_bad_sender(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@misspiggy:muppets", # nope
type="m.room.message", # yup
room_id="!stage:unknown" # yup
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_combined_event_bad_room(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@kermit:muppets", # yup
type="m.room.message", # yup
room_id="!piggyshouse:muppets" # nope
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_combined_event_bad_type(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@kermit:muppets", # yup
type="muppets.misspiggy.kisses", # nope
room_id="!stage:unknown" # yup
)
self.assertFalse(
Filter(definition).check(event)
)
@defer.inlineCallbacks
def test_filter_presence_match(self):
user_filter_json = {
"presence": {
"types": ["m.*"]
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="m.profile",
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_presence(events=events)
self.assertEquals(events, results)
@defer.inlineCallbacks
def test_filter_presence_no_match(self):
user_filter_json = {
"presence": {
"types": ["m.*"]
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart + "2",
user_filter=user_filter_json,
)
event = MockEvent(
event_id="$asdasd:localhost",
sender="@foo:bar",
type="custom.avatar.3d.crazy",
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart + "2",
filter_id=filter_id,
)
results = user_filter.filter_presence(events=events)
self.assertEquals([], results)
@defer.inlineCallbacks
def test_filter_room_state_match(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="m.room.topic",
room_id="!foo:bar"
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_room_state(events=events)
self.assertEquals(events, results)
@defer.inlineCallbacks
def test_filter_room_state_no_match(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="org.matrix.custom.event",
room_id="!foo:bar"
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_room_state(events)
self.assertEquals([], results)
def test_filter_rooms(self):
definition = {
"rooms": ["!allowed:example.com", "!excluded:example.com"],
"not_rooms": ["!excluded:example.com"],
}
room_ids = [
"!allowed:example.com", # Allowed because in rooms and not in not_rooms.
"!excluded:example.com", # Disallowed because in not_rooms.
"!not_included:example.com", # Disallowed because not in rooms.
]
filtered_room_ids = list(Filter(definition).filter_rooms(room_ids))
self.assertEquals(filtered_room_ids, ["!allowed:example.com"])
@defer.inlineCallbacks
def test_add_filter(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.filtering.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
self.assertEquals(filter_id, 0)
self.assertEquals(user_filter_json, (
yield self.datastore.get_user_filter(
user_localpart=user_localpart,
filter_id=0,
)
))
@defer.inlineCallbacks
def test_get_filter(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
self.assertEquals(filter.get_filter_json(), user_filter_json)
self.assertRegexpMatches(repr(filter), r"<FilterCollection \{.*\}>")
| TribeMedia/synapse | tests/api/test_filtering.py | Python | apache-2.0 | 15,549 |
#!/usr/bin/env python3
from itertools import combinations, chain
from enum import Enum, auto
# Travis OS identifiers used throughout this script.
LINUX = 'linux'
OSX = 'osx'
WINDOWS = 'windows'
# Travis CPU architecture identifiers.
AMD64 = 'amd64'
ARM64 = 'arm64'
PPC64LE = 'ppc64le'
TRAVIS_TEMPLATE = """\
# This config file is generated by ./scripts/gen_travis.py.
# Do not edit by hand.
# We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also
# the software provided by 'generic' is simply not needed for our tests.
# Differences are explained here:
# https://docs.travis-ci.com/user/languages/minimal-and-generic/
language: minimal
dist: focal
jobs:
include:
{jobs}
before_install:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then
source ./scripts/$TRAVIS_OS_NAME/before_install.sh
fi
before_script:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then
source ./scripts/$TRAVIS_OS_NAME/before_script.sh
else
scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
autoconf
# If COMPILER_FLAGS are not empty, add them to CC and CXX
./configure ${{COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" \
CXX="$CXX $COMPILER_FLAGS"}} $CONFIGURE_FLAGS
make -j3
make -j3 tests
fi
script:
- |-
if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then
source ./scripts/$TRAVIS_OS_NAME/script.sh
else
make check
fi
"""
class Option(object):
    """One knob in the build matrix: a compiler, flag, conf string or feature.

    Options compare by (type, value) so that the exclusion logic in
    included() can match them structurally.
    """
    class Type(Enum):
        # Deriving from Enum (which was imported but previously unused) gives
        # the auto() sentinels real values and a useful repr; a bare class
        # with auto() members only compares by object identity.
        COMPILER = auto()
        COMPILER_FLAG = auto()
        CONFIGURE_FLAG = auto()
        MALLOC_CONF = auto()
        FEATURE = auto()

    def __init__(self, type, value):
        self.type = type
        self.value = value

    @staticmethod
    def as_compiler(value):
        return Option(Option.Type.COMPILER, value)

    @staticmethod
    def as_compiler_flag(value):
        return Option(Option.Type.COMPILER_FLAG, value)

    @staticmethod
    def as_configure_flag(value):
        return Option(Option.Type.CONFIGURE_FLAG, value)

    @staticmethod
    def as_malloc_conf(value):
        return Option(Option.Type.MALLOC_CONF, value)

    @staticmethod
    def as_feature(value):
        return Option(Option.Type.FEATURE, value)

    def __eq__(self, obj):
        return (isinstance(obj, Option) and obj.type == self.type
            and obj.value == self.value)

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None in Python 3; restore
        # hashability, consistent with __eq__, so Options can go in sets.
        return hash((self.type, self.value))
# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
# travis though, we don't test all 2**7 = 128 possible combinations of these;
# instead, we only test combinations of up to 2 'unusual' settings, under the
# hope that bugs involving interactions of such settings are rare.
MAX_UNUSUAL_OPTIONS = 2
# Compiler choices; GCC is the default and so is not listed as "unusual".
GCC = Option.as_compiler('CC=gcc CXX=g++')
CLANG = Option.as_compiler('CC=clang CXX=clang++')
CL = Option.as_compiler('CC=cl.exe CXX=cl.exe')
compilers_unusual = [CLANG,]
# Feature toggles consumed by the per-OS helper scripts.
CROSS_COMPILE_32BIT = Option.as_feature('CROSS_COMPILE_32BIT')
feature_unusuals = [CROSS_COMPILE_32BIT]
# Non-default ./configure switches worth exercising.
configure_flag_unusuals = [Option.as_configure_flag(opt) for opt in (
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
    '--disable-libdl',
    '--enable-opt-safety-checks',
    '--with-lg-page=16',
)]
# Non-default runtime malloc_conf settings worth exercising.
malloc_conf_unusuals = [Option.as_malloc_conf(opt) for opt in (
    'tcache:false',
    'dss:primary',
    'percpu_arena:percpu',
    'background_thread:true',
)]
all_unusuals = (compilers_unusual + feature_unusuals
    + configure_flag_unusuals + malloc_conf_unusuals)
def get_extra_cflags(os, compiler):
    """Return the EXTRA_CFLAGS list appropriate for this OS/compiler pair."""
    if os == WINDOWS:
        # For non-CL compilers under Windows (for now it's only MinGW-GCC),
        # -fcommon needs to be specified to correctly handle multiple
        # 'malloc_conf' symbols and such, which are declared weak under Linux.
        # Weak symbols don't work with MinGW-GCC.
        return [] if compiler == CL.value else ['-fcommon']
    # We get some spurious errors when -Warray-bounds is enabled.
    cflags = ['-Werror', '-Wno-array-bounds']
    if compiler == CLANG.value or os == OSX:
        cflags.extend([
            '-Wno-unknown-warning-option',
            '-Wno-ignored-attributes',
        ])
    if os == OSX:
        cflags.append('-Wno-deprecated-declarations')
    return cflags
# Formats a job from a combination of flags
def format_job(os, arch, combination):
    """Render one YAML jobs-matrix entry (os, arch, env) for `combination`."""
    # Split the combination out by option kind.
    compilers = [x.value for x in combination if x.type == Option.Type.COMPILER]
    assert(len(compilers) <= 1)
    compiler_flags = [x.value for x in combination if x.type == Option.Type.COMPILER_FLAG]
    configure_flags = [x.value for x in combination if x.type == Option.Type.CONFIGURE_FLAG]
    malloc_conf = [x.value for x in combination if x.type == Option.Type.MALLOC_CONF]
    features = [x.value for x in combination if x.type == Option.Type.FEATURE]
    # malloc_conf settings are folded into a single configure flag.
    if len(malloc_conf) > 0:
        configure_flags.append('--with-malloc-conf=' + ','.join(malloc_conf))
    if not compilers:
        compiler = GCC.value
    else:
        compiler = compilers[0]
    extra_environment_vars = ''
    cross_compile = CROSS_COMPILE_32BIT.value in features
    # 32-bit cross compilation on Linux is done simply by adding -m32.
    if os == LINUX and cross_compile:
        compiler_flags.append('-m32')
    features_str = ' '.join([' {}=yes'.format(feature) for feature in features])
    # Emit ' NAME="a b c"' for a non-empty list, or nothing at all.
    stringify = lambda arr, name: ' {}="{}"'.format(name, ' '.join(arr)) if arr else ''
    env_string = '{}{}{}{}{}{}'.format(
        compiler,
        features_str,
        stringify(compiler_flags, 'COMPILER_FLAGS'),
        stringify(configure_flags, 'CONFIGURE_FLAGS'),
        stringify(get_extra_cflags(os, compiler), 'EXTRA_CFLAGS'),
        extra_environment_vars)
    job = '    - os: {}\n'.format(os)
    job += '      arch: {}\n'.format(arch)
    job += '      env: {}'.format(env_string)
    return job
def generate_unusual_combinations(unusuals, max_unusual_opts):
    """
    Generates different combinations of non-standard compilers, compiler flags,
    configure flags and malloc_conf settings.
    @param max_unusual_opts: Limit of unusual options per combination.
    """
    per_size = (combinations(unusuals, size)
                for size in range(max_unusual_opts + 1))
    return chain.from_iterable(per_size)
def included(combination, exclude):
    """
    Checks if the combination of options should be included in the Travis
    testing matrix.
    @param exclude: A list of options to be avoided.
    """
    for banned in exclude:
        if banned in combination:
            return False
    return True
def generate_jobs(os, arch, exclude, max_unusual_opts, unusuals=all_unusuals):
    """Render one job entry per acceptable option combination, newline-joined."""
    combos = generate_unusual_combinations(unusuals, max_unusual_opts)
    return '\n'.join(format_job(os, arch, combo)
                     for combo in combos if included(combo, exclude))
def generate_linux(arch):
    """Emit the Linux section of the matrix for the given architecture."""
    # Only generate 2 unusual options for AMD64 to reduce matrix size
    depth = MAX_UNUSUAL_OPTIONS if arch == AMD64 else 1
    if arch == PPC64LE:
        # Avoid 32 bit builds and clang on PowerPC
        banned = (CROSS_COMPILE_32BIT, CLANG,)
    else:
        banned = []
    return generate_jobs(LINUX, arch, banned, depth)
def generate_macos(arch):
    """Emit the macOS section: depth 1, minus options unsupported on Darwin."""
    banned = [Option.as_malloc_conf(conf) for conf in (
        'dss:primary',
        'percpu_arena:percpu',
        'background_thread:true')]
    banned.append(Option.as_configure_flag('--enable-prof'))
    banned.append(CLANG)
    return generate_jobs(OSX, arch, banned, 1)
def generate_windows(arch):
    """Emit the Windows section from a reduced set of interesting options."""
    interesting = (
        Option.as_configure_flag('--enable-debug'),
        CL,
        CROSS_COMPILE_32BIT,
    )
    return generate_jobs(WINDOWS, arch, (), 3, interesting)
def get_manual_jobs():
    """Return hand-curated job entries appended verbatim to the matrix."""
    return """\
    # Development build
    - os: linux
      env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
    --disable-cache-oblivious --enable-stats --enable-log --enable-prof" \
    EXTRA_CFLAGS="-Werror -Wno-array-bounds"
    # --enable-expermental-smallocx:
    - os: linux
      env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
    --enable-experimental-smallocx --enable-stats --enable-prof" \
    EXTRA_CFLAGS="-Werror -Wno-array-bounds"
    """
def main():
    """Assemble every job section and print the finished .travis.yml."""
    sections = [
        generate_linux(AMD64),
        generate_linux(PPC64LE),
        generate_macos(AMD64),
        #generate_windows(AMD64),
        get_manual_jobs(),
    ]
    print(TRAVIS_TEMPLATE.format(jobs='\n'.join(sections)))
if __name__ == '__main__':
    main()
| arangodb/arangodb | 3rdParty/jemalloc/v5.2.1/scripts/gen_travis.py | Python | apache-2.0 | 8,683 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from tempest.lib import exceptions
from neutronclient.tests.functional import base
class SimpleReadOnlyNeutronClientTest(base.ClientTestBase):
    """This is a first pass at a simple read only python-neutronclient test.
    This only exercises client commands that are read only.
    This should test commands:
    * as a regular user
    * as a admin user
    * with and without optional parameters
    * initially just check return codes, and later test command outputs
    """
    def test_admin_fake_action(self):
        # An unknown command must make the CLI exit non-zero.
        self.assertRaises(exceptions.CommandFailed,
                          self.neutron,
                          'this-does-neutron-exist')
    # NOTE(mestery): Commands in order listed in 'neutron help'
    # Optional arguments:
    def test_neutron_fake_action(self):
        self.assertRaises(exceptions.CommandFailed,
                          self.neutron,
                          'this-does-not-exist')
    def test_neutron_net_list(self):
        net_list = self.parser.listing(self.neutron('net-list'))
        self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
    def test_neutron_ext_list(self):
        ext = self.parser.listing(self.neutron('ext-list'))
        self.assertTableStruct(ext, ['alias', 'name'])
    def test_neutron_dhcp_agent_list_hosting_net(self):
        self.neutron('dhcp-agent-list-hosting-net',
                     params='private')
    def test_neutron_agent_list(self):
        agents = self.parser.listing(self.neutron('agent-list'))
        field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
        self.assertTableStruct(agents, field_names)
    def test_neutron_floatingip_list(self):
        self.neutron('floatingip-list')
    def test_neutron_meter_label_list(self):
        self.neutron('meter-label-list')
    def test_neutron_meter_label_rule_list(self):
        self.neutron('meter-label-rule-list')
    def _test_neutron_lbaas_command(self, command):
        # LBaaS may not be deployed; a 404 from the server is acceptable,
        # any other failure is a real error.
        try:
            self.neutron(command)
        except exceptions.CommandFailed as e:
            if '404 Not Found' not in e.stderr:
                self.fail('%s: Unexpected failure.' % command)
    def test_neutron_lb_healthmonitor_list(self):
        self._test_neutron_lbaas_command('lb-healthmonitor-list')
    def test_neutron_lb_member_list(self):
        self._test_neutron_lbaas_command('lb-member-list')
    def test_neutron_lb_pool_list(self):
        self._test_neutron_lbaas_command('lb-pool-list')
    def test_neutron_lb_vip_list(self):
        self._test_neutron_lbaas_command('lb-vip-list')
    def test_neutron_net_external_list(self):
        net_ext_list = self.parser.listing(self.neutron('net-external-list'))
        self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
    def test_neutron_port_list(self):
        port_list = self.parser.listing(self.neutron('port-list'))
        self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
                                           'fixed_ips'])
    def test_neutron_quota_list(self):
        self.neutron('quota-list')
    def test_neutron_router_list(self):
        router_list = self.parser.listing(self.neutron('router-list'))
        self.assertTableStruct(router_list, ['id', 'name',
                                             'external_gateway_info'])
    def test_neutron_security_group_list(self):
        security_grp = self.parser.listing(self.neutron('security-group-list'))
        self.assertTableStruct(security_grp, ['id', 'name',
                                              'security_group_rules'])
    def test_neutron_security_group_rule_list(self):
        security_grp = self.parser.listing(self.neutron
                                           ('security-group-rule-list'))
        self.assertTableStruct(security_grp, ['id', 'security_group',
                                              'direction', 'ethertype',
                                              'port/protocol', 'remote'])
    def test_neutron_subnet_list(self):
        subnet_list = self.parser.listing(self.neutron('subnet-list'))
        self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
                                             'allocation_pools'])
    def test_neutron_firewall_list(self):
        firewall_list = self.parser.listing(self.neutron
                                            ('firewall-list'))
        self.assertTableStruct(firewall_list, ['id', 'name',
                                               'firewall_policy_id'])
    def test_neutron_firewall_policy_list(self):
        firewall_policy = self.parser.listing(self.neutron
                                              ('firewall-policy-list'))
        self.assertTableStruct(firewall_policy, ['id', 'name',
                                                 'firewall_rules'])
    def test_neutron_firewall_rule_list(self):
        firewall_rule = self.parser.listing(self.neutron
                                            ('firewall-rule-list'))
        self.assertTableStruct(firewall_rule, ['id', 'name',
                                               'firewall_policy_id',
                                               'summary', 'enabled'])
    def test_neutron_help(self):
        help_text = self.neutron('help')
        lines = help_text.split('\n')
        self.assertFirstLineStartsWith(lines, 'usage: neutron')
        commands = []
        cmds_start = lines.index('Commands for API v2.0:')
        # Raw string: the previous non-raw literal relied on the invalid
        # escape sequences '\-' and '\_', which newer Pythons reject.
        command_pattern = re.compile(r'^ {2}([a-z0-9\-_]+)')
        for line in lines[cmds_start:]:
            match = command_pattern.match(line)
            if match:
                commands.append(match.group(1))
        commands = set(commands)
        wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
                               'router-show', 'agent-update', 'help'))
        # Every wanted command must appear in the help output.
        self.assertFalse(wanted_commands - commands)
    # Optional arguments:
    def test_neutron_version(self):
        self.neutron('', flags='--version')
    def test_neutron_debug_net_list(self):
        self.neutron('net-list', flags='--debug')
    def test_neutron_quiet_net_list(self):
        self.neutron('net-list', flags='--quiet')
| eayunstack/python-neutronclient | neutronclient/tests/functional/core/test_readonly_neutron.py | Python | apache-2.0 | 6,814 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import lxml.html
import re
import urllib2
from urlparse import urlparse, urlunparse
from django.core.urlresolvers import reverse
from desktop.lib.view_util import format_duration_in_millis
from desktop.lib import i18n
from django.utils.html import escape
from filebrowser.views import location_to_url
from hadoop import job_tracker
from hadoop import confparse
from hadoop.api.jobtracker.ttypes import JobNotFoundException
import hadoop.api.jobtracker.ttypes as ttypes
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
LOGGER = logging.getLogger(__name__)
def can_view_job(username, job):
  """True if `username` may view `job`; an ACL of '*' admits everyone."""
  viewers = get_acls(job).get('mapreduce.job.acl-view-job', '')
  if viewers == '*':
    return True
  return username in viewers.split(',')
def can_modify_job(username, job):
  """True if `username` may modify `job`; an ACL of '*' admits everyone."""
  modifiers = get_acls(job).get('mapreduce.job.acl-modify-job', '')
  if modifiers == '*':
    return True
  return username in modifiers.split(',')
def get_acls(job):
  """Return the job's ACL source: `acls` on MR2 jobs, the full conf otherwise."""
  return job.acls if job.is_mr2 else job.full_job_conf
class JobLinkage(object):
  """
  A thin representation of a job, without much of the details.
  Its purpose is to wrap a JobID to allow us to get further
  information from Hadoop, without instantiating a full Job object
  (which requires talking to Hadoop).
  """
  def __init__(self, jobtracker, jobid):
    """
    JobLinkage(jobtracker, jobid) -> JobLinkage
    The jobid is the jobid string (not the thrift jobid)
    """
    self._jobtracker = jobtracker
    self.jobId = jobid
    # Short form keeps just the trailing "<epoch>_<sequence>" of the id.
    self.jobId_short = "_".join(jobid.split("_")[-2:])
    self.is_mr2 = False

  def get_task(self, task_id):
    """Retrieve a TaskInProgress from hadoop."""
    thrift_job_id = self._jobtracker.thriftjobid_from_string(self.jobId)
    thrift_task_id = self._jobtracker.thrifttaskid_from_string(task_id)
    ttask = self._jobtracker.get_task(thrift_job_id, thrift_task_id)
    return Task(ttask, self._jobtracker)
class Job(JobLinkage):
"""
Creates a Job instance pulled from the job tracker Thrift interface.
"""
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
@staticmethod
def from_id(jt, jobid, is_finished=False):
"""
Returns a Job instance given a job tracker interface and an id. The job tracker interface is typically
located in request.jt.
"""
try:
thriftjob = jt.get_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException:
try:
thriftjob = jt.get_retired_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException, e:
raise PopupException(_("Could not find job with id %(jobid)s.") % {'jobid': jobid}, detail=e)
return Job(jt, thriftjob)
@staticmethod
def from_thriftjob(jt, thriftjob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that job tracker interface.
The job tracker interface is typically located in request.jt
"""
return Job(jt, thriftjob)
def __init__(self, jt, thriftJob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that
job tracker interface. The job tracker interface is typically located in request.jt
"""
JobLinkage.__init__(self, jt, thriftJob.jobID.asString)
self.jt = jt
self.job = thriftJob
self.tasks = []
if self.job.tasks is not None:
self.tasks = TaskList.from_thriftTaskList(self.job.tasks, jt)
self.task_map = dict( (task.taskId, task) for task in self.tasks )
self._counters = None
self._conf_keys = None
self._full_job_conf = None
self._init_attributes()
self.is_retired = hasattr(thriftJob, 'is_retired')
self.is_mr2 = False
self.applicationType = 'MR2'
@property
def counters(self):
if self.is_retired:
self._counters = {}
elif self._counters is None:
rollups = self.jt.get_job_counter_rollups(self.job.jobID)
# We get back a structure with counter lists for maps, reduces, and total
# and we need to invert this
def aggregate_counters(ctrs_from_jt, key, target):
for group in ctrs_from_jt.groups:
if group.name not in target:
target[group.name] = {
'name': group.name,
'displayName': group.displayName,
'counters': {}
}
agg_counters = target[group.name]['counters']
for counter in group.counters.itervalues():
if counter.name not in agg_counters:
agg_counters[counter.name] = {
'name': counter.name,
'displayName': counter.displayName,
}
agg_counters[counter.name][key] = counter.value
self._counters = {}
aggregate_counters(rollups.mapCounters, "map", self._counters)
aggregate_counters(rollups.reduceCounters, "reduce", self._counters)
aggregate_counters(rollups.jobCounters, "total", self._counters)
return self._counters
@property
def conf_keys(self):
if self._conf_keys is None:
self._initialize_conf_keys()
return self._conf_keys
@property
def full_job_conf(self):
if self._full_job_conf is None:
self._initialize_conf_keys()
return self._full_job_conf
def _init_attributes(self):
self.queueName = i18n.smart_unicode(self.job.profile.queueName)
self.jobName = i18n.smart_unicode(self.job.profile.name)
self.user = i18n.smart_unicode(self.job.profile.user)
self.mapProgress = self.job.status.mapProgress
self.reduceProgress = self.job.status.reduceProgress
self.setupProgress = self.job.status.setupProgress
self.cleanupProgress = self.job.status.cleanupProgress
if self.job.desiredMaps == 0:
maps_percent_complete = 0
else:
maps_percent_complete = int(round(float(self.job.finishedMaps) / self.job.desiredMaps * 100))
self.desiredMaps = self.job.desiredMaps
if self.job.desiredReduces == 0:
reduces_percent_complete = 0
else:
reduces_percent_complete = int(round(float(self.job.finishedReduces) / self.job.desiredReduces * 100))
self.desiredReduces = self.job.desiredReduces
self.maps_percent_complete = maps_percent_complete
self.finishedMaps = self.job.finishedMaps
self.finishedReduces = self.job.finishedReduces
self.reduces_percent_complete = reduces_percent_complete
self.startTimeMs = self.job.startTime
self.startTimeFormatted = format_unixtime_ms(self.job.startTime)
self.launchTimeMs = self.job.launchTime
self.launchTimeFormatted = format_unixtime_ms(self.job.launchTime)
self.finishTimeMs = self.job.finishTime
self.finishTimeFormatted = format_unixtime_ms(self.job.finishTime)
self.status = self.job.status.runStateAsString
self.priority = self.job.priorityAsString
self.jobFile = self.job.profile.jobFile
finishTime = self.job.finishTime
if finishTime == 0:
finishTime = datetime.datetime.now()
else:
finishTime = datetime.datetime.fromtimestamp(finishTime / 1000)
self.duration = finishTime - datetime.datetime.fromtimestamp(self.job.startTime / 1000)
diff = int(finishTime.strftime("%s")) * 1000 - self.startTimeMs
self.durationFormatted = format_duration_in_millis(diff)
self.durationInMillis = diff
def kill(self):
self.jt.kill_job(self.job.jobID)
def get_task(self, id):
try:
return self.task_map[id]
except:
return JobLinkage.get_task(self, id)
def filter_tasks(self, task_types=None, task_states=None, task_text=None):
"""
Filters the tasks of the job.
Pass in task_type and task_state as sets; None for "all".
task_text is used to search in the state, mostRecentState, and the ID.
"""
assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
def is_good_match(t):
if task_types is not None:
if t.task.taskID.taskTypeAsString.lower() not in task_types:
return False
if task_states is not None:
if t.state.lower() not in task_states:
return False
if task_text is not None:
tt_lower = task_text.lower()
if tt_lower not in t.state.lower() and tt_lower not in t.mostRecentState.lower() and tt_lower not in t.task.taskID.asString.lower():
return False
return True
return [ t for t in self.tasks if is_good_match(t) ]
def _initialize_conf_keys(self):
if self.is_retired:
self._conf_keys = {}
self._full_job_conf = {}
else:
conf_keys = [
'mapred.mapper.class',
'mapred.reducer.class',
'mapred.input.format.class',
'mapred.output.format.class',
'mapred.input.dir',
'mapred.output.dir',
]
jobconf = get_jobconf(self.jt, self.jobId)
self._full_job_conf = jobconf
self._conf_keys = {}
for k, v in jobconf.iteritems():
if k in conf_keys:
self._conf_keys[dots_to_camel_case(k)] = v
class TaskList(object):
  # Read-only, list-like wrapper around a thrift task list plus the
  # JobTracker handle needed to build Task objects from it.
  @staticmethod
  def select(jt, jobid, task_types, task_states, text, count, offset):
    """
    select(jt, jobid, task_types, task_states, text, count, offset) -> TaskList
    Retrieve a TaskList from Hadoop according to the given criteria.
    task_types is a set of job_tracker.VALID_TASK_TYPES. A value to None means everything.
    task_states is a set of job_tracker.VALID_TASK_STATES. A value to None means everything.
    """
    assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
    assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
    # None means "no restriction": expand to the full valid set.
    if task_types is None:
      task_types = job_tracker.VALID_TASK_TYPES
    if task_states is None:
      task_states = job_tracker.VALID_TASK_STATES
    tjobid = jt.thriftjobid_from_string(jobid)
    thrift_list = jt.get_task_list(tjobid, task_types, task_states, text, count, offset)
    return TaskList.from_thriftTaskList(thrift_list, jt)
  @staticmethod
  def from_thriftTaskList(thrift_task_list, jobtracker):
    """TaskList.from_thriftTaskList(thrift_task_list, jobtracker) -> TaskList
    """
    # Propagate None rather than wrapping it.
    if thrift_task_list is None:
      return None
    return TaskList(thrift_task_list, jobtracker)
  def __init__(self, tasklist, jobtracker):
    self.__tasklist = tasklist # The thrift task list
    self.__jt = jobtracker
    self.__init_attributes()
  def __init_attributes(self):
    # Eagerly wrap every thrift task; numTotalTasks may exceed len(tasks)
    # when the server returned a paged subset.
    self.__tasksSoFar = [ Task(t, self.__jt) for t in self.__tasklist.tasks ]
    self.__nTotalTasks = self.__tasklist.numTotalTasks
  def __iter__(self):
    return self.__tasksSoFar.__iter__()
  def __len__(self):
    return len(self.__tasksSoFar)
  def __getitem__(self, key):
    return self.__tasksSoFar[key]
  @property
  def tasks(self):
    # The wrapped Task objects fetched so far.
    return self.__tasksSoFar
  @property
  def numTotalTasks(self):
    # Server-side total, independent of how many were fetched.
    return self.__nTotalTasks
class Task(object):
  # Wraps a thrift TaskInProgress and its attempts for template-friendly access.
  def __getitem__(self, item):
    """
    For backwards-compatibility, resolve job["foo"] as job.foo
    """
    return getattr(self, item)
  def __init__(self, task, jt):
    self.task = task
    self.jt = jt
    self._init_attributes()
    # Map attempt id -> TaskAttempt for every status reported by the tracker.
    self.attempt_map = {}
    for id, attempt in self.task.taskStatuses.iteritems():
      ta = TaskAttempt(attempt, task=self)
      self.attempt_map[id] = ta
  @property
  def attempts(self):
    # All TaskAttempt wrappers, in dict order.
    return self.attempt_map.values()
  def _init_attributes(self):
    # Flatten commonly-used thrift fields into plain attributes.
    self.taskType = self.task.taskID.taskTypeAsString
    self.taskId = self.task.taskID.asString
    # Short form keeps just the trailing "<epoch>_<sequence>" of the id.
    self.taskId_short = "_".join(self.taskId.split("_")[-2:])
    self.startTimeMs = self.task.startTime
    self.startTimeFormatted = format_unixtime_ms(self.task.startTime)
    self.execStartTimeMs = self.task.execStartTime
    self.execStartTimeFormatted = format_unixtime_ms(self.task.execStartTime)
    self.execFinishTimeMs = self.task.execFinishTime
    self.execFinishTimeFormatted = format_unixtime_ms(self.task.execFinishTime)
    self.state = self.task.state
    assert self.state in job_tracker.VALID_TASK_STATES
    self.progress = self.task.progress
    # NOTE(review): duplicate assignment — taskId was already set above.
    self.taskId = self.task.taskID.asString
    self.jobId = self.task.taskID.jobID.asString
    self.taskAttemptIds = self.task.taskStatuses.keys()
    self.mostRecentState = self.task.mostRecentState
    self.diagnosticMap = self.task.taskDiagnosticData
    self.counters = self.task.counters
    self.failed = self.task.failed
    self.complete = self.task.complete
    self.is_mr2 = False
  def get_attempt(self, id):
    """
    Returns a TaskAttempt for a given id.
    """
    return self.attempt_map[id]
class TaskAttempt(object):
    """Wrapper around a thrift task-attempt status; can also fetch the
    attempt's logs from its TaskTracker over HTTP."""

    def __getitem__(self, item):
        """
        For backwards-compatibility, resolve task["foo"] as task.foo.
        """
        return getattr(self, item)

    def __init__(self, task_attempt, task):
        """
        :param task_attempt: thrift attempt status structure (must not be None)
        :param task: owning Task wrapper
        """
        assert task_attempt is not None
        self.task_attempt = task_attempt
        self.task = task
        self._init_attributes();

    def _init_attributes(self):
        """Flatten the thrift attempt structure into plain attributes."""
        self.taskType = self.task_attempt.taskID.taskID.taskTypeAsString
        self.attemptId = self.task_attempt.taskID.asString
        self.attemptId_short = "_".join(self.attemptId.split("_")[-2:])
        self.startTimeMs = self.task_attempt.startTime
        self.startTimeFormatted = format_unixtime_ms(self.task_attempt.startTime)
        self.finishTimeMs = self.task_attempt.finishTime
        self.finishTimeFormatted = format_unixtime_ms(self.task_attempt.finishTime)
        self.state = self.task_attempt.stateAsString.lower()
        self.taskTrackerId = self.task_attempt.taskTracker
        self.phase = self.task_attempt.phaseAsString
        self.progress = self.task_attempt.progress
        self.outputSize = self.task_attempt.outputSize
        self.shuffleFinishTimeMs = self.task_attempt.shuffleFinishTime
        self.shuffleFinishTimeFormatted = format_unixtime_ms(self.task_attempt.shuffleFinishTime)
        self.sortFinishTimeMs = self.task_attempt.sortFinishTime
        self.sortFinishTimeFormatted = format_unixtime_ms(self.task_attempt.sortFinishTime)
        self.mapFinishTimeMs = self.task_attempt.mapFinishTime # DO NOT USE, NOT VALID IN 0.20
        self.mapFinishTimeFormatted = format_unixtime_ms(self.task_attempt.mapFinishTime)
        self.counters = self.task_attempt.counters
        self.is_mr2 = False

    def get_tracker(self):
        """Look up the Tracker that ran this attempt.

        Re-raises TaskTrackerNotFoundException with a user-friendly message
        when the tracker is unknown to the jobtracker.
        """
        try:
            tracker = Tracker.from_name(self.task.jt, self.taskTrackerId)
            return tracker
        except ttypes.TaskTrackerNotFoundException, e:
            LOGGER.warn("Tracker %s not found: %s" % (self.taskTrackerId, e))
            if LOGGER.isEnabledFor(logging.DEBUG):
                # Dump the known trackers to help diagnose the miss.
                all_trackers = self.task.jt.all_task_trackers()
                for t in all_trackers.trackers:
                    LOGGER.debug("Available tracker: %s" % (t.trackerName,))
            raise ttypes.TaskTrackerNotFoundException(
                _("Cannot look up TaskTracker %(id)s.") % {'id': self.taskTrackerId})

    def get_task_log(self):
        """
        get_task_log(task_id) -> (stdout_text, stderr_text, syslog_text)
        Retrieve the task log from the TaskTracker, at this url:
        http://<tracker_host>:<port>/tasklog?taskid=<attempt_id>
        Optional query string:
        &filter=<source> : where <source> is 'syslog', 'stdout', or 'stderr'.
        &start=<offset> : specify the start offset of the log section, when using a filter.
        &end=<offset> : specify the end offset of the log section, when using a filter.
        """
        tracker = self.get_tracker()
        url = urlunparse(('http',
                          '%s:%s' % (tracker.host, tracker.httpPort),
                          'tasklog',
                          None,
                          'attemptid=%s' % (self.attemptId,),
                          None))
        LOGGER.info('Retrieving %s' % (url,))
        try:
            data = urllib2.urlopen(url)
        except urllib2.URLError:
            raise urllib2.URLError(_("Cannot retrieve logs from TaskTracker %(id)s.") % {'id': self.taskTrackerId})
        # The tasklog page renders each log source in its own <pre> block.
        et = lxml.html.parse(data)
        log_sections = et.findall('body/pre')
        logs = [section.text or '' for section in log_sections]
        if len(logs) < 3:
            # Pad with an error message so callers always receive 3 entries.
            LOGGER.warn('Error parsing task attempt log for %s at "%s". Found %d (not 3) log sections' %
                        (self.attemptId, url, len(log_sections)))
            err = _("Hue encountered an error while retrieving logs from '%s'.") % (url,)
            logs += [err] * (3 - len(logs))
        return logs
class Tracker(object):
    """Wrapper around a thrift TaskTracker status structure."""

    def __getitem__(self, item):
        """
        For backwards-compatibility, resolve job["foo"] as job.foo.
        """
        return getattr(self, item)

    @staticmethod
    def from_name(jt, trackername):
        """Fetch the named tracker's status from the jobtracker and wrap it."""
        return Tracker(jt.task_tracker(trackername))

    def __init__(self, thrifttracker):
        # thrifttracker: thrift TaskTracker status structure
        self.tracker = thrifttracker
        self._init_attributes();

    def _init_attributes(self):
        """Flatten the thrift structure into plain attributes."""
        self.trackerId = self.tracker.trackerName
        self.httpPort = self.tracker.httpPort
        self.host = self.tracker.host
        self.lastSeenMs = self.tracker.lastSeen
        self.lastSeenFormatted = format_unixtime_ms(self.tracker.lastSeen)
        self.totalVirtualMemory = self.tracker.totalVirtualMemory
        self.totalPhysicalMemory = self.tracker.totalPhysicalMemory
        self.availableSpace = self.tracker.availableSpace
        self.failureCount = self.tracker.failureCount
        self.mapCount = self.tracker.mapCount
        self.reduceCount = self.tracker.reduceCount
        self.maxMapTasks = self.tracker.maxMapTasks
        self.maxReduceTasks = self.tracker.maxReduceTasks
        self.taskReports = self.tracker.taskReports
        self.is_mr2 = False
class Cluster(object):
    """Wrapper around the jobtracker's cluster status, with flattened attributes."""

    def __getitem__(self, item):
        """
        For backwards-compatibility, resolve job["foo"] as job.foo
        """
        return getattr(self, item)

    def __init__(self, jt):
        # Fetch a fresh cluster status snapshot from the jobtracker.
        self.status = jt.cluster_status()
        self._init_attributes();

    def _init_attributes(self):
        """Flatten the thrift cluster status into plain attributes."""
        self.mapTasksInProgress = self.status.mapTasks
        self.reduceTasksInProgress = self.status.reduceTasks
        self.maxMapTasks = self.status.maxMapTasks
        self.maxReduceTasks = self.status.maxReduceTasks
        self.usedHeapMemory = self.status.usedMemory
        self.maxHeapMemory = self.status.maxMemory
        self.clusterStartTimeMs = self.status.startTime
        self.clusterStartTimeFormatted = format_unixtime_ms(self.status.startTime)
        self.identifier = self.status.identifier
        self.taskTrackerExpiryInterval = self.status.taskTrackerExpiryInterval
        self.totalJobSubmissions = self.status.totalSubmissions
        self.state = self.status.stateAsString
        self.numActiveTrackers = self.status.numActiveTrackers
        self.activeTrackerNames = self.status.activeTrackerNames
        self.numBlackListedTrackers = self.status.numBlacklistedTrackers
        self.blacklistedTrackerNames = self.status.blacklistedTrackerNames
        self.hostname = self.status.hostname
        self.httpPort = self.status.httpPort
class LinkJobLogs(object):
    """HTML-escape log text and hyperlink HDFS paths and MapReduce job ids."""

    # Absolute paths (preceded by a space or ';') or hdfs:// URIs.
    # \t and \n are regex escapes for tab/newline (same match as the
    # previous literal characters).
    _HDFS_RE = r'((?<= |;)/|hdfs://)[^ <&\t;,\n]+'
    # MapReduce job ids (e.g. job_201403240001_0001), optional trailing / or .
    _MR_RE = r'(job_[0-9_]+(/|\.)?)'

    @classmethod
    def _make_hdfs_links(cls, log):
        """Escape the log text and link only HDFS paths."""
        escaped_logs = escape(log)
        return re.sub(cls._HDFS_RE, cls._replace_hdfs_link, escaped_logs)

    @classmethod
    def _make_mr_links(cls, log):
        """Escape the log text and link only MapReduce job ids."""
        escaped_logs = escape(log)
        return re.sub(cls._MR_RE, cls._replace_mr_link, escaped_logs)

    @classmethod
    def _make_links(cls, log):
        """Escape the log text and link both HDFS paths and job ids."""
        escaped_logs = escape(log)
        hdfs_links = re.sub(cls._HDFS_RE, cls._replace_hdfs_link, escaped_logs)
        return re.sub(cls._MR_RE, cls._replace_mr_link, hdfs_links)

    @classmethod
    def _replace_hdfs_link(cls, match):
        """Render one HDFS match as a link; fall back to plain text on failure."""
        try:
            return '<a href="%s" target="_blank">%s</a>' % (location_to_url(match.group(0), strict=False), match.group(0))
        except Exception:
            # Fixed: was a bare `except:` which also swallowed SystemExit
            # and KeyboardInterrupt.
            return match.group(0)

    @classmethod
    def _replace_mr_link(cls, match):
        """Render one job-id match as a link to the job browser page."""
        try:
            return '<a href="%s" target="_blank">%s</a>' % (reverse('jobbrowser.views.single_job', kwargs={'job': match.group(0)}), match.group(0))
        except Exception:
            return match.group(0)
def get_jobconf(jt, jobid):
    """
    Returns a ConfParse representation of the jobconf for the job
    corresponding to jobid.
    """
    jid = jt.thriftjobid_from_string(jobid)
    # This will throw if the jobconf can't be found
    xml_data = jt.get_job_xml(jid)
    return confparse.ConfParse(xml_data)
def format_unixtime_ms(unixtime):
    """
    Format a unix timestamp in ms to a human readable string
    """
    # Falsy timestamps (None / 0) render as an empty string, not the epoch.
    if not unixtime:
        return ""
    seconds = unixtime / 1000
    return str(datetime.datetime.fromtimestamp(seconds).strftime("%x %X %Z"))
# Lowercase letter immediately following a period.
DOTS = re.compile(r"\.([a-z])")

def dots_to_camel_case(dots):
    """
    Takes a string delimited with periods and returns a camel-case string.
    Example: dots_to_camel_case("foo.bar.baz") //returns fooBarBaz
    """
    return str(DOTS.sub(lambda match: match.group(1).upper(), dots))
def get_path(hdfs_url):
    """
    Returns the path component of an HDFS url.

    Non-HDFS urls are returned unchanged.
    """
    # urlparse is lame, and only "uses_netloc" for a certain set of
    # protocols, so we swap the hdfs scheme for gopher before parsing.
    prefix = "hdfs://"
    if not hdfs_url.startswith(prefix):
        return hdfs_url
    gopher_url = "gopher://" + hdfs_url[len(prefix):]
    return urlparse(gopher_url)[2]  # path component
| kalahbrown/HueBigSQL | apps/jobbrowser/src/jobbrowser/models.py | Python | apache-2.0 | 22,026 |
from rest_framework.serializers import ValidationError
from six import string_types
def has_id_field(value):
    """Validator for nested objects.

    Rejects None values (nested objects must carry an `id`) and re-raises
    bare string values as the error detail itself.
    """
    if value is None:
        message = 'Nested object must contain an `id` attribute.'
        raise ValidationError(message)
    if isinstance(value, string_types):
        # A plain string here is treated as the error message to surface.
        raise ValidationError(value)
| pombredanne/drf-nested-serializer | nested_serializers/validators.py | Python | bsd-2-clause | 288 |
#!/usr/bin/env python
from peyotl.utility import get_logger
import re
from peyotl.git_storage import GitActionBase
# extract an amendment id from a git repo path (as returned by git-tree)
_LOG = get_logger(__name__)
class MergeException(Exception):
    # NOTE(review): not raised anywhere in this module; appears to signal
    # git merge failures - confirm at call sites.
    pass
def get_filepath_for_id(repo_dir, amendment_id):
    """Return the on-disk JSON filepath for *amendment_id* under *repo_dir*."""
    from peyotl.amendments import AMENDMENT_ID_PATTERN
    # Guard against malformed ids before building a path from them.
    assert bool(AMENDMENT_ID_PATTERN.match(amendment_id))
    return '%s/amendments/%s.json' % (repo_dir, amendment_id)
def amendment_id_from_repo_path(path):
    """Extract an amendment id from a git repo path (as returned by git-tree).

    Returns the portion of *path* after the leading 'amendments/' directory,
    or None when the path is not under that directory.
    """
    doc_parent_dir = 'amendments/'
    if not path.startswith(doc_parent_dir):
        # Previously fell through and returned None implicitly.
        return None
    try:
        return path.split(doc_parent_dir)[1]
    except IndexError:
        # Fixed: was a bare `except:`; only a missing split component is expected.
        return None
class TaxonomicAmendmentsGitAction(GitActionBase):
    """Git-level operations (branching, writing, removing) for taxonomic
    amendment documents stored under amendments/ in a docstore repo."""

    def __init__(self,
                 repo,
                 remote=None,
                 git_ssh=None,
                 pkey=None,
                 cache=None,  # pylint: disable=W0613
                 path_for_doc_fn=None,
                 max_file_size=None):
        """GitActionBase subclass to interact with a Git repository
        Example:
        gd = TaxonomicAmendmentsGitAction(repo="/home/user/git/foo")
        Note that this requires write access to the
        git repository directory, so it can create a
        lockfile in the .git directory.
        """
        GitActionBase.__init__(self,
                               'amendment',
                               repo,
                               remote,
                               git_ssh,
                               pkey,
                               cache,
                               path_for_doc_fn,
                               max_file_size,
                               path_for_doc_id_fn=get_filepath_for_id)

    # rename some generic members in the base class, for clarity and backward compatibility
    @property
    def path_for_amendment(self):
        # alias for GitActionBase.path_for_doc
        return self.path_for_doc

    @property
    def return_amendment(self):
        # alias for GitActionBase.return_document
        return self.return_document

    def get_changed_docs(self,
                         ancestral_commit_sha,
                         doc_ids_to_check=None):
        """Return the amendment ids changed since *ancestral_commit_sha*."""
        return self._get_changed_docs(ancestral_commit_sha,
                                      doc_id_from_repo_path=amendment_id_from_repo_path,
                                      doc_ids_to_check=doc_ids_to_check)

    def find_WIP_branches(self, amendment_id):
        """Find work-in-progress branches matching this amendment id."""
        pat = re.compile(r'.*_amendment_{i}_[0-9]+'.format(i=amendment_id))
        return self._find_WIP_branches(amendment_id, branch_pattern=pat)

    def create_or_checkout_branch(self,
                                  gh_user,
                                  amendment_id,
                                  parent_sha,
                                  force_branch_name=False):
        """Create (or check out) a per-user WIP branch for this amendment."""
        return self._create_or_checkout_branch(gh_user,
                                               amendment_id,
                                               parent_sha,
                                               branch_name_template="{ghu}_amendment_{rid}",
                                               force_branch_name=force_branch_name)

    def remove_amendment(self, first_arg, sec_arg, third_arg, fourth_arg=None, commit_msg=None):
        """Remove an amendment
        Given a amendment_id, branch and optionally an
        author, remove an amendment on the given branch
        and attribute the commit to author.
        Returns the SHA of the commit on branch.

        Supports two positional calling conventions:
        (amendment_id, branch_name, author) or
        (gh_user, amendment_id, parent_sha, author).
        """
        if fourth_arg is None:
            amendment_id, branch_name, author = first_arg, sec_arg, third_arg
            # branch names look like '<gh_user>_amendment_<id>'
            gh_user = branch_name.split('_amendment_')[0]
            parent_sha = self.get_master_sha()
        else:
            gh_user, amendment_id, parent_sha, author = first_arg, sec_arg, third_arg, fourth_arg
        if commit_msg is None:
            commit_msg = "Delete Amendment '%s' via OpenTree API" % amendment_id
        return self._remove_document(gh_user, amendment_id, parent_sha, author, commit_msg)

    def write_amendment(self, amendment_id, file_content, branch, author):
        """Given an amendment_id, temporary filename of content, branch and auth_info
        Deprecated but needed until we merge api local-dep to master...
        """
        gh_user = branch.split('_amendment_')[0]
        msg = "Update Amendment '%s' via OpenTree API" % amendment_id
        return self.write_document(gh_user,
                                   amendment_id,
                                   file_content,
                                   branch, author,
                                   commit_msg=msg)

    def write_amendment_from_tmpfile(self, amendment_id, tmpfi, parent_sha, auth_info, commit_msg=''):
        """Given an amendment_id, temporary filename of content, branch and auth_info
        """
        return self.write_doc_from_tmpfile(amendment_id,
                                           tmpfi,
                                           parent_sha,
                                           auth_info,
                                           commit_msg,
                                           doctype_display_name="amendment")
| mtholder/peyotl | peyotl/amendments/git_actions.py | Python | bsd-2-clause | 5,259 |
"""
$url teamliquid.net
$url tl.net
$type live
"""
import logging
import re
from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugins.afreeca import AfreecaTV
from streamlink.plugins.twitch import Twitch
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r"https?://(?:www\.)?(?:tl|teamliquid)\.net/video/streams/"
))
class Teamliquid(Plugin):
    """Resolve teamliquid.net/tl.net stream pages to the linked
    AfreecaTV or Twitch stream."""

    def _get_streams(self):
        # The TL page only links out to the hosting platform; scrape the
        # 'View on ...' anchor to find the real stream URL.
        res = self.session.http.get(self.url)
        stream_address_re = re.compile(r'''href\s*=\s*"([^"]+)"\s*>\s*View on''')
        stream_url_match = stream_address_re.search(res.text)
        if stream_url_match:
            stream_url = stream_url_match.group(1)
            log.info("Attempting to play streams from {0}".format(stream_url))
            p = urlparse(stream_url)
            # Borrow the upstream platform's stream weighting so quality
            # names sort correctly.
            if p.netloc.endswith("afreecatv.com"):
                self.stream_weight = AfreecaTV.stream_weight
            elif p.netloc.endswith("twitch.tv"):
                self.stream_weight = Twitch.stream_weight
            return self.session.streams(stream_url)
__plugin__ = Teamliquid
| streamlink/streamlink | src/streamlink/plugins/teamliquid.py | Python | bsd-2-clause | 1,128 |
"""
@package mi.instrument.mclane.driver
@file marine-integrations/mi/instrument/mclane/driver.py
@author Dan Mergens
@brief Driver base class for McLane instruments
Release notes:
initial version
"""
import datetime
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
import re
import time
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.util import dict_equal
from mi.core.exceptions import SampleException, \
InstrumentParameterException, \
InstrumentProtocolException, \
InstrumentTimeoutException
from mi.core.instrument.instrument_protocol import \
CommandResponseInstrumentProtocol, \
RE_PATTERN, \
DEFAULT_CMD_TIMEOUT
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import \
DriverEvent, \
DriverAsyncEvent, \
DriverProtocolState, \
DriverParameter, \
ResourceAgentState
from mi.core.instrument.data_particle import \
DataParticle, \
DataParticleKey, \
CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
NEWLINE = '\r\n'
CONTROL_C = '\x03'
NUM_PORTS = 24 # number of collection bags
# default timeout.
INTER_CHARACTER_DELAY = .2 # works
# INTER_CHARACTER_DELAY = .02 - too fast
# INTER_CHARACTER_DELAY = .04
PUMP_RATE_ERROR = 1.15 # PPS is off in its flow rate measurement by 14.5% - TODO - check RAS data
####
# Driver Constant Definitions
####
class ScheduledJob(BaseEnum):
    """Names of jobs registered with the driver scheduler."""
    CLOCK_SYNC = 'clock_sync'
class ProtocolState(BaseEnum):
    """
    Instrument protocol states
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
    # driver-specific pumping states
    FLUSH = 'DRIVER_STATE_FLUSH'
    FILL = 'DRIVER_STATE_FILL'
    CLEAR = 'DRIVER_STATE_CLEAR'
    RECOVERY = 'DRIVER_STATE_RECOVERY'  # for recovery after pump failure
class ProtocolEvent(BaseEnum):
    """
    Protocol events
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    DISCOVER = DriverEvent.DISCOVER
    INIT_PARAMS = DriverEvent.INIT_PARAMS
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    # ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    CLOCK_SYNC = DriverEvent.CLOCK_SYNC
    # driver-specific pump-cycle events
    FLUSH = 'DRIVER_EVENT_FLUSH'
    FILL = 'DRIVER_EVENT_FILL'
    CLEAR = 'DRIVER_EVENT_CLEAR'
    PUMP_STATUS = 'DRIVER_EVENT_PUMP_STATUS'
    INSTRUMENT_FAILURE = 'DRIVER_EVENT_INSTRUMENT_FAILURE'
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of above).
    """
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
    CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
    ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
    # ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
    CLEAR = ProtocolEvent.CLEAR
class Parameter(DriverParameter):
    """
    Device specific parameters.

    One volume/flowrate/minflow triple per pump phase (flush, fill, clear).
    """
    FLUSH_VOLUME = "flush_volume"
    FLUSH_FLOWRATE = "flush_flowrate"
    FLUSH_MINFLOW = "flush_minflow"
    FILL_VOLUME = "fill_volume"
    FILL_FLOWRATE = "fill_flowrate"
    FILL_MINFLOW = "fill_minflow"
    CLEAR_VOLUME = "clear_volume"
    CLEAR_FLOWRATE = "clear_flowrate"
    CLEAR_MINFLOW = "clear_minflow"
class McLaneCommand(BaseEnum):
    """
    Instrument command strings - case insensitive
    """
    GO = NEWLINE
    CONTROL_C = CONTROL_C
    CLOCK = 'clock'  # set the clock date and time
    BATTERY = 'battery'  # display battery voltage
    HOME = 'home'  # set the port to the home port (0)
    FORWARD = 'forward'  # start forward pump operation < volume flowrate minflow [time] >
    REVERSE = 'reverse'  # reverse pump operation < volume flowrate minflow [time] >
    PORT = 'port'  # display current port or set valve to supplied position
    CAPACITY = 'capacity'  # pump max flow rate mL/min
    COPYRIGHT = 'copyright'  # display version, release and copyright notice
class Prompt(BaseEnum):
    """
    Device i/o prompts.
    """
    CR_NL = '\r\n'
    PERIOD = '.'
    SUSPENDED = 'Suspended ... '
    ENTER_CTRL_C = 'Enter ^C now to wake up ...'
    COMMAND_INPUT = '>'  # instrument is ready for a command
    UNRECOGNIZED_COMMAND = '] unrecognized command'
class McLaneResponse(BaseEnum):
    """
    Expected device response strings (compiled regexes).
    """
    HOME = re.compile(r'Port: 00')
    PORT = re.compile(r'Port: (\d+)')  # e.g. Port: 01
    # e.g. 03/25/14 20:24:02 PPS ML13003-01>
    READY = re.compile(r'(\d+/\d+/\d+\s+\d+:\d+:\d+\s+)(RAS|PPS)\s+(.*)>')
    # Result 00 | 75 100 25 4 | 77.2 98.5 99.1 47 031514 001813 | 29.8 1
    # Result 00 | 10 100 75 60 | 10.0 85.5 100.0 7 032814 193855 | 30.0 1
    PUMP = re.compile(r'(Status|Result).*(\d+)' + NEWLINE)
    # Battery: 30.1V [Alkaline, 18V minimum]
    BATTERY = re.compile(r'Battery:\s+(\d*\.\d+)V\s+\[.*\]')  # battery voltage
    # Capacity: Maxon 250mL
    CAPACITY = re.compile(r'Capacity:\s(Maxon|Pittman)\s+(\d+)mL')  # pump make and capacity
    # McLane Research Laboratories, Inc.
    # CF2 Adaptive Water Transfer System
    # Version 2.02 of Jun 7 2013 18:17
    # Configured for: Maxon 250ml pump
    VERSION = re.compile(
        r'McLane .*$' + NEWLINE +
        r'CF2 .*$' + NEWLINE +
        r'Version\s+(\S+)\s+of\s+(.*)$' + NEWLINE +  # version and release date
        r'.*$'
    )
class Timeout(BaseEnum):
    """
    Timeouts for commands # TODO - calculate based on flow rate & volume

    Values are in seconds, passed to command/response waits.
    """
    HOME = 30
    PORT = 10 + 2  # average time to advance to next port is 10 seconds, any more indicates skipping of a port
    FLUSH = 103 + 5
    FILL = 2728 + 30
    CLEAR = 68 + 5
    CLOCK = INTER_CHARACTER_DELAY * 30 + 1
#####
# Codes for pump termination
# Human-readable descriptions keyed by the numeric termination code
# reported by the pump (see TerminationCodeEnum for symbolic names).
TerminationCodes = {
    0: 'Pumping in progress',
    1: 'Volume reached',
    2: 'Time limit reached',
    3: 'Min flow reached',
    4: 'Low battery',
    5: 'Stopped by user',
    6: 'Pump would not start',
    7: 'Sudden flow obstruction',
    8: 'Sudden obstruction with slip',
    9: 'Sudden pressure release'
}
class TerminationCodeEnum(BaseEnum):
    """Symbolic names for the pump termination codes in TerminationCodes."""
    PUMP_IN_PROGRESS = 0
    VOLUME_REACHED = 1
    TIME_LIMIT_REACHED = 2
    MIN_FLOW_REACHED = 3
    LOW_BATTERY = 4
    STOPPED_BY_USER = 5
    PUMP_WOULD_NOT_START = 6
    SUDDEN_FLOW_OBSTRUCTION = 7
    SUDDEN_OBSTRUCTION_WITH_SLIP = 8
    SUDDEN_PRESSURE_RELEASE = 9
class McLaneDataParticleType(BaseEnum):
    """
    Data particle types produced by this driver
    """
    # TODO - define which commands will be published to user
    RAW = CommonDataParticleType.RAW
    MCLANE_PARSED = 'mclane_parsed'
    PUMP_STATUS = 'pump_status'
    VOLTAGE_STATUS = 'battery'
    VERSION_INFO = 'version'
###############################################################################
# Data Particles
###############################################################################
class McLaneSampleDataParticleKey(BaseEnum):
    """Field names for the pump status/result sample particle."""
    PORT = 'port_number'
    VOLUME_COMMANDED = 'commanded_volume'
    FLOW_RATE_COMMANDED = 'commanded_flowrate'
    MIN_FLOW_COMMANDED = 'commanded_min_flowrate'
    TIME_LIMIT = 'commanded_timelimit'
    VOLUME_ACTUAL = 'cumulative_volume'
    FLOW_RATE_ACTUAL = 'flowrate'
    MIN_FLOW_ACTUAL = 'min_flowrate'
    TIMER = 'elapsed_time'
    TIME = 'date_time_string'
    BATTERY = 'battery_voltage'
    CODE = 'sampling_status_code'
# data particle for forward, reverse, and result commands
# e.g.:
# --- command --- -------- result -------------
# Result port | vol flow minf tlim | vol flow minf secs date-time | batt code
# Status 00 | 75 100 25 4 | 1.5 90.7 90.7* 1 031514 001727 | 29.9 0
class McLaneSampleDataParticle(DataParticle):
    """Parses pump 'Status'/'Result' lines into a data particle."""

    @staticmethod
    def regex():
        """
        get the regex pattern for pump status/result lines
        @return: regex string
        """
        exp = str(r'(?P<status>Status|Result)' +  # status is incremental, result is the last return from the command
                  '\s*(?P<port>\d+)\s*\|' +  # PORT
                  '\s*(?P<commanded_volume>\d+)' +  # VOLUME_COMMANDED
                  '\s*(?P<commanded_flow_rate>\d+)' +  # FLOW RATE COMMANDED
                  '\s*(?P<commanded_min_flowrate>\d+)' +  # MIN RATE COMMANDED
                  '\s*(?P<time_limit>\d+)\s*\|' +  # TLIM - TODO
                  '\s*(?P<volume>\d*\.?\d+)' +  # VOLUME (actual)
                  '\s*(?P<flow_rate>\d*\.?\d+)' +  # FLOW RATE (actual)
                  '\s*(?P<min_flow>\d*\.?\d+)' +  # MIN RATE (actual)
                  '\*?' +
                  '\s*(?P<timer>\d+)' +  # elapsed time (seconds)
                  '\s*(?P<time>\d+\s*\d+)\s*\|' +  # MMDDYY HHMMSS (current date and time)
                  '\s*(?P<voltage>\d*\.?\d+)' +  # voltage (battery)
                  '\s*(?P<code>\d+)' +  # code enumeration
                  '\s*' + NEWLINE)
        return exp

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        return re.compile(McLaneSampleDataParticle.regex())

    def _build_parsed_values(self):
        """Map the matched groups to particle values.

        @raises SampleException: when raw_data does not match the pump regex.
        """
        match = McLaneSampleDataParticle.regex_compiled().match(self.raw_data)
        if not match:
            # Fixed: the message previously passed raw_data as a second
            # positional argument, so [%s] was never interpolated.
            raise SampleException("RASFL_SampleDataParticle: No regex match of parsed sample data: [%s]" % self.raw_data)
        result = [
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.PORT,
             DataParticleKey.VALUE: int(match.group('port'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.VOLUME_COMMANDED,
             DataParticleKey.VALUE: int(match.group('commanded_volume'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.FLOW_RATE_COMMANDED,
             DataParticleKey.VALUE: int(match.group('commanded_flow_rate'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.MIN_FLOW_COMMANDED,
             DataParticleKey.VALUE: int(match.group('commanded_min_flowrate'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIME_LIMIT,
             DataParticleKey.VALUE: int(match.group('time_limit'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.VOLUME_ACTUAL,
             DataParticleKey.VALUE: float(match.group('volume'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.FLOW_RATE_ACTUAL,
             DataParticleKey.VALUE: float(match.group('flow_rate'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.MIN_FLOW_ACTUAL,
             DataParticleKey.VALUE: float(match.group('min_flow'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIMER,
             DataParticleKey.VALUE: int(match.group('timer'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIME,
             DataParticleKey.VALUE: str(match.group('time'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.BATTERY,
             DataParticleKey.VALUE: float(match.group('voltage'))},
            {DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.CODE,
             DataParticleKey.VALUE: int(match.group('code'))}]
        return result
###########################################################################
# Protocol
###########################################################################
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class McLaneProtocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
# __metaclass__ = get_logging_metaclass(log_level='debug')
    def __init__(self, prompts, newline, driver_event):
        """
        Protocol constructor.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
        # Build protocol state machine.
        self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
        # Add event handlers for protocol state machine, one list per state.
        handlers = {
            ProtocolState.UNKNOWN: [
                (ProtocolEvent.ENTER, self._handler_unknown_enter),
                (ProtocolEvent.DISCOVER, self._handler_unknown_discover),
            ],
            ProtocolState.COMMAND: [
                (ProtocolEvent.ENTER, self._handler_command_enter),
                (ProtocolEvent.INIT_PARAMS, self._handler_command_init_params),
                (ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
                (ProtocolEvent.CLOCK_SYNC, self._handler_sync_clock),
                (ProtocolEvent.ACQUIRE_SAMPLE, self._handler_command_acquire),
                # (ProtocolEvent.ACQUIRE_STATUS, self._handler_command_status),
                (ProtocolEvent.CLEAR, self._handler_command_clear),
                (ProtocolEvent.GET, self._handler_get),
                (ProtocolEvent.SET, self._handler_command_set),
            ],
            ProtocolState.FLUSH: [
                (ProtocolEvent.ENTER, self._handler_flush_enter),
                (ProtocolEvent.FLUSH, self._handler_flush_flush),
                (ProtocolEvent.PUMP_STATUS, self._handler_flush_pump_status),
                (ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
            ],
            ProtocolState.FILL: [
                (ProtocolEvent.ENTER, self._handler_fill_enter),
                (ProtocolEvent.FILL, self._handler_fill_fill),
                (ProtocolEvent.PUMP_STATUS, self._handler_fill_pump_status),
                (ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
            ],
            ProtocolState.CLEAR: [
                (ProtocolEvent.ENTER, self._handler_clear_enter),
                (ProtocolEvent.CLEAR, self._handler_clear_clear),
                (ProtocolEvent.PUMP_STATUS, self._handler_clear_pump_status),
                (ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
            ],
            ProtocolState.RECOVERY: [
                (ProtocolEvent.ENTER, self._handler_recovery_enter),
            ],
            ProtocolState.DIRECT_ACCESS: [
                (ProtocolEvent.ENTER, self._handler_direct_access_enter),
                (ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
                (ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
            ],
        }
        for state in handlers:
            for event, handler in handlers[state]:
                self._protocol_fsm.add_handler(state, event, handler)
        # Add build handlers for device commands - we are only using simple commands
        for cmd in McLaneCommand.list():
            self._add_build_handler(cmd, self._build_command)
        # Add response handlers for device commands.
        # self._add_response_handler(McLaneCommand.BATTERY, self._parse_battery_response)
        # self._add_response_handler(McLaneCommand.CLOCK, self._parse_clock_response)
        # self._add_response_handler(McLaneCommand.PORT, self._parse_port_response)
        # Construct the parameter dictionary containing device parameters,
        # current parameter values, and set formatting functions.
        self._build_param_dict()
        self._build_command_dict()
        self._build_driver_dict()
        self._chunker = StringChunker(McLaneProtocol.sieve_function)
        self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.CLOCK_SYNC)
        # Start state machine in UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)
        self._sent_cmds = None
        # TODO - reset next_port on mechanical refresh of the PPS filters - how is the driver notified?
        # TODO - need to persist state for next_port to save driver restart
        self.next_port = 1  # next available port
        self._second_attempt = False
@staticmethod
def sieve_function(raw_data):
"""
The method that splits samples and status
"""
matchers = []
return_list = []
matchers.append(McLaneSampleDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# implement virtual methods from base class.
########################################################################
    def _set_params(self, *args, **kwargs):
        """
        Issue commands to the instrument to set various parameters. If
        startup is set to true that means we are setting startup values
        and immutable parameters can be set. Otherwise only READ_WRITE
        parameters can be set.
        must be overloaded in derived classes
        @param params dictionary containing parameter name and value pairs
        @param startup flag - true indicates initializing, false otherwise
        """
        params = args[0]
        # check for attempt to set readonly parameters (read-only or immutable set outside startup)
        self._verify_not_readonly(*args, **kwargs)
        old_config = self._param_dict.get_config()
        for (key, val) in params.iteritems():
            log.debug("KEY = " + str(key) + " VALUE = " + str(val))
            self._param_dict.set_value(key, val)
        new_config = self._param_dict.get_config()
        log.debug('new config: %s\nold config: %s', new_config, old_config)
        # check for parameter change; notify the agent only when something changed
        if not dict_equal(old_config, new_config):
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def apply_startup_params(self):
"""
Apply startup parameters
"""
# fn = "apply_startup_params"
# config = self.get_startup_config()
# log.debug("%s: startup config = %s", fn, config)
#
# for param in Parameter.list():
# if param in config:
# self._param_dict.set_value(param, config[param])
#
# log.debug("%s: new parameters", fn)
# for x in config:
# log.debug(" parameter %s: %s", x, config[x])
if self.get_current_state() != DriverProtocolState.COMMAND:
raise InstrumentProtocolException('cannot set parameters outside command state')
self._set_params(self.get_startup_config(), True)
########################################################################
# Instrument commands.
########################################################################
    def _do_cmd_resp(self, cmd, *args, **kwargs):
        """
        Perform a command-response on the device. Overrides the base class so it will
        return the regular expression groups without concatenating them into a string.
        @param cmd The command to execute.
        @param args positional arguments to pass to the build handler.
        @param timeout optional wakeup and command timeout via kwargs.
        @param response_regex required kwarg with a compiled regex for the
            response to match. Groups that match will be returned as a tuple.
        NOTE: unlike the base class, characters are always sent with a fixed
        INTER_CHARACTER_DELAY spacing; a write_delay kwarg is ignored.
        @retval response The parsed response result.
        @raises InstrumentTimeoutException if the response did not occur in time.
        @raises InstrumentProtocolException if command could not be built or if response
        was not recognized.
        """
        # Get timeout and initialize response.
        timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
        response_regex = kwargs.get('response_regex', None)  # required argument
        write_delay = INTER_CHARACTER_DELAY
        retval = None
        if not response_regex:
            raise InstrumentProtocolException('missing required keyword argument "response_regex"')
        if response_regex and not isinstance(response_regex, RE_PATTERN):
            raise InstrumentProtocolException('Response regex is not a compiled pattern!')
        # Get the build handler.
        build_handler = self._build_handlers.get(cmd, None)
        if not build_handler:
            raise InstrumentProtocolException('Cannot build command: %s' % cmd)
        cmd_line = build_handler(cmd, *args)
        # Wakeup the device, pass up exception if timeout
        prompt = self._wakeup(timeout)
        # Clear line and prompt buffers for result.
        self._linebuf = ''
        self._promptbuf = ''
        # Send command.
        log.debug('_do_cmd_resp: %s, timeout=%s, write_delay=%s, response_regex=%s',
                  repr(cmd_line), timeout, write_delay, response_regex)
        # Send one character at a time with a fixed inter-character delay.
        for char in cmd_line:
            self._connection.send(char)
            time.sleep(write_delay)
        # Wait for the prompt, prepare result and return, timeout exception
        return self._get_response(timeout, response_regex=response_regex)
    def _do_cmd_home(self):
        """
        Move valve to the home port
        @retval True if successful, False if unable to return home
        """
        func = '_do_cmd_home'  # NOTE(review): unused local
        log.debug('--- djm --- command home')
        # Query the current port; group 0 of the PORT response is the port number.
        port = int(self._do_cmd_resp(McLaneCommand.PORT, response_regex=McLaneResponse.PORT)[0])
        log.debug('--- djm --- at port: %d', port)
        if port != 0:
            log.debug('--- djm --- going home')
            self._do_cmd_resp(McLaneCommand.HOME, response_regex=McLaneResponse.HOME, timeout=Timeout.HOME)
            # Re-query the port to confirm the move actually reached home (0).
            port = int(self._do_cmd_resp(McLaneCommand.PORT, response_regex=McLaneResponse.PORT)[0])
            if port != 0:
                log.error('Unable to return to home port')
                return False
        return True
def _do_cmd_flush(self, *args, **kwargs):
"""
Flush the home port in preparation for collecting a sample. This clears the intake port so that
the sample taken will be new.
This only starts the flush. The remainder of the flush is monitored by got_chunk.
"""
flush_volume = self._param_dict.get(Parameter.FLUSH_VOLUME)
flush_flowrate = self._param_dict.get(Parameter.FLUSH_FLOWRATE)
flush_minflow = self._param_dict.get(Parameter.FLUSH_MINFLOW)
if not self._do_cmd_home():
self._async_raise_fsm_event(ProtocolEvent.INSTRUMENT_FAILURE)
log.debug('--- djm --- flushing home port, %d %d %d',
flush_volume, flush_flowrate, flush_flowrate)
self._do_cmd_no_resp(McLaneCommand.FORWARD, flush_volume, flush_flowrate, flush_minflow)
def _do_cmd_fill(self, *args, **kwargs):
"""
Fill the sample at the next available port
"""
log.debug('--- djm --- collecting sample in port %d', self.next_port)
fill_volume = self._param_dict.get(Parameter.FILL_VOLUME)
fill_flowrate = self._param_dict.get(Parameter.FILL_FLOWRATE)
fill_minflow = self._param_dict.get(Parameter.FILL_MINFLOW)
log.debug('--- djm --- collecting sample in port %d', self.next_port)
reply = self._do_cmd_resp(McLaneCommand.PORT, self.next_port, response_regex=McLaneResponse.PORT)
log.debug('--- djm --- port returned:\n%r', reply)
self.next_port += 1 # succeed or fail, we can't use this port again
# TODO - commit next_port to the agent for persistent data store
self._do_cmd_no_resp(McLaneCommand.FORWARD, fill_volume, fill_flowrate, fill_minflow)
    def _do_cmd_clear(self, *args, **kwargs):
        """
        Clear the home port
        """
        # NOTE(review): unlike _do_cmd_flush, the return value of
        # _do_cmd_home() is ignored here -- confirm that is intentional.
        self._do_cmd_home()
        clear_volume = self._param_dict.get(Parameter.CLEAR_VOLUME)
        clear_flowrate = self._param_dict.get(Parameter.CLEAR_FLOWRATE)
        clear_minflow = self._param_dict.get(Parameter.CLEAR_MINFLOW)
        log.debug('--- djm --- clearing home port, %d %d %d',
                  clear_volume, clear_flowrate, clear_minflow)
        # REVERSE pumps backwards to clear the intake line.
        self._do_cmd_no_resp(McLaneCommand.REVERSE, clear_volume, clear_flowrate, clear_minflow)
########################################################################
# Generic handlers.
########################################################################
    def _handler_pass(self, *args, **kwargs):
        """No-op handler for events that require no action in the current state."""
        pass

    def _handler_all_failure(self, *args, **kwargs):
        """Common failure handler: log the failure and move the FSM to RECOVERY."""
        log.error('Instrument failure detected. Entering recovery mode.')
        return ProtocolState.RECOVERY, ResourceAgentState.BUSY
########################################################################
# Unknown handlers.
########################################################################
    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        # TODO - read persistent data (next port)

    def _handler_unknown_discover(self, *args, **kwargs):
        """
        Discover current state; can only be COMMAND (instrument has no AUTOSAMPLE mode).
        @retval (next_state, result), (ProtocolState.COMMAND, None) if successful.
        """
        # force to command mode, this instrument has no autosample mode
        return ProtocolState.COMMAND, ResourceAgentState.IDLE
########################################################################
# Flush
########################################################################
    def _handler_flush_enter(self, *args, **kwargs):
        """
        Enter the flush state. Trigger FLUSH event.
        """
        log.debug('--- djm --- entering FLUSH state')
        # Reset the retry flag so one obstruction can be recovered from.
        self._second_attempt = False
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._async_raise_fsm_event(ProtocolEvent.FLUSH)

    def _handler_flush_flush(self, *args, **kwargs):
        """
        Begin flushing the home port. Subsequent flushing will be monitored and sent to the flush_pump_status
        handler.
        @retval (next_state, (next_agent_state, result)) tuple.
        """
        log.debug('--- djm --- in FLUSH state')
        # NOTE(review): next_state/next_agent_state are assigned but unused;
        # the handler returns None so the state transition happens later in
        # _handler_flush_pump_status. Confirm before removing.
        next_state = ProtocolState.FILL
        next_agent_state = ResourceAgentState.BUSY
        # 2. Set to home port
        # 3. flush intake (home port)
        # 4. wait 30 seconds
        # 1. Get next available port (if no available port, bail)
        log.debug('--- djm --- Flushing home port')
        self._do_cmd_flush()
        return None, (ResourceAgentState.BUSY, None)

    def _handler_flush_pump_status(self, *args, **kwargs):
        """
        Manage pump status update during flush. Status updates indicate continued pumping, Result updates
        indicate completion of command. Check the termination code for success.
        @args match object containing the regular expression match of the status line.
        """
        match = args[0]
        pump_status = match.group('status')
        code = int(match.group('code'))
        next_state = None
        next_agent_state = None
        log.debug('--- djm --- received pump status: pump status: %s, code: %d', pump_status, code)
        if pump_status == 'Result':
            log.debug('--- djm --- flush completed - %s', TerminationCodes[code])
            # An obstruction triggers a CLEAR attempt; any other result moves
            # on to sample collection.
            if code == TerminationCodeEnum.SUDDEN_FLOW_OBSTRUCTION:
                log.info('Encountered obstruction during flush, attempting to clear')
                self._async_raise_fsm_event(ProtocolEvent.CLEAR)
            else:
                next_state = ProtocolState.FILL
                next_agent_state = ResourceAgentState.BUSY
        # elif pump_status == 'Status':
        return next_state, next_agent_state

    def _handler_flush_clear(self, *args, **kwargs):
        """
        Attempt to clear home port after stoppage has occurred during flush.
        This is only performed once. On the second stoppage, the driver will enter recovery mode.
        """
        log.debug('--- djm --- handling clear request during flush')
        if self._second_attempt:
            return ProtocolState.RECOVERY, ResourceAgentState.BUSY
        self._second_attempt = True
        self._do_cmd_clear()
        return None, None
########################################################################
# Fill
########################################################################
    def _handler_fill_enter(self, *args, **kwargs):
        """
        Enter the fill state. Trigger FILL event.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._async_raise_fsm_event(ProtocolEvent.FILL)

    def _handler_fill_fill(self, *args, **kwargs):
        """
        Send the fill command and process the first response
        @retval (next_state, (next_agent_state, result)) tuple.
        """
        next_state = None
        next_agent_state = None
        result = None
        log.debug('Entering PHIL PHIL')
        # 5. switch to collection port (next available)
        # 6. collect sample (4000 ml)
        # 7. wait 2 minutes
        if self.next_port > NUM_PORTS:
            # All sample containers are used; return to COMMAND instead.
            log.error('Unable to collect RAS sample - %d containers full', NUM_PORTS)
            next_state = ProtocolState.COMMAND
            next_agent_state = ResourceAgentState.COMMAND
        else:
            self._do_cmd_fill()
        return next_state, (next_agent_state, result)
def _handler_fill_pump_status(self, *args, **kwargs):
"""
Process pump status updates during filter collection.
"""
next_state = None
next_agent_state = None
match = args[0]
pump_status = match.group('status')
code = int(match.group('code'))
if pump_status == 'Result':
if code != TerminationCodeEnum.VOLUME_REACHED:
next_state = ProtocolState.RECOVERY
next_state = ProtocolState.CLEAR # all done
# if pump_status == 'Status':
# TODO - check for bag rupture (> 93% flow rate near end of sample collect- RAS only)
return next_state, next_agent_state
########################################################################
# Clear
########################################################################
    def _handler_clear_enter(self, *args, **kwargs):
        """
        Enter the clear state. Trigger the CLEAR event.
        """
        # Reset the retry flag so one failed clear can fall back to a flush.
        self._second_attempt = False
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._async_raise_fsm_event(ProtocolEvent.CLEAR)

    def _handler_clear_clear(self, *args, **kwargs):
        """
        Send the clear command. If there is an obstruction trigger a FLUSH, otherwise place driver in RECOVERY mode.
        """
        log.debug('--- djm --- clearing home port')
        # 8. return to home port
        # 9. reverse flush 75 ml to pump water from exhaust line through intake line
        self._do_cmd_clear()
        return None, None

    def _handler_clear_pump_status(self, *args, **kwargs):
        """
        Parse pump status during clear action.
        @param args[0] regex match object with 'status' and 'code' groups.
        @retval (next_state, next_agent_state) tuple.
        """
        next_state = None
        next_agent_state = None
        match = args[0]
        pump_status = match.group('status')
        code = int(match.group('code'))
        if pump_status == 'Result':
            # Any termination other than reaching the volume is treated as an
            # obstruction and retried with a flush.
            if code != TerminationCodeEnum.VOLUME_REACHED:
                log.error('Encountered obstruction during clear. Attempting flush...')
                self._async_raise_fsm_event(ProtocolEvent.FLUSH)
            else:
                log.debug('--- djm --- clear complete')
                next_state = ProtocolState.COMMAND
                next_agent_state = ResourceAgentState.COMMAND
        # if Status, nothing to do
        return next_state, next_agent_state

    def _handler_clear_flush(self, *args, **kwargs):
        """
        Attempt to recover from failed attempt to clear by flushing home port. Only try once.
        """
        log.info('Attempting to flush main port during clear')
        if self._second_attempt:
            return ProtocolState.RECOVERY, ResourceAgentState.BUSY
        self._second_attempt = True
        self._do_cmd_flush()
        return None, None
########################################################################
# Command handlers.
# just implemented to make DA possible, instrument has no actual command mode
########################################################################
    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state.
        """
        # Command device to update parameters and send a config change event if needed.
        self._update_params()
        self._protocol_fsm.on_event(ProtocolEvent.INIT_PARAMS)
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_command_init_params(self, *args, **kwargs):
        """
        Setup initial parameters.
        @retval (None, None) - no state change.
        """
        self._init_params()
        return None, None
def _handler_command_set(self, *args, **kwargs):
"""
Set instrument parameters
"""
log.debug('handler command set called')
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('set command requires a parameter dictionary.')
try:
startup = args[1]
except IndexError:
pass
if not isinstance(params, dict):
raise InstrumentParameterException('set parameters is not a dictionary')
self._set_params(params, startup)
return None, None
# changed = False
# for key, value in params.items():
# log.info('Command:set - setting parameter %s to %s', key, value)
# if not Parameter.has(key):
# raise InstrumentProtocolException('Attempt to set undefined parameter: %s', key)
# old_value = self._param_dict.get(key)
# if old_value != value:
# changed = True
# self._param_dict.set_value(key, value)
#
# if changed:
# self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
#
# next_state = None
# result = None
# return next_state, result
    def _handler_command_start_direct(self, *args, **kwargs):
        """
        Start direct access.
        @retval (ProtocolState.DIRECT_ACCESS, (ResourceAgentState.DIRECT_ACCESS, None))
        """
        log.debug('--- djm --- entered _handler_command_start_direct with args: %s', args)
        result = None
        next_state = ProtocolState.DIRECT_ACCESS
        next_agent_state = ResourceAgentState.DIRECT_ACCESS
        return next_state, (next_agent_state, result)
########################################################################
# Recovery handlers.
########################################################################
# TODO - not sure how to determine how to exit from this state. Probably requires a driver reset.
    def _handler_recovery_enter(self, *args, **kwargs):
        """
        Error recovery mode. The instrument failed to respond to a command and now requires the user to perform
        diagnostics and correct before proceeding.
        """
        log.debug('--- djm --- entered recovery mode')
        # Notify the driver superclass of the state change; no automatic exit
        # from this state is implemented here.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
########################################################################
# Direct access handlers.
########################################################################
    def _handler_direct_access_enter(self, *args, **kwargs):
        """
        Enter direct access state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        # Track commands sent while in direct access.
        self._sent_cmds = []

    def _handler_direct_access_execute_direct(self, data):
        """Pass raw user bytes straight through to the instrument."""
        self._do_cmd_direct(data)
        return None, None

    def _handler_direct_access_stop_direct(self, *args, **kwargs):
        """Leave direct access and return to COMMAND."""
        return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
########################################################################
# general handlers.
########################################################################
    def get_timestamp_delayed(self, fmt, delay=0):
        """
        Return a formatted date string of the current utc time,
        but the string return is delayed until the next second
        transition.
        Formatting:
        http://docs.python.org/library/time.html#time.strftime
        @param fmt: strftime() format string
        @param delay: seconds added to the current time before formatting
        @return: formatted date string
        @raise ValueError if format is None
        """
        if not fmt:
            raise ValueError
        now = datetime.datetime.utcnow() + datetime.timedelta(seconds=delay)
        # Sleep until the microsecond counter rolls over to the next second.
        time.sleep((1e6 - now.microsecond) / 1e6)
        now = datetime.datetime.utcnow() + datetime.timedelta(seconds=delay)
        return now.strftime(fmt)
    def _handler_sync_clock(self, *args, **kwargs):
        """
        sync clock close to a second edge
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
        @throws InstrumentTimeoutException if the device does not respond correctly.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        # Account for the time it takes to transmit the command one character
        # at a time so the timestamp lands close to the second edge.
        cmd_len = len('clock 03/20/2014 17:14:55' + NEWLINE)
        delay = cmd_len * INTER_CHARACTER_DELAY
        time_format = "%m/%d/%Y %H:%M:%S"
        str_val = self.get_timestamp_delayed(time_format, delay)
        # str_val = time.strftime(time_format, time.gmtime(time.time() + self._clock_set_offset))
        log.debug("Setting instrument clock to '%s'", str_val)
        ras_time = self._do_cmd_resp(McLaneCommand.CLOCK, str_val, response_regex=McLaneResponse.READY)[0]
        return None, (None, {'time': ras_time})

    def _handler_command_acquire(self, *args, **kwargs):
        """Sync the clock, then begin a sample by transitioning to FLUSH."""
        self._handler_sync_clock()
        return ProtocolState.FLUSH, ResourceAgentState.BUSY
# def _handler_command_status(self, *args, **kwargs):
# # get the following:
# # - VERSION
# # - CAPACITY (pump flow)
# # - BATTERY
# # - CODES (termination codes)
# # - COPYRIGHT (termination codes)
# return None, ResourceAgentState.COMMAND
    def _handler_command_clear(self, *args, **kwargs):
        """Transition to CLEAR to reverse-flush the home port."""
        return ProtocolState.CLEAR, ResourceAgentState.BUSY
########################################################################
# Private helpers.
########################################################################
    def _wakeup(self, wakeup_timeout=10, response_timeout=3):
        """
        Over-written because waking this instrument up is a multi-step process with
        two different requests required
        @param wakeup_timeout The timeout to wake the device.
        @param response_timeout The time to look for response to a wakeup attempt.
        @throw InstrumentTimeoutException if the device could not be woken.
        """
        sleep_time = .1
        command = McLaneCommand.GO
        # Grab start time for overall wakeup timeout.
        starttime = time.time()
        while True:
            # Clear the prompt buffer.
            log.debug("_wakeup: clearing promptbuf: %s", self._promptbuf)
            self._promptbuf = ''
            # Send a command and wait delay amount for response.
            log.debug('_wakeup: Sending command %s, delay=%s', command.encode("hex"), response_timeout)
            for char in command:
                self._connection.send(char)
                time.sleep(INTER_CHARACTER_DELAY)
            # Poll the prompt buffer until a known prompt shows up or the
            # per-attempt response timeout elapses.
            sleep_amount = 0
            while True:
                time.sleep(sleep_time)
                if self._promptbuf.find(Prompt.COMMAND_INPUT) != -1:
                    # instrument is awake
                    log.debug('_wakeup: got command input prompt %s', Prompt.COMMAND_INPUT)
                    # add inter-character delay which _do_cmd_resp() incorrectly doesn't add to
                    # the start of a transmission
                    time.sleep(INTER_CHARACTER_DELAY)
                    return Prompt.COMMAND_INPUT
                if self._promptbuf.find(Prompt.ENTER_CTRL_C) != -1:
                    # Instrument asked for a Ctrl-C; switch command and retry.
                    command = McLaneCommand.CONTROL_C
                    break
                if self._promptbuf.find(Prompt.PERIOD) == 0:
                    command = McLaneCommand.CONTROL_C
                    break
                sleep_amount += sleep_time
                if sleep_amount >= response_timeout:
                    log.debug("_wakeup: expected response not received, buffer=%s", self._promptbuf)
                    break
            if time.time() > starttime + wakeup_timeout:
                raise InstrumentTimeoutException(
                    "_wakeup(): instrument failed to wakeup in %d seconds time" % wakeup_timeout)
def _build_command(self, cmd, *args):
return cmd + ' ' + ' '.join([str(x) for x in args]) + NEWLINE
    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        # This driver is not vendor-software compatible.
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)

    def _build_command_dict(self):
        """
        Populate the command dictionary with command.
        """
        self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="synchronize clock")
def _build_param_dict(self):
"""
Populate the parameter dictionary with XR-420 parameters.
For each parameter key add value formatting function for set commands.
"""
# The parameter dictionary.
self._param_dict = ProtocolParameterDict()
# Add parameter handlers to parameter dictionary for instrument configuration parameters.
self._param_dict.add(Parameter.FLUSH_VOLUME,
r'Flush Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=150,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="flush_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FLUSH_FLOWRATE,
r'Flush Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="flush_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FLUSH_MINFLOW,
r'Flush Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="flush_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_VOLUME,
r'Fill Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=4000,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="fill_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_FLOWRATE,
r'Fill Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="fill_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_MINFLOW,
r'Fill Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="fill_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_VOLUME,
r'Reverse Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=100,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="clear_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_FLOWRATE,
r'Reverse Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="clear_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_MINFLOW,
r'Reverse Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="clear_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
    def _update_params(self):
        """
        Update the parameter dictionary.
        """
        # NOTE(review): appears to be deliberately a no-op beyond logging,
        # since all parameters are immutable startup parameters -- confirm.
        log.debug("_update_params:")
# def _parse_battery_response(self, response, prompt):
# """
# Parse handler for battery command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if battery command misunderstood.
# """
# log.debug("_parse_battery_response: response=%s, prompt=%s", response, prompt)
# if prompt == Prompt.UNRECOGNIZED_COMMAND:
# raise InstrumentProtocolException('battery command not recognized: %s.' % response)
#
# if not self._param_dict.update(response):
# raise InstrumentProtocolException('battery command not parsed: %s.' % response)
#
# return
#
# def _parse_clock_response(self, response, prompt):
# """
# Parse handler for clock command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if clock command misunderstood.
# @retval the joined string from the regular expression match
# """
# # extract current time from response
# log.debug('--- djm --- parse_clock_response: response: %r', response)
# ras_time_string = ' '.join(response.split()[:2])
# time_format = "%m/%d/%y %H:%M:%S"
# ras_time = time.strptime(ras_time_string, time_format)
# ras_time = list(ras_time)
# ras_time[-1] = 0 # tm_isdst field set to 0 - using GMT, no DST
#
# return tuple(ras_time)
#
# def _parse_port_response(self, response, prompt):
# """
# Parse handler for port command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if port command misunderstood.
# @retval the joined string from the regular expression match
# """
# # extract current port from response
# log.debug('--- djm --- parse_port_response: response: %r', response)
# port = int(response)
#
# return port
| ooici/marine-integrations | mi/instrument/mclane/driver.py | Python | bsd-2-clause | 50,088 |
from __future__ import unicode_literals
from django.template.defaultfilters import slugify as django_slugify
from django.utils.importlib import import_module
from unidecode import unidecode
# Timezone support with fallback: older Django without django.utils.timezone
# gets a naive datetime.now instead.
try:
    from django.utils.timezone import now
except ImportError:
    from datetime import datetime
    now = datetime.now
def slugify(s):
    """
    Translates unicode into closest possible ascii chars before
    slugifying.
    """
    # Local import; presumably to avoid a hard dependency on 'future' at
    # module import time -- confirm.
    from future.builtins import str
    return django_slugify(unidecode(str(s)))
def unique_slug(manager, slug_field, slug):
    """
    Return a version of ``slug`` that is unique for the given manager,
    appending an incrementing numeric suffix until no object matches.
    """
    attempt = 0
    candidate = slug
    # Keep probing until no existing object carries the candidate slug.
    while manager.filter(**{slug_field: candidate}):
        attempt += 1
        candidate = "%s-%s" % (slug, attempt)
    return candidate
def split_choices(choices_string):
    """
    Convert a comma separated choices string to a list, dropping empty
    entries and surrounding whitespace.
    """
    stripped = (choice.strip() for choice in choices_string.split(","))
    return [choice for choice in stripped if choice]
def html5_field(name, base):
    """
    Takes a Django form field class and returns a subclass of
    it with the given name as its input type.
    """
    # Dynamically create the subclass; str("") keeps the (empty) class name
    # a native str on both Python 2 and 3.
    attrs = {"input_type": name}
    return type(str(""), (base,), attrs)
def import_attr(path):
    """
    Given a Python dotted path to a variable in a module,
    imports the module and returns the variable in it.
    """
    module_path, attr_name = path.rsplit(".", 1)
    module = import_module(module_path)
    return getattr(module, attr_name)
| JostCrow/django-forms-builder | forms_builder/forms/utils.py | Python | bsd-2-clause | 1,599 |
# coding: utf-8
from collections import namedtuple
from pandas.io.msgpack.exceptions import * # noqa
from pandas.io.msgpack._version import version # noqa
class ExtType(namedtuple("ExtType", "code data")):
    """ExtType represents ext type in msgpack."""

    def __new__(cls, code, data):
        # Validate before delegating to the namedtuple constructor: ext type
        # codes are ints in [0, 127] and payloads are raw bytes.
        if not isinstance(code, int):
            raise TypeError("code must be int")
        if not isinstance(data, bytes):
            raise TypeError("data must be bytes")
        if code < 0 or code > 127:
            raise ValueError("code must be 0~127")
        return super().__new__(cls, code, data)
import os # noqa
from pandas.io.msgpack._packer import Packer # noqa
from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
def pack(o, stream, **kwargs):
    """
    Pack object `o` and write it to `stream`

    Keyword arguments are forwarded to the Packer constructor.
    See :class:`Packer` for options.
    """
    packer = Packer(**kwargs)
    stream.write(packer.pack(o))


def packb(o, **kwargs):
    """
    Pack object `o` and return packed bytes

    Keyword arguments are forwarded to the Packer constructor.
    See :class:`Packer` for options.
    """
    return Packer(**kwargs).pack(o)
# Aliases for API compatibility with simplejson/marshal/pickle:
# load/loads deserialize, dump/dumps serialize.
load = unpack
loads = unpackb
dump = pack
dumps = packb
| toobaz/pandas | pandas/io/msgpack/__init__.py | Python | bsd-3-clause | 1,223 |
from django.conf.urls import include, url
from django.shortcuts import redirect
from olympia.stats.urls import stats_patterns
from . import views
# URL fragment matching one add-on identifier (anything except /, <, >, ", ').
ADDON_ID = r"""(?P<addon_id>[^/<>"']+)"""

# These will all start with /addon/<addon_id>/
detail_patterns = [
    url('^$', views.addon_detail, name='addons.detail'),
    url('^more$', views.addon_detail, name='addons.detail_more'),
    url('^eula/(?P<file_id>\d+)?$', views.eula, name='addons.eula'),
    url('^license/(?P<version>[^/]+)?', views.license, name='addons.license'),
    url('^privacy/', views.privacy, name='addons.privacy'),
    url('^abuse/', views.report_abuse, name='addons.abuse'),
    url('^reviews/', include('olympia.ratings.urls')),
    url('^statistics/', include(stats_patterns)),
    url('^versions/', include('olympia.versions.urls')),

    # Old contribution urls: each one now permanently redirects to the
    # add-on detail page.
    url('^developers$',
        lambda r, addon_id: redirect('addons.detail',
                                     addon_id, permanent=True),
        name='addons.meet'),
    url('^contribute/roadblock/',
        lambda r, addon_id: redirect('addons.detail',
                                     addon_id, permanent=True),
        name='addons.roadblock'),
    url('^contribute/installed/',
        lambda r, addon_id: redirect('addons.detail',
                                     addon_id, permanent=True),
        name='addons.installed'),
    url('^contribute/thanks',
        lambda r, addon_id: redirect('addons.detail',
                                     addon_id, permanent=True),
        name='addons.thanks'),
    url('^contribute/$',
        lambda r, addon_id: redirect('addons.detail',
                                     addon_id, permanent=True),
        name='addons.contribute'),
    url('^contribute/(?P<status>cancel|complete)$',
        lambda r, addon_id, status: redirect('addons.detail',
                                             addon_id, permanent=True),
        name='addons.contribute_status'),
    url('^about$',
        lambda r, addon_id: redirect('addons.detail',
                                     addon_id, permanent=True),
        name='addons.about'),
]
urlpatterns = [
    # Promo modules for the homepage
    url('^i/promos$', views.homepage_promos, name='addons.homepage_promos'),

    # See https://github.com/mozilla/addons-server/issues/3130
    # Hardcode because there is no relation from blocklist items and the
    # add-on they block :-(
    url('^addon/icloud-bookmarks/$', views.icloud_bookmarks_redirect,
        name='addons.icloudbookmarksredirect'),

    # URLs for a single add-on.
    url('^addon/%s/' % ADDON_ID, include(detail_patterns)),

    # Remora EULA and Privacy policy URLS - permanent redirects to the
    # current eula/privacy views.
    url('^addons/policy/0/(?P<addon_id>\d+)/(?P<file_id>\d+)',
        lambda r, addon_id, file_id: redirect(
            'addons.eula', addon_id, file_id, permanent=True)),
    url('^addons/policy/0/(?P<addon_id>\d+)/',
        lambda r, addon_id: redirect(
            'addons.privacy', addon_id, permanent=True)),

    # Legacy license URL carrying the version id in the path.
    url('^versions/license/(\d+)$', views.license_redirect),

    url('^find-replacement/$', views.find_replacement_addon,
        name='addons.find_replacement'),
]
| lavish205/olympia | src/olympia/addons/urls.py | Python | bsd-3-clause | 3,177 |
#!/usr/bin/env/python
import sys
import argparse
from bokpipe.bokastrom import scamp_solve
# Command-line interface: solve the astrometry (WCS) for one image/catalog
# pair using scamp.
parser = argparse.ArgumentParser()
parser.add_argument("image", type=str,
                    help="input FITS image")
parser.add_argument("catalog", type=str,
                    help="input FITS catalog")
parser.add_argument("-a", "--args", type=str,
                    help="arguments to pass to scamp config")
parser.add_argument("-f", "--filter", type=str, default='g',
                    help="reference band")
parser.add_argument("-p", "--plots", action="store_true",
                    help="write check plots")
parser.add_argument("-r", "--reference", type=str, default=None,
                    help="reference catalog")
parser.add_argument('-v', '--verbose', action='count',
                    help='increase output verbosity')
parser.add_argument("-w", "--write", action="store_true",
                    help="write WCS to image header")
parser.add_argument("--single", action="store_true",
                    help="single pass")
args = parser.parse_args()

# Translate the optional "key=value key=value ..." string from --args into
# keyword arguments for the scamp configuration.
kwargs = {}
if args.args is not None:
    for pair in args.args.split():
        key, value = pair.split('=')
        kwargs[key] = value

scamp_solve(args.image, args.catalog, refStarCatFile=args.reference,
            filt=args.filter, savewcs=args.write, clobber=True,
            check_plots=args.plots, twopass=not args.single,
            verbose=args.verbose, **kwargs)
| legacysurvey/rapala | bokpipe/tools/bokwcs.py | Python | bsd-3-clause | 1,410 |
from os import path
# Theme version, exposed for packaging/metadata consumers.
__version__ = '0.0.22'
__version_full__ = __version__
def get_html_theme_path():
    """Return the absolute path of the directory containing this theme package."""
    # Note: this returns a single path string (the package's parent
    # directory), not a list.
    package_dir = path.dirname(__file__)
    return path.abspath(path.dirname(package_dir))
def setup(app):
    """Sphinx extension entry point: register this directory as an HTML theme."""
    app.add_html_theme(
        'divio_docs_theme',
        path.abspath(path.dirname(__file__))
    )
| halfflat/nestmc-proto | doc/scripts/divio_docs_theme/__init__.py | Python | bsd-3-clause | 352 |
"""
sentry.db.models
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from copy import copy
import logging
import six
from bitfield.types import BitHandler
from django.db import models
from django.db.models import signals
from django.db.models.query_utils import DeferredAttribute
from django.utils import timezone
from .fields.bounded import BoundedBigAutoField
from .manager import BaseManager
from .query import update
__all__ = ('BaseModel', 'Model', 'sane_repr')

# Sentinels: UNSAVED marks instances that have no tracked-data snapshot yet
# (no primary key); DEFERRED marks field values not loaded from the database.
UNSAVED = object()
DEFERRED = object()
def sane_repr(*attrs):
    """
    Build a ``__repr__`` implementation that renders the given attribute
    names (always including the primary key) plus the instance's address.
    """
    if 'id' not in attrs and 'pk' not in attrs:
        attrs = ('id',) + attrs

    def _repr(self):
        rendered = ', '.join(
            '%s=%s' % (name, repr(getattr(self, name, None))) for name in attrs
        )
        return u'<%s at 0x%x: %s>' % (type(self).__name__, id(self), rendered)

    return _repr
class BaseModel(models.Model):
    """
    Abstract model base that snapshots field values after load/save so
    callers can ask which fields changed (``has_changed`` / ``old_value``).
    """
    class Meta:
        abstract = True

    objects = BaseManager()

    update = update

    def __init__(self, *args, **kwargs):
        super(BaseModel, self).__init__(*args, **kwargs)
        self._update_tracked_data()

    def __getstate__(self):
        d = self.__dict__.copy()
        # we cant serialize weakrefs, so drop the tracked-data snapshot from
        # pickled state; __setstate__ rebuilds it.
        # Bug fix: the key used to be '_Model__data', but name mangling of
        # self.__data inside this class produces '_BaseModel__data', so the
        # pop never removed anything.
        d.pop('_BaseModel__data', None)
        return d

    def __hash__(self):
        # Django decided that it shouldnt let us hash objects even though they have
        # memory addresses. We need that behavior, so let's revert.
        if self.pk:
            return models.Model.__hash__(self)
        return id(self)

    def __reduce__(self):
        (model_unpickle, stuff, _) = super(BaseModel, self).__reduce__()
        # Substitute our filtered state for the one Django computed.
        return (model_unpickle, stuff, self.__getstate__())

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._update_tracked_data()

    def __get_field_value(self, field):
        # DEFERRED for fields not loaded from the DB; FKs are read through
        # their raw column (x_id) to avoid triggering a query.
        if isinstance(type(field).__dict__.get(field.attname), DeferredAttribute):
            return DEFERRED
        if isinstance(field, models.ForeignKey):
            return getattr(self, field.column, None)
        return getattr(self, field.attname, None)

    def _update_tracked_data(self):
        "Updates a local copy of attributes values"
        if self.id:
            data = {}
            for f in self._meta.fields:
                # XXX(dcramer): this is how Django determines this (copypasta from Model)
                if isinstance(type(f).__dict__.get(f.attname),
                              DeferredAttribute) or f.column is None:
                    continue
                try:
                    v = self.__get_field_value(f)
                except AttributeError as e:
                    # this case can come up from pickling
                    logging.exception(six.text_type(e))
                else:
                    if isinstance(v, BitHandler):
                        # mutable bitfields must be copied, or later edits
                        # would also change the snapshot
                        v = copy(v)
                    data[f.column] = v
            self.__data = data
        else:
            self.__data = UNSAVED

    def _update_timestamps(self):
        # Touch date_updated (when the model defines one) just before save.
        if hasattr(self, 'date_updated'):
            self.date_updated = timezone.now()

    def has_changed(self, field_name):
        "Returns ``True`` if ``field`` has changed since initialization."
        # NOTE(review): the snapshot is keyed by column while lookups here use
        # field_name; for ForeignKeys (column 'x_id' vs name 'x') the lookup
        # misses -- confirm intended.
        if self.__data is UNSAVED:
            return False
        field = self._meta.get_field(field_name)
        value = self.__get_field_value(field)
        if value is DEFERRED:
            return False
        return self.__data.get(field_name) != value

    def old_value(self, field_name):
        "Returns the previous value of ``field``"
        if self.__data is UNSAVED:
            return None
        value = self.__data.get(field_name)
        if value is DEFERRED:
            return None
        return value
class Model(BaseModel):
    """Abstract base adding a bounded big-integer auto primary key and a sane repr."""
    id = BoundedBigAutoField(primary_key=True)

    class Meta:
        abstract = True

    __repr__ = sane_repr('id')
def __model_post_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_tracked_data()
def __model_pre_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_timestamps()
def __model_class_prepared(sender, **kwargs):
    """class_prepared hook: reject BaseModel subclasses missing ``__core__``."""
    if not issubclass(sender, BaseModel):
        return
    if hasattr(sender, '__core__'):
        return
    raise ValueError('{!r} model has not defined __core__'.format(sender))
# Wire the hooks above into Django's model signals. Connecting without a
# ``sender`` means they fire for every model and filter internally.
signals.pre_save.connect(__model_pre_save)
signals.post_save.connect(__model_post_save)
signals.class_prepared.connect(__model_class_prepared)
| looker/sentry | src/sentry/db/models/base.py | Python | bsd-3-clause | 4,686 |
import copy
from corehq.pillows.case import CasePillow
from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_MAPPING, REPORT_CASE_INDEX
from django.conf import settings
from .base import convert_property_dict
class ReportCasePillow(CasePillow):
    """Case indexer that also indexes custom (dynamic) case properties.

    Extends ``CasePillow`` with a richer mapping so report views can query
    arbitrary case properties, not just the common ones.
    """
    es_alias = "report_cases"
    es_type = "report_case"
    es_index = REPORT_CASE_INDEX
    default_mapping = REPORT_CASE_MAPPING

    def get_unique_id(self):
        # The index metadata hash doubles as this pillow's unique id.
        return self.calc_meta()

    def change_transform(self, doc_dict):
        # Full-property indexing is opt-in per domain; skip everything else.
        enabled_domains = getattr(settings, 'ES_CASE_FULL_INDEX_DOMAINS', [])
        if self.get_domain(doc_dict) not in enabled_domains:
            return None
        transformed = copy.deepcopy(doc_dict)
        convert_property_dict(
            transformed,
            self.default_mapping,
            override_root_keys=['_id', 'doc_type', '_rev', '#export_tag'],
        )
        return transformed
| puttarajubr/commcare-hq | corehq/pillows/reportcase.py | Python | bsd-3-clause | 1,031 |
#!/usr/bin/env python3
import socket
def main():
    """Accept TCP connections on 127.0.0.1:5555 forever, sending each
    client the bytes b"blabla" and closing the connection.

    Test helper: runs until killed; never returns.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # Allow immediate restarts of this helper without waiting for the
        # previous socket to leave TIME_WAIT.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(('127.0.0.1', 5555))
        s.listen(1)
        while True:
            conn, addr = s.accept()
            try:
                #data = conn.recv(1024)
                conn.send("blabla".encode())
            finally:
                # Close even if send() fails so descriptors don't leak.
                conn.close()
# Run the listener only when executed directly (it blocks forever).
if __name__ == '__main__':
    main()
| polaris-gslb/polaris-core | tests/tcp_listen.py | Python | bsd-3-clause | 343 |
from django.dispatch import Signal
# Each Signal documents (via providing_args) the keyword arguments its
# senders supply to receivers.
page_published = Signal(providing_args=['instance', 'revision'])
page_unpublished = Signal(providing_args=['instance'])
pre_page_move = Signal(providing_args=['instance', 'parent_page_before', 'parent_page_after', 'url_path_before', 'url_path_after'])
post_page_move = Signal(providing_args=['instance', 'parent_page_before', 'parent_page_after', 'url_path_before', 'url_path_after'])
workflow_approved = Signal(providing_args=['instance', 'user'])
workflow_rejected = Signal(providing_args=['instance', 'user'])
workflow_cancelled = Signal(providing_args=['instance', 'user'])
workflow_submitted = Signal(providing_args=['instance', 'user'])
task_approved = Signal(providing_args=['instance', 'user'])
task_rejected = Signal(providing_args=['instance', 'user'])
task_submitted = Signal(providing_args=['instance', 'user'])
# Bug fix: a missing comma ('instance' 'user') silently concatenated the two
# names into the single argument 'instanceuser'.
task_cancelled = Signal(providing_args=['instance', 'user'])
| kaedroho/wagtail | wagtail/core/signals.py | Python | bsd-3-clause | 923 |
#!/usr/bin/env python
# coding=utf-8
import os
import datetime
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from v2ex.babel import Member
from v2ex.babel import Counter
from v2ex.babel import Section
from v2ex.babel import Node
from v2ex.babel import Topic
from v2ex.babel import Reply
from v2ex.babel.da import *
template.register_template_library('v2ex.templatetags.filters')
class FeedHomeHandler(webapp.RequestHandler):
    """Serves the sitewide feed of the ten newest topics.

    Rendered XML is cached in memcache for ten minutes so feed pollers
    don't hammer the datastore.
    """
    def head(self):
        # Feed readers often probe with HEAD; answer with an empty body.
        self.response.out.write('')

    def get(self):
        site = GetSite()
        output = memcache.get('feed_index')
        if output is None:
            topics = db.GqlQuery("SELECT * FROM Topic ORDER BY created DESC LIMIT 10")
            template_values = {
                'site': site,
                'site_domain': site.domain,
                'site_name': site.title,
                'site_slogan': site.slogan,
                'feed_url': 'http://' + site.domain + '/index.xml',
                'site_updated': datetime.datetime.now(),
                'topics': topics,
            }
            path = os.path.join(os.path.dirname(__file__), 'tpl', 'feed', 'index.xml')
            output = template.render(path, template_values)
            memcache.set('feed_index', output, 600)
        self.response.out.write(output)
def main():
    """WSGI entry point: route both feed URLs to FeedHomeHandler."""
    routes = [
        ('/index.xml', FeedHomeHandler),
        ('/feed/v2ex.rss', FeedHomeHandler),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
main() | qhm123/the10000 | feed.py | Python | bsd-3-clause | 1,821 |
##########################################################################
#
# Copyright (c) 2011-2013, John Haddon. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import Gaffer
import GafferUI
class AboutWindow( GafferUI.Window ) :

	"""Tabbed "About" window showing the application logo/version, the
	license text and credits for bundled dependencies. All displayed data
	comes from the `about` module object passed to __init__."""

	def __init__( self, about, **kw ) :
		GafferUI.Window.__init__( self, title = "About " + about.name(), sizeMode=GafferUI.Window.SizeMode.Manual, borderWidth = 6, **kw )
		# Keep label link-activation connections alive for the window's lifetime.
		self.__linkActivatedConnections = []
		with self :
			with GafferUI.TabbedContainer() :
				# First tab : logo, project url, version and copyright notice.
				with GafferUI.ListContainer(
					GafferUI.ListContainer.Orientation.Vertical,
					spacing=10,
					borderWidth=10,
					parenting = { "label" : "Gaffer" },
				) :
					GafferUI.Spacer(
						IECore.V2i( 1 ),
						parenting = { "expand" : True }
					)
					GafferUI.Image(
						"GafferLogo.png",
						parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center }
					)
					text = "<h3><a href='%s'>%s</a></h3> <small>Version %s</small>" % (
						about.url(),
						about.url(),
						about.versionString(),
					)
					self.__label(
						text,
						horizontalAlignment = GafferUI.HorizontalAlignment.Center,
						parenting = { "horizontalAlignment" : GafferUI.Label.HorizontalAlignment.Center },
					)
					GafferUI.Spacer(
						IECore.V2i( 1 ),
						parenting = { "expand" : True }
					)
					self.__label(
						"<small>%s</small>" % about.copyright().replace( "(c)", "©" ),
						parenting = { "horizontalAlignment" : GafferUI.Label.HorizontalAlignment.Center },
					)
				# Second tab : full license text in a scrollable area.
				with GafferUI.ListContainer(
					GafferUI.ListContainer.Orientation.Vertical,
					spacing=10,
					borderWidth=10,
					parenting = { "label" : "License" },
				) :
					license = "".join( open( os.path.expandvars( about.license() ) ).readlines() )
					with GafferUI.ScrolledContainer(
						horizontalMode=GafferUI.ScrolledContainer.ScrollMode.Never,
						verticalMode=GafferUI.ScrolledContainer.ScrollMode.Automatic,
						borderWidth = 5
					) :
						self.__label( "<pre>" + license + "</pre>" )
				# Optional third tab : per-dependency credits/license/url,
				# assembled into a single HTML blob.
				dependencies = about.dependencies()
				if dependencies :
					with GafferUI.ListContainer(
						GafferUI.ListContainer.Orientation.Vertical,
						spacing=10,
						borderWidth=10,
						parenting = { "label" : "Dependencies" },
					) :
						with GafferUI.ScrolledContainer(
							horizontalMode=GafferUI.ScrolledContainer.ScrollMode.Never,
							verticalMode=GafferUI.ScrolledContainer.ScrollMode.Always,
							borderWidth = 5
						) :
							with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=5, borderWidth=10 ) :
								text = "<p>%s</p>" % self.__wrapText( about.dependenciesPreamble() )
								for d in dependencies :
									text += "<h3>%s</h3>" % d["name"]
									if "credit" in d :
										text += "<p>%s</p>" % self.__wrapText( d["credit"] )
									if "license" in d :
										text += "<a href='file://%s'>License</a>" % os.path.expandvars( d["license"] )
									if "url" in d :
										if "license" in d :
											text += " | "
										text += "<a href='%s'>%s</a>" % ( d["url"], d["url"] )
								self.__label( text )

	def __wrapText( self, text ) :
		# Hard-wrap at 80 columns and convert to HTML line breaks.
		return IECore.StringUtil.wrap( text, 80 ).replace( '\n', "<br>" )

	def __label( self, text, **kw ) :
		"""Create a Label wrapping `text` in a small stylesheet (grey,
		undecorated links) and hook up link activation to open URLs."""
		## \todo Perhaps this stylesheet stuff should be done as standard for all labels?
		header = "<html><head><style type=text/css>"
		header += "a:link { color:#bbbbbb; text-decoration:none }"
		header += "</style></head><body>"
		footer = "</body></html>"
		text = header + text + footer
		label = GafferUI.Label( text, **kw )
		self.__linkActivatedConnections.append( label.linkActivatedSignal().connect( Gaffer.WeakMethod( self.__linkActivated ) ) )
		return label

	def __linkActivated( self, label, url ) :
		GafferUI.showURL( url )
| cedriclaunay/gaffer | python/GafferUI/AboutWindow.py | Python | bsd-3-clause | 5,545 |
import datetime
from flask import g
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Sequence,
String,
Table,
UniqueConstraint,
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from ... import Model
from ..._compat import as_unicode
_dont_audit = False
class Permission(Model):
    """A single named permission (e.g. ``can_edit``)."""
    __tablename__ = "ab_permission"
    id = Column(Integer, Sequence("ab_permission_id_seq"), primary_key=True)
    name = Column(String(100), unique=True, nullable=False)
    def __repr__(self):
        return self.name
class ViewMenu(Model):
    """A named view or menu element that permissions can attach to."""
    __tablename__ = "ab_view_menu"
    id = Column(Integer, Sequence("ab_view_menu_id_seq"), primary_key=True)
    name = Column(String(250), unique=True, nullable=False)

    def __eq__(self, other):
        return (isinstance(other, self.__class__)) and (self.name == other.name)

    def __ne__(self, other):
        # Fix: ``__neq__`` below is a misspelling Python never invokes; this
        # is the real inequality hook. It mirrors the Python 3 default
        # (negated __eq__) so the ``!=`` operator behaves as before, but
        # explicitly.
        return not self.__eq__(other)

    def __neq__(self, other):
        # Kept verbatim for backward compatibility with any direct callers.
        return self.name != other.name

    def __hash__(self):
        # Defining __eq__ without __hash__ made instances unhashable; hash
        # on the same key equality uses so sets/dict keys work correctly.
        return hash(self.name)

    def __repr__(self):
        return self.name
# Many-to-many join table: which (permission, view) pairs each role grants.
assoc_permissionview_role = Table(
    "ab_permission_view_role",
    Model.metadata,
    Column("id", Integer, Sequence("ab_permission_view_role_id_seq"), primary_key=True),
    Column("permission_view_id", Integer, ForeignKey("ab_permission_view.id")),
    Column("role_id", Integer, ForeignKey("ab_role.id")),
    UniqueConstraint("permission_view_id", "role_id"),
)
class Role(Model):
    """A named role: a set of permission/view grants assignable to users."""
    __tablename__ = "ab_role"
    id = Column(Integer, Sequence("ab_role_id_seq"), primary_key=True)
    name = Column(String(64), unique=True, nullable=False)
    permissions = relationship(
        "PermissionView", secondary=assoc_permissionview_role, backref="role"
    )
    def __repr__(self):
        return self.name
class PermissionView(Model):
    """Association of one Permission with one ViewMenu — the unit of access
    that roles actually grant (e.g. ``can edit on UserModelView``)."""
    __tablename__ = "ab_permission_view"
    __table_args__ = (UniqueConstraint("permission_id", "view_menu_id"),)
    id = Column(Integer, Sequence("ab_permission_view_id_seq"), primary_key=True)
    permission_id = Column(Integer, ForeignKey("ab_permission.id"))
    permission = relationship("Permission")
    view_menu_id = Column(Integer, ForeignKey("ab_view_menu.id"))
    view_menu = relationship("ViewMenu")
    def __repr__(self):
        return str(self.permission).replace("_", " ") + " on " + str(self.view_menu)
# Many-to-many join table: assignment of roles to users.
assoc_user_role = Table(
    "ab_user_role",
    Model.metadata,
    Column("id", Integer, Sequence("ab_user_role_id_seq"), primary_key=True),
    Column("user_id", Integer, ForeignKey("ab_user.id")),
    Column("role_id", Integer, ForeignKey("ab_role.id")),
    UniqueConstraint("user_id", "role_id"),
)
class User(Model):
    """An application user, with login bookkeeping, audit columns
    (created/changed by & on) and a many-to-many link to roles. Implements
    the Flask-Login user interface (is_authenticated/is_active/...)."""
    __tablename__ = "ab_user"
    id = Column(Integer, Sequence("ab_user_id_seq"), primary_key=True)
    first_name = Column(String(64), nullable=False)
    last_name = Column(String(64), nullable=False)
    username = Column(String(64), unique=True, nullable=False)
    password = Column(String(256))
    active = Column(Boolean)
    email = Column(String(64), unique=True, nullable=False)
    last_login = Column(DateTime)
    login_count = Column(Integer)
    fail_login_count = Column(Integer)
    roles = relationship("Role", secondary=assoc_user_role, backref="user")
    created_on = Column(DateTime, default=datetime.datetime.now, nullable=True)
    changed_on = Column(DateTime, default=datetime.datetime.now, nullable=True)
    @declared_attr
    def created_by_fk(self):
        # declared_attr so this self-referential FK (with its per-request
        # default of the current user's id) is built per mapping, not at
        # class-definition time.
        return Column(
            Integer, ForeignKey("ab_user.id"), default=self.get_user_id, nullable=True
        )
    @declared_attr
    def changed_by_fk(self):
        return Column(
            Integer, ForeignKey("ab_user.id"), default=self.get_user_id, nullable=True
        )
    created_by = relationship(
        "User",
        backref=backref("created", uselist=True),
        remote_side=[id],
        primaryjoin="User.created_by_fk == User.id",
        uselist=False,
    )
    changed_by = relationship(
        "User",
        backref=backref("changed", uselist=True),
        remote_side=[id],
        primaryjoin="User.changed_by_fk == User.id",
        uselist=False,
    )
    @classmethod
    def get_user_id(cls):
        # Audit default: the id of the user on the current Flask request,
        # or None when called outside a request context.
        try:
            return g.user.id
        except Exception:
            return None
    @property
    def is_authenticated(self):
        # Flask-Login interface: stored users are always authenticated.
        return True
    @property
    def is_active(self):
        return self.active
    @property
    def is_anonymous(self):
        return False
    def get_id(self):
        # Flask-Login interface: ids must be returned as text.
        return as_unicode(self.id)
    def get_full_name(self):
        return u"{0} {1}".format(self.first_name, self.last_name)
    def __repr__(self):
        return self.get_full_name()
class RegisterUser(Model):
    """A pending self-registration; promoted to a ``User`` once the emailed
    ``registration_hash`` is confirmed."""
    __tablename__ = "ab_register_user"
    id = Column(Integer, Sequence("ab_register_user_id_seq"), primary_key=True)
    first_name = Column(String(64), nullable=False)
    last_name = Column(String(64), nullable=False)
    username = Column(String(64), unique=True, nullable=False)
    password = Column(String(256))
    email = Column(String(64), nullable=False)
    registration_date = Column(DateTime, default=datetime.datetime.now, nullable=True)
    registration_hash = Column(String(256))
| dpgaspar/Flask-AppBuilder | flask_appbuilder/security/sqla/models.py | Python | bsd-3-clause | 5,212 |
from __future__ import absolute_import, unicode_literals
import csv
import datetime
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages
from wagtail.wagtailcore.models import Page
from wagtail.wagtailforms.forms import SelectDateForm
from wagtail.wagtailforms.models import get_forms_for_user
def index(request):
    """List the form pages whose submissions the current user may view."""
    pages = get_forms_for_user(request.user)
    paginator, pages = paginate(request, pages)
    return render(request, 'wagtailforms/index.html', {
        'form_pages': pages,
    })
def delete_submission(request, page_id, submission_id):
    """Confirm (GET) or perform (POST) deletion of one form submission."""
    # Only users allowed to manage this form page may touch its submissions.
    if not get_forms_for_user(request.user).filter(id=page_id).exists():
        raise PermissionDenied

    page = get_object_or_404(Page, id=page_id).specific
    submission = get_object_or_404(page.get_submission_class(), id=submission_id)

    if request.method != 'POST':
        # GET: show the confirmation page.
        return render(request, 'wagtailforms/confirm_delete.html', {
            'page': page,
            'submission': submission
        })

    submission.delete()
    messages.success(request, _("Submission deleted."))
    return redirect('wagtailforms:list_submissions', page_id)
def list_submissions(request, page_id):
    """List (and optionally CSV-export) the submissions of one form page.

    GET parameters: ``date_from``/``date_to`` filter by submission time,
    and ``action=CSV`` downloads the filtered rows as a CSV attachment
    instead of rendering the paginated HTML table.
    """
    if not get_forms_for_user(request.user).filter(id=page_id).exists():
        raise PermissionDenied
    form_page = get_object_or_404(Page, id=page_id).specific
    form_submission_class = form_page.get_submission_class()
    data_fields = form_page.get_data_fields()
    submissions = form_submission_class.objects.filter(page=form_page).order_by('submit_time')
    data_headings = [label for name, label in data_fields]
    select_date_form = SelectDateForm(request.GET)
    if select_date_form.is_valid():
        date_from = select_date_form.cleaned_data.get('date_from')
        date_to = select_date_form.cleaned_data.get('date_to')
        # careful: date_to should be increased by 1 day since the submit_time
        # is a time so it will always be greater
        if date_to:
            date_to += datetime.timedelta(days=1)
        if date_from and date_to:
            submissions = submissions.filter(submit_time__range=[date_from, date_to])
        elif date_from and not date_to:
            submissions = submissions.filter(submit_time__gte=date_from)
        elif not date_from and date_to:
            submissions = submissions.filter(submit_time__lte=date_to)
    if request.GET.get('action') == 'CSV':
        # return a CSV instead; deliberately unpaginated so the export
        # contains every filtered row.
        response = HttpResponse(content_type='text/csv; charset=utf-8')
        response['Content-Disposition'] = 'attachment;filename=export.csv'
        # Prevents UnicodeEncodeError for labels with non-ansi symbols
        data_headings = [smart_str(label) for label in data_headings]
        writer = csv.writer(response)
        writer.writerow(data_headings)
        for s in submissions:
            data_row = []
            form_data = s.get_data()
            for name, label in data_fields:
                data_row.append(smart_str(form_data.get(name)))
            writer.writerow(data_row)
        return response
    # HTML view: paginate only after the date filtering above.
    paginator, submissions = paginate(request, submissions)
    data_rows = []
    for s in submissions:
        form_data = s.get_data()
        data_row = [form_data.get(name) for name, label in data_fields]
        data_rows.append({
            "model_id": s.id,
            "fields": data_row
        })
    return render(request, 'wagtailforms/index_submissions.html', {
        'form_page': form_page,
        'select_date_form': select_date_form,
        'submissions': submissions,
        'data_headings': data_headings,
        'data_rows': data_rows
    })
| chrxr/wagtail | wagtail/wagtailforms/views.py | Python | bsd-3-clause | 3,935 |
class ResizeError(Exception):
    """Raised when the Huffman tree cannot be reduced to the target depth."""
    pass
def codelengths_from_frequencies(freqs):
    """Build a Huffman tree from ``freqs`` (char -> count), cap its depth at
    15 levels, and return ``[(char, code_length)]`` in canonical order
    (by code length, then by character)."""
    ordered = sorted(freqs.items(),
                     key=lambda item: (item[1], -item[0]), reverse=True)
    nodes = [Node(char=char, weight=count) for (char, count) in ordered]
    # Repeatedly merge the two lightest nodes, keeping ``nodes`` sorted by
    # decreasing weight so the lightest pair is always at the tail.
    while len(nodes) > 1:
        right = nodes.pop()
        left = nodes.pop()
        merged = Node(weight=left.weight + right.weight)
        merged.add([left, right])
        insert_at = 0
        while insert_at < len(nodes) and nodes[insert_at].weight > merged.weight:
            insert_at += 1
        nodes.insert(insert_at, merged)
    tree = Tree(nodes[0])
    tree.reduce(15)
    code_items = sorted(tree.codes().items(),
                        key=lambda item: (len(item[1]), item[0]))
    return [(char, len(code)) for char, code in code_items]
def normalized(codelengths):
    """Turn canonical ``[(char, nbits)]`` (sorted by length, then char) into
    a dict mapping each char to its canonical Huffman code, as a string of
    '0'/'1' characters."""
    first_char, current_len = codelengths[0]
    code_value = 0
    codes = {first_char: "0" * current_len}
    for (char, length) in codelengths[1:]:
        # Next code is previous value + 1 at the current length...
        code_value += 1
        bits = bin(code_value)[2:].rjust(current_len, "0")
        if length > current_len:
            # ...extended with zeros whenever the code length grows.
            bits = bits.ljust(length, "0")
            current_len = length
            code_value = int(bits, 2)
        assert len(bits) == length
        codes[char] = bits
    return codes
class Tree:
    """Wrapper around a Huffman ``Node`` tree: measures depth, reduces the
    depth to a maximum, and extracts the leaves' bit codes."""
    def __init__(self, root):
        self.root = root
        self.nb_levels = 0
    def length(self):
        """Return the tree depth, refreshing every node's ``level``."""
        self.root.level = 0
        # (removed a dead ``node = self.root`` local that was never used)
        nb_levels = 0
        def set_level(node):
            nonlocal nb_levels
            for child in node.children:
                child.level = node.level + 1
                nb_levels = max(nb_levels, child.level)
                if not child.is_leaf:
                    set_level(child)
        set_level(self.root)
        return nb_levels
    def reduce_tree(self):
        """Change the tree to reduce the number of levels.
        Uses the algorithm described in
        http://compressions.sourceforge.net/Huffman.html#3
        """
        currentlen = self.length()
        deepest = self.nodes_at(currentlen)
        deepest_leaves = [node for node in deepest if node.is_leaf]
        rightmost_leaf = deepest_leaves[-1]
        sibling = rightmost_leaf.parent.children[0]
        # replace rightmost_leaf's parent by rightmost_leaf
        parent = rightmost_leaf.parent
        grand_parent = parent.parent
        rank = grand_parent.children.index(parent)
        children = grand_parent.children
        children[rank] = rightmost_leaf
        grand_parent.add(children)
        # find first upper level with leaves
        up_level = rightmost_leaf.level - 2
        while up_level > 0:
            nodes = self.nodes_at(up_level)
            leaf_nodes = [node for node in nodes if node.is_leaf]
            if leaf_nodes:
                leftmost_leaf = leaf_nodes[0]
                # replace by node with leaves = [sibling, leftmost_leaf]
                parent = leftmost_leaf.parent
                rank = parent.children.index(leftmost_leaf)
                new_node = Node()
                new_node.level = leftmost_leaf.level
                children = [sibling, leftmost_leaf]
                new_node.add(children)
                parent.children[rank] = new_node
                new_node.parent = parent
                break
            else:
                up_level -= 1
        if up_level == 0:
            raise ResizeError
    def nodes_at(self, level, top=None):
        """Return list of all the nodes below top at specified level."""
        res = []
        if top is None:
            top = self.root
        if top.level == level:
            res = [top]
        elif not top.is_leaf:
            for child in top.children:
                res += self.nodes_at(level, child)
        return res
    def reduce(self, maxlevels):
        """Reduce number of levels to maxlevels, if possible."""
        while self.length() > maxlevels:
            self.reduce_tree()
    def codes(self, node=None, code=''):
        """Returns a dictionary mapping leaf characters to the Huffman code
        of the node, as a string of 0's and 1's."""
        if node is None:
            self.dic = {}
            node = self.root
        if node.is_leaf:
            self.dic[node.char] = code
        else:
            for i, child in enumerate(node.children):
                self.codes(child, code + str(i))
        return self.dic
class Node:
    """A Huffman tree node. Leaves carry ``char``; internal nodes carry two
    ``children``. ``weight`` is the frequency, ``height`` the subtree height,
    ``level`` the depth below the root."""
    def __init__(self, char=None, weight=0, level=0):
        self.char = char
        self.is_leaf = char is not None
        self.level = level
        self.weight = weight
        self.height = 0
    def add(self, children):
        # Attach a [left, right] pair, fix their levels, and propagate the
        # new subtree height up through all ancestors.
        self.children = children
        for child in self.children:
            child.parent = self
            child.level = self.level + 1
        self.height = max(self.height, children[0].height + 1,
                          children[1].height + 1)
        node = self
        while hasattr(node, "parent"):
            node.parent.height = max(node.parent.height, node.height + 1)
            node = node.parent
    def __repr__(self):
        if self.is_leaf:
            return f'{chr(self.char)!r}'
        else:
            return f'{self.children}'
class Compresser:
    """Huffman-compress a bytes-like object.

    Exposes the canonical code lengths (needed by ``Decompresser``) and the
    compressed payload either as a '0'/'1' string or packed into bytes.
    """
    def __init__(self, text):
        if not isinstance(text, (bytes, bytearray, memoryview)):
            raise TypeError("a bytes-like object is required, not '" +
                            type(text).__name__ + "'")
        self.text = text
        freqs = {}
        for car in self.text:
            freqs[car] = freqs.get(car, 0) + 1
        # Fix: reserve symbol 256 as the end-of-block marker. compressed_bytes()
        # emits self.codes[256] and decompress_bytes() stops on it; without
        # this entry compressed_bytes() raised KeyError.
        freqs[256] = 1
        self.codelengths = codelengths_from_frequencies(freqs)
        self.codes = normalized(self.codelengths)
        self.max_codelength = max(len(v) for v in self.codes.values())
    def compressed_bytes(self):
        """Pack the bit string plus the end-of-block code into a bytearray,
        left-aligning the final partial byte."""
        compressed = self.compressed_str() + self.codes[256]
        out = bytearray()
        pos = 0
        while pos < len(compressed):
            bits = compressed[pos:pos + 8]
            byte = int(bits, 2)
            if len(bits) < 8:
                byte <<= (8 - len(bits))
            out.append(byte)
            pos += 8
        return out
    def compressed_str(self):
        """Return the compressed text as a string of '0'/'1' characters."""
        return ''.join(self.codes[car] for car in self.text)
class Decompresser:
    """Rebuild the Huffman decoding tree from canonical code lengths and
    decode either a '0'/'1' string or a packed bytes payload."""
    def __init__(self, compressed, codelengths):
        self.compressed = compressed
        codes = normalized(codelengths)
        # Invert: code string -> character.
        self.codes = {value : key for key, value in codes.items()}
        self.root = Node()
        self.make_tree(self.root)
    def make_tree(self, node):
        """Recursively grow the binary decoding tree below ``node``."""
        if node is self.root:
            node.code = ''
        children = []
        for bit in '01':
            next_code = node.code + bit
            if next_code in self.codes:
                child = Node(char=self.codes[next_code])
            else:
                child = Node()
            child.code = next_code
            children.append(child)
        node.add(children)
        for child in children:
            if not child.is_leaf:
                self.make_tree(child)
    def decompress(self):
        """Decode and return the original bytes; accepts either the string
        form ('0'/'1' characters) or the packed bytes form."""
        source = self.compressed
        if isinstance(source, (bytes, bytearray)):
            return self.decompress_bytes()
        pos = 0
        node = self.root
        res = bytearray()
        while pos < len(source):
            code = int(source[pos])
            child = node.children[code]
            if child.is_leaf:
                # Bug fix: append the decoded character, not the Node itself
                # (bytearray.append(Node) raised TypeError).
                res.append(child.char)
                node = self.root
            else:
                node = child
            pos += 1
        return bytes(res)
    def decompress_bytes(self):
        source = self.compressed
        pos = 0
        node = self.root
        res = bytearray()
        while pos < len(source):
            byte = source[pos]
            mask = 128
            while mask > 0:
                code = bool(byte & mask)
                child = node.children[code]
                if child.is_leaf:
                    if child.char == 256:
                        break  # end of block; remaining bits are padding
                    res.append(child.char)
                    node = self.root
                else:
                    node = child
                mask >>= 1
            pos += 1
        return res
def compress(text, klass=bytes):
    """Compress ``text`` and return ``{'codelengths': ..., 'data': ...}``,
    where ``data`` is packed bytes (default) or a '0'/'1' string when
    ``klass`` is ``str``."""
    compressor = Compresser(text)
    if klass is bytes:
        payload = compressor.compressed_bytes()
    elif klass is str:
        payload = compressor.compressed_str()
    else:
        raise TypeError("second argument of compress must be bytes or "
                        "str, not '{}'".format(klass))
    return {"codelengths": compressor.codelengths, "data": payload}
def decompress(data, codelengths):
    """Inverse of compress(): rebuild the original bytes from ``data`` and
    its canonical code lengths."""
    return Decompresser(data, codelengths).decompress()
| kikocorreoso/brython | www/tests/compression/huffman.py | Python | bsd-3-clause | 8,832 |
""""
This module handles sending grades back to edX
Most of this module is a python 3 port of pylti (github.com/mitodl/sga-lti)
and should be moved back into that library.
"""
import uuid
from xml.etree import ElementTree as etree
import oauth2
from django.conf import settings
class SendGradeFailure(Exception):
    """Raised when posting a grade to edX fails: unknown consumer key, or a
    non-success response from the LTI outcomes endpoint."""
def send_grade(consumer_key, edx_url, result_id, grade):
    """ Sends a grade to edX """
    # Only consumers we hold an OAuth secret for may post grades.
    if consumer_key not in settings.LTI_OAUTH_CREDENTIALS:
        raise SendGradeFailure("Invalid consumer_key %s" % consumer_key)
    body = generate_request_xml(str(uuid.uuid1()), "replaceResult", result_id, grade)
    secret = settings.LTI_OAUTH_CREDENTIALS[consumer_key]
    response, content = _post_patched_request(consumer_key, secret, body, edx_url, "POST", "application/xml")
    if isinstance(content, bytes):
        content = content.decode("utf8")
    # edX signals success inside the XML body, not via the HTTP status code.
    if "<imsx_codeMajor>success</imsx_codeMajor>" not in content:
        raise SendGradeFailure("Send grades to edX returned %s" % response.status)
def _post_patched_request(lti_key, secret, body, url, method, content_type):  # pylint: disable=too-many-arguments
    """
    Authorization header needs to be capitalized for some LTI clients
    this function ensures that header is capitalized
    :param body: body of the call
    :param url: outcome url
    :return: (response, content) tuple from the OAuth-signed request
    """
    consumer = oauth2.Consumer(key=lti_key, secret=secret)
    client = oauth2.Client(consumer)
    import httplib2
    http = httplib2.Http
    # pylint: disable=protected-access
    normalize = http._normalize_headers
    def my_normalize(self, headers):
        """ This function patches Authorization header """
        ret = normalize(self, headers)
        if 'authorization' in ret:
            ret['Authorization'] = ret.pop('authorization')
        return ret
    http._normalize_headers = my_normalize
    try:
        response, content = client.request(
            url,
            method,
            body=body.encode("utf8"),
            headers={'Content-Type': content_type})
    finally:
        # Fix: always undo the monkey-patch, even when the request raises,
        # so the process-wide httplib2.Http class is left untouched.
        http._normalize_headers = normalize
    return response, content
def generate_request_xml(message_identifier_id, operation,
                         lis_result_sourcedid, score):
    # pylint: disable=too-many-locals
    """
    Generates LTI 1.1 XML for posting result to LTI consumer.
    :param message_identifier_id: unique id for this message
    :param operation: LTI operation, e.g. "replaceResult"
    :param lis_result_sourcedid: sourcedid of the result to update
    :param score: grade value; when None, no <result> element is emitted
    :return: XML string
    """
    root = etree.Element('imsx_POXEnvelopeRequest',
                         xmlns='http://www.imsglobal.org/services/'
                               'ltiv1p1/xsd/imsoms_v1p0')
    header = etree.SubElement(root, 'imsx_POXHeader')
    header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
    version = etree.SubElement(header_info, 'imsx_version')
    version.text = 'V1.0'
    message_identifier = etree.SubElement(header_info,
                                          'imsx_messageIdentifier')
    message_identifier.text = message_identifier_id
    body = etree.SubElement(root, 'imsx_POXBody')
    xml_request = etree.SubElement(body, '%s%s' % (operation, 'Request'))
    record = etree.SubElement(xml_request, 'resultRecord')
    guid = etree.SubElement(record, 'sourcedGUID')
    sourcedid = etree.SubElement(guid, 'sourcedId')
    sourcedid.text = lis_result_sourcedid
    if score is not None:
        result = etree.SubElement(record, 'result')
        result_score = etree.SubElement(result, 'resultScore')
        language = etree.SubElement(result_score, 'language')
        language.text = 'en'
        text_string = etree.SubElement(result_score, 'textString')
        # Idiom fix: str(score) instead of calling score.__str__() directly.
        text_string.text = str(score)
    ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
        etree.tostring(root, encoding='unicode'))
    return ret
| koljanos/sga-lti | sga/backend/send_grades.py | Python | bsd-3-clause | 4,039 |
from .assign_role import AssignRoleBulkAction
from .delete import DeleteBulkAction
from .set_active_state import SetActiveStateBulkAction
__all__ = ["AssignRoleBulkAction", "DeleteBulkAction", "SetActiveStateBulkAction"]
| wagtail/wagtail | wagtail/users/views/bulk_actions/__init__.py | Python | bsd-3-clause | 222 |
from contextlib import ContextDecorator
from operator import itemgetter
from wagtail.utils.apps import get_app_submodules
_hooks = {}
def register(hook_name, fn=None, order=0):
    """
    Register hook for ``hook_name``. Can be used as a decorator::
        @register('hook_name')
        def my_hook(...):
            pass
    or as a function call::
        def my_hook(...):
            pass
        register('hook_name', my_hook)
    """
    if fn is None:
        # No function supplied: behave as a decorator factory.
        def decorator(decorated):
            register(hook_name, decorated, order=order)
            return decorated
        return decorator
    _hooks.setdefault(hook_name, []).append((fn, order))
class TemporaryHook(ContextDecorator):
    """Context manager / decorator that registers a hook on entry and
    removes it again on exit. Used by ``register_temporarily``."""
    def __init__(self, hook_name, fn, order):
        self.hook_name = hook_name
        self.fn = fn
        self.order = order

    def __enter__(self):
        _hooks.setdefault(self.hook_name, []).append((self.fn, self.order))

    def __exit__(self, exc_type, exc_value, traceback):
        _hooks[self.hook_name].remove((self.fn, self.order))
def register_temporarily(hook_name, fn, order=0):
    """
    Register hook for ``hook_name`` temporarily. This is useful for testing hooks.
    Can be used as a decorator::
        def my_hook(...):
            pass
        class TestMyHook(Testcase):
            @hooks.register_temporarily('hook_name', my_hook)
            def test_my_hook(self):
                pass
    or as a context manager::
        def my_hook(...):
            pass
        with hooks.register_temporarily('hook_name', my_hook):
            # Hook is registered here
        # Hook is unregistered here
    """
    # TemporaryHook subclasses ContextDecorator, so one object covers both
    # the decorator and the context-manager usages shown above.
    return TemporaryHook(hook_name, fn, order)
_searched_for_hooks = False
def search_for_hooks():
    # Import every installed app's ``wagtail_hooks`` submodule exactly once;
    # importing them runs their module-level register() calls, which is what
    # actually populates _hooks.
    global _searched_for_hooks
    if not _searched_for_hooks:
        list(get_app_submodules('wagtail_hooks'))
        _searched_for_hooks = True
def get_hooks(hook_name):
    """ Return the hooks function sorted by their order. """
    search_for_hooks()
    registered = _hooks.get(hook_name, [])
    return [fn for fn, _ in sorted(registered, key=itemgetter(1))]
| kaedroho/wagtail | wagtail/core/hooks.py | Python | bsd-3-clause | 2,294 |
"""
sentry.management.commands.create_sample_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.core.management.base import BaseCommand, CommandError, make_option
class Command(BaseCommand):
    """Creates a sample event in Sentry for a project, so a fresh install
    has something to look at."""
    help = 'Creates a sample event in Sentry (if applicable)'
    option_list = BaseCommand.option_list + (
        make_option('--project', dest='project', help="project ID or team-slug/project-slug"),
        make_option('--platform', dest='platform'),
    )

    def handle(self, **options):
        from django.conf import settings
        from sentry.constants import PLATFORM_LIST
        from sentry.models import Project
        from sentry.utils.samples import create_sample_event

        if not options['project']:
            project = Project.objects.get(id=settings.SENTRY_PROJECT)
        elif options['project'].isdigit():
            project = Project.objects.get(id=options['project'])
        elif '/' in options['project']:
            t_slug, p_slug = options['project'].split('/', 1)
            project = Project.objects.get(slug=p_slug, team__slug=t_slug)
        else:
            raise CommandError('Project must be specified as team-slug/project-slug or a project id')

        # Fix: fall back to the project's own platform first, then validate
        # whichever value will actually be used. Previously the raw
        # --platform option was validated before the fallback, so omitting
        # it always raised (None is never in PLATFORM_LIST), making the
        # fallback dead code.
        platform = options['platform'] or project.platform
        if platform not in PLATFORM_LIST:
            raise CommandError('Invalid platform. Must specify one of: %s' % ', '.join(PLATFORM_LIST))

        event = create_sample_event(project, platform)
        if not event:
            raise CommandError('Unable to create an event for platform %r' % (str(platform),))

        self.stdout.write('Event created: %s' % (event.group.get_absolute_url(),))
| rdio/sentry | src/sentry/management/commands/create_sample_event.py | Python | bsd-3-clause | 1,839 |
import os
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DatetimeIndex,
Interval,
IntervalIndex,
NaT,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
from pandas.tseries.offsets import (
Day,
Nano,
)
def test_qcut():
    # Quartile-bin 1000 normal draws and check the stored interval edges
    # against directly computed quantiles; rounding of stored edges forces
    # a tolerance-based comparison instead of exact equality.
    arr = np.random.randn(1000)
    # We store the bins as Index that have been
    # rounded to comparisons are a bit tricky.
    labels, bins = qcut(arr, 4, retbins=True)
    ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
    result = labels.categories.left.values
    assert np.allclose(result, ex_bins[:-1], atol=1e-2)
    result = labels.categories.right.values
    assert np.allclose(result, ex_bins[1:], atol=1e-2)
    ex_levels = cut(arr, ex_bins, include_lowest=True)
    tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds():
    """qcut with an integer bin count must yield exactly that many codes."""
    samples = np.random.randn(1000)
    codes = qcut(samples, 10, labels=False)
    distinct = np.unique(codes)
    assert distinct.size == 10
def test_qcut_specify_quantiles():
    """Explicit quartile edges are equivalent to asking qcut for 4 bins."""
    data = np.random.randn(100)
    explicit = qcut(data, [0, 0.25, 0.5, 0.75, 1.0])
    implicit = qcut(data, 4)
    tm.assert_categorical_equal(explicit, implicit)
def test_qcut_all_bins_same():
    # All-identical data collapses every quantile edge to one value,
    # so qcut must refuse with a "unique edges" ValueError.
    with pytest.raises(ValueError, match="edges.*unique"):
        qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_qcut_include_lowest():
    """The first interval's left edge is nudged below the data minimum."""
    data = np.arange(10)
    binned = qcut(data, 4)
    edge_pairs = [(-0.001, 2.25), (2.25, 4.5), (4.5, 6.75), (6.75, 9)]
    expected = IntervalIndex([Interval(lo, hi) for lo, hi in edge_pairs])
    tm.assert_index_equal(binned.categories, expected)
def test_qcut_nas():
    """Missing values must propagate through qcut as missing categories."""
    data = np.concatenate([np.full(20, np.nan), np.random.randn(80)])
    binned = qcut(data, 4)
    assert isna(binned[:20]).all()
def test_qcut_index():
    """Two points split in half give two ordered interval categories."""
    observed = qcut([0, 2], 2)
    expected = Categorical(
        [Interval(-0.001, 1), Interval(1, 2)], ordered=True
    )
    tm.assert_categorical_equal(observed, expected)
def test_qcut_binning_issues(datapath):
    # see gh-1978, gh-1979
    # Regression data from disk: every bin must be non-degenerate and the
    # bins must be strictly increasing and non-overlapping.
    cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv"))
    arr = np.loadtxt(cut_file)
    result = qcut(arr, 20)
    starts = []
    ends = []
    for lev in np.unique(result):
        s = lev.left
        e = lev.right
        assert s != e
        starts.append(float(s))
        ends.append(float(e))
    # Compare each consecutive pair of bins: edges strictly increase and
    # a bin may not start before the previous one ends.
    for (sp, sn), (ep, en) in zip(
        zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:])
    ):
        assert sp < sn
        assert ep < en
        assert ep <= sn
def test_qcut_return_intervals():
    # Default labels are the interval categories themselves, as an
    # ordered CategoricalDtype Series.
    ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
    res = qcut(ser, [0, 0.333, 0.666, 1])
    exp_levels = np.array(
        [Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]
    )
    exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
    tm.assert_series_equal(res, exp)
# Scalar (non-list-like) labels are rejected outright.
@pytest.mark.parametrize("labels", ["foo", 1, True])
def test_qcut_incorrect_labels(labels):
    # GH 13318
    values = range(5)
    msg = "Bin labels must either be False, None or passed in as a list-like argument"
    with pytest.raises(ValueError, match=msg):
        qcut(values, 4, labels=labels)
# List-like labels whose length does not match the bin count are rejected.
@pytest.mark.parametrize("labels", [["a", "b", "c"], list(range(3))])
def test_qcut_wrong_length_labels(labels):
    # GH 13318
    values = range(10)
    msg = "Bin labels must be one fewer than the number of bin edges"
    with pytest.raises(ValueError, match=msg):
        qcut(values, 4, labels=labels)
# Correctly-sized list-like labels become the ordered categories.
@pytest.mark.parametrize(
    "labels, expected",
    [
        (["a", "b", "c"], Categorical(["a", "b", "c"], ordered=True)),
        (list(range(3)), Categorical([0, 1, 2], ordered=True)),
    ],
)
def test_qcut_list_like_labels(labels, expected):
    # GH 13318
    values = range(3)
    result = qcut(values, 3, labels=labels)
    tm.assert_categorical_equal(result, expected)
# Behavior of the `duplicates` keyword when quantile edges collide:
# "drop" merges bins, default/"raise" errors, anything else is invalid.
@pytest.mark.parametrize(
    "kwargs,msg",
    [
        ({"duplicates": "drop"}, None),
        ({}, "Bin edges must be unique"),
        ({"duplicates": "raise"}, "Bin edges must be unique"),
        ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"),
    ],
)
def test_qcut_duplicates_bin(kwargs, msg):
    # see gh-7751
    values = [0, 0, 0, 0, 1, 2, 3]
    if msg is not None:
        with pytest.raises(ValueError, match=msg):
            qcut(values, 3, **kwargs)
    else:
        result = qcut(values, 3, **kwargs)
        expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
        tm.assert_index_equal(result.categories, expected)
# A single quantile bin must still work for constant data of any sign,
# with the left edge nudged below the value (see gh-15431).
@pytest.mark.parametrize(
    "data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)]
)
@pytest.mark.parametrize("length", [1, 2])
@pytest.mark.parametrize("labels", [None, False])
def test_single_quantile(data, start, end, length, labels):
    # see gh-15431
    ser = Series([data] * length)
    result = qcut(ser, 1, labels=labels)
    if labels is None:
        intervals = IntervalIndex([Interval(start, end)] * length, closed="right")
        expected = Series(intervals).astype(CDT(ordered=True))
    else:
        expected = Series([0] * length, dtype=np.intp)
    tm.assert_series_equal(result, expected)
# NaT entries in datetime/timedelta input map to missing categories.
@pytest.mark.parametrize(
    "ser",
    [
        Series(DatetimeIndex(["20180101", NaT, "20180103"])),
        Series(TimedeltaIndex(["0 days", NaT, "2 days"])),
    ],
    ids=lambda x: str(x.dtype),
)
def test_qcut_nat(ser):
    # see gh-19768
    intervals = IntervalIndex.from_tuples(
        [(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])]
    )
    expected = Series(Categorical(intervals, ordered=True))
    result = qcut(ser, 2)
    tm.assert_series_equal(result, expected)
# Timezone-aware datetimes keep their timezone in the resulting intervals,
# whether bins are given as a count or explicit quantiles.
@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)])
def test_datetime_tz_qcut(bins):
    # see gh-19872
    tz = "US/Eastern"
    ser = Series(date_range("20130101", periods=3, tz=tz))
    result = qcut(ser, bins)
    expected = Series(
        IntervalIndex(
            [
                Interval(
                    Timestamp("2012-12-31 23:59:59.999999999", tz=tz),
                    Timestamp("2013-01-01 16:00:00", tz=tz),
                ),
                Interval(
                    Timestamp("2013-01-01 16:00:00", tz=tz),
                    Timestamp("2013-01-02 08:00:00", tz=tz),
                ),
                Interval(
                    Timestamp("2013-01-02 08:00:00", tz=tz),
                    Timestamp("2013-01-03 00:00:00", tz=tz),
                ),
            ]
        )
    ).astype(CDT(ordered=True))
    tm.assert_series_equal(result, expected)
# retbins=True must return bins with the input's datetime-like dtype.
@pytest.mark.parametrize(
    "arg,expected_bins",
    [
        [
            timedelta_range("1day", periods=3),
            TimedeltaIndex(["1 days", "2 days", "3 days"]),
        ],
        [
            date_range("20180101", periods=3),
            DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"]),
        ],
    ],
)
def test_date_like_qcut_bins(arg, expected_bins):
    # see gh-19891
    ser = Series(arg)
    result, result_bins = qcut(ser, 2, retbins=True)
    tm.assert_index_equal(result_bins, expected_bins)
# Boolean input is coerced to int, so True/False data bins exactly like
# the corresponding 1/0 data, across Series/ndarray/list containers.
@pytest.mark.parametrize("bins", [6, 7])
@pytest.mark.parametrize(
    "box, compare",
    [
        (Series, tm.assert_series_equal),
        (np.array, tm.assert_categorical_equal),
        (list, tm.assert_equal),
    ],
)
def test_qcut_bool_coercion_to_int(bins, box, compare):
    # issue 20303
    data_expected = box([0, 1, 1, 0, 1] * 10)
    data_result = box([False, True, True, False, True] * 10)
    expected = qcut(data_expected, bins, duplicates="drop")
    result = qcut(data_result, bins, duplicates="drop")
    compare(result, expected)
# Nullable (masked) numeric extension arrays must bin the same way as the
# equivalent float array with NaNs.
@pytest.mark.parametrize("q", [2, 5, 10])
def test_qcut_nullable_integer(q, any_numeric_ea_dtype):
    arr = pd.array(np.arange(100), dtype=any_numeric_ea_dtype)
    arr[::2] = pd.NA
    result = qcut(arr, q)
    expected = qcut(arr.astype(float), q)
    tm.assert_categorical_equal(result, expected)
| rs2/pandas | pandas/tests/reshape/test_qcut.py | Python | bsd-3-clause | 8,222 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
# Example script: train a Fisherfaces model on a face database, render the
# first (up to) 16 Fisherfaces to a PDF, then run 10-fold cross validation.
# NOTE(review): Python 2 only (uses xrange); the dataset path below is
# hard-coded to the original author's machine — adjust before running.
import sys
# append facerec to module search path
sys.path.append("../..")
# import facerec stuff
from facerec.dataset import DataSet
from facerec.feature import Fisherfaces
from facerec.distance import EuclideanDistance, CosineDistance
from facerec.classifier import NearestNeighbor
from facerec.classifier import SVM
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import subplot
from facerec.util import minmax_normalize
# import numpy
import numpy as np
# import matplotlib colormaps
import matplotlib.cm as cm
# import for logging
import logging,sys
# set up a handler for logging
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add handler to facerec modules
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# load a dataset (e.g. AT&T Facedatabase)
dataSet = DataSet("/home/philipp/facerec/data/yalefaces_recognition")
# define Fisherfaces as feature extraction method
feature = Fisherfaces()
# define a 1-NN classifier with Euclidean Distance
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# show fisherfaces
model.compute(dataSet.data, dataSet.labels)
# turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
    e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
    E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.pdf")
# perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(dataSet.data, dataSet.labels)
cv.print_results()
| dashmoment/facerecognition | py/apps/scripts/fisherfaces_example.py | Python | bsd-3-clause | 2,253 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.names.tap}.
"""
from twisted.trial.unittest import TestCase
from twisted.python.usage import UsageError
from twisted.names.tap import Options, _buildResolvers
from twisted.names.dns import PORT
from twisted.names.secondary import SecondaryAuthorityService
from twisted.names.resolve import ResolverChain
from twisted.names.client import Resolver
class OptionsTests(TestCase):
    """
    Tests for L{Options}, defining how command line arguments for the DNS server
    are parsed.
    """
    def test_malformedSecondary(self):
        """
        If the value supplied for an I{--secondary} option does not provide a
        server IP address, optional port number, and domain name,
        L{Options.parseOptions} raises L{UsageError}.
        """
        options = Options()
        self.assertRaises(
            UsageError, options.parseOptions, ['--secondary', ''])
        self.assertRaises(
            UsageError, options.parseOptions, ['--secondary', '1.2.3.4'])
        self.assertRaises(
            UsageError, options.parseOptions, ['--secondary', '1.2.3.4:hello'])
        self.assertRaises(
            UsageError, options.parseOptions,
            ['--secondary', '1.2.3.4:hello/example.com'])
    def test_secondary(self):
        """
        An argument of the form C{"ip/domain"} is parsed by L{Options} for the
        I{--secondary} option and added to its list of secondaries, using the
        default DNS port number.
        """
        options = Options()
        options.parseOptions(['--secondary', '1.2.3.4/example.com'])
        self.assertEqual(
            [(('1.2.3.4', PORT), ['example.com'])], options.secondaries)
    def test_secondaryExplicitPort(self):
        """
        An argument of the form C{"ip:port/domain"} can be used to specify an
        alternate port number for for which to act as a secondary.
        """
        options = Options()
        options.parseOptions(['--secondary', '1.2.3.4:5353/example.com'])
        self.assertEqual(
            [(('1.2.3.4', 5353), ['example.com'])], options.secondaries)
    def test_secondaryAuthorityServices(self):
        """
        After parsing I{--secondary} options, L{Options} constructs a
        L{SecondaryAuthorityService} instance for each configured secondary.
        """
        options = Options()
        options.parseOptions(['--secondary', '1.2.3.4:5353/example.com',
                              '--secondary', '1.2.3.5:5354/example.com'])
        self.assertEqual(len(options.svcs), 2)
        secondary = options.svcs[0]
        self.assertIsInstance(options.svcs[0], SecondaryAuthorityService)
        self.assertEqual(secondary.primary, '1.2.3.4')
        self.assertEqual(secondary._port, 5353)
        secondary = options.svcs[1]
        self.assertIsInstance(options.svcs[1], SecondaryAuthorityService)
        self.assertEqual(secondary.primary, '1.2.3.5')
        self.assertEqual(secondary._port, 5354)
    def test_recursiveConfiguration(self):
        """
        Recursive DNS lookups, if enabled, should be a last-resort option.
        Any other lookup method (cache, local lookup, etc.) should take
        precedence over recursive lookups
        """
        options = Options()
        options.parseOptions(['--hosts-file', 'hosts.txt', '--recursive'])
        ca, cl = _buildResolvers(options)
        # Extra cleanup, necessary on POSIX because client.Resolver doesn't know
        # when to stop parsing resolv.conf. See #NNN for improving this.
        # Cancel the pending resolv.conf re-parse call on any client Resolver
        # found at the end of a chain, so the reactor is left clean.
        for x in cl:
            if isinstance(x, ResolverChain):
                recurser = x.resolvers[-1]
                if isinstance(recurser, Resolver):
                    recurser._parseCall.cancel()
        # The recursive resolver must be the last element of the chain.
        self.assertIsInstance(cl[-1], ResolverChain)
| hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/names/test/test_tap.py | Python | bsd-3-clause | 3,955 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import inf
import pytest
import numpy as np
from astropy.cosmology.utils import inf_like, vectorize_if_needed, vectorize_redshift_method
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_vectorize_redshift_method():
    """Test :func:`astropy.cosmology.utils.vectorize_redshift_method`."""
    class Class:
        @vectorize_redshift_method
        def method(self, z):
            return z
    c = Class()
    # The decorator must expose the underlying numpy.vectorize object.
    assert hasattr(c.method, "__vectorized__")
    assert isinstance(c.method.__vectorized__, np.vectorize)
    # calling with Number: scalars pass through without boxing
    assert c.method(1) == 1
    assert isinstance(c.method(1), int)
    # calling with a numpy scalar: dtype is preserved
    assert c.method(np.float64(1)) == np.float64(1)
    assert isinstance(c.method(np.float64(1)), np.float64)
    # numpy array
    assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
    assert isinstance(c.method(np.array([1, 2])), np.ndarray)
    # non-scalar: list-likes are converted to ndarray
    assert all(c.method([1, 2]) == np.array([1, 2]))
    assert isinstance(c.method([1, 2]), np.ndarray)
def test_vectorize_if_needed():
    """
    Test :func:`astropy.cosmology.utils.vectorize_if_needed`.
    There's no need to test 'veckw' because that is directly passed to
    `numpy.vectorize` which thoroughly tests the various inputs.
    """
    func = lambda x: x ** 2
    # The helper is deprecated, so every call must warn.
    with pytest.warns(AstropyDeprecationWarning):
        # not vectorized
        assert vectorize_if_needed(func, 2) == 4
        # vectorized
        assert all(vectorize_if_needed(func, [2, 3]) == [4, 9])
# Scalars map to a single inf; array-likes map elementwise, always float.
@pytest.mark.parametrize("arr, expected",
                         [(0.0, inf),  # float scalar
                          (1, inf),  # integer scalar should give float output
                          ([0.0, 1.0, 2.0, 3.0], (inf, inf, inf, inf)),
                          ([0, 1, 2, 3], (inf, inf, inf, inf)),  # integer list
                          ])
def test_inf_like(arr, expected):
    """
    Test :func:`astropy.cosmology.utils.inf_like`.
    All inputs should give a float output.
    These tests are also in the docstring, but it's better to have them also
    in one consolidated location.
    """
    # The helper is deprecated, so calling it must warn.
    with pytest.warns(AstropyDeprecationWarning):
        assert np.all(inf_like(arr) == expected)
| mhvk/astropy | astropy/cosmology/tests/test_utils.py | Python | bsd-3-clause | 2,328 |
import os
import amo.search
from .models import Reindexing
from django.core.management.base import CommandError
# Module-level shortcuts so callers can invoke these Reindexing manager
# methods without importing the model themselves.
is_reindexing_amo = Reindexing.objects.is_reindexing_amo
flag_reindexing_amo = Reindexing.objects.flag_reindexing_amo
unflag_reindexing_amo = Reindexing.objects.unflag_reindexing_amo
get_indices = Reindexing.objects.get_indices
def index_objects(ids, model, search, index=None, transforms=None):
    """Extract the given objects and bulk-index them into every alias of
    `index` (defaulting to the model's own index)."""
    if index is None:
        index = model._get_index()
    target_indices = Reindexing.objects.get_indices(index)

    queryset = model.objects.no_cache().filter(id__in=ids)
    for transform in (transforms or []):
        queryset = queryset.transform(transform)

    for obj in queryset:
        document = search.extract(obj)
        for target in target_indices:
            model.index(document, bulk=True, id=obj.id, index=target)

    # Push everything queued above to Elasticsearch in one go.
    amo.search.get_es().flush_bulk(forced=True)
def raise_if_reindex_in_progress(site):
    """Checks if the database indexation flag is on for the given site.

    If it's on, and if no "FORCE_INDEXING" variable is present in the env,
    raises a CommandError.
    """
    already_reindexing = Reindexing.objects._is_reindexing(site)
    if already_reindexing and 'FORCE_INDEXING' not in os.environ:
        # Fixed typo in the user-facing message ("occuring" -> "occurring").
        raise CommandError("Indexation already occurring. Add a FORCE_INDEXING "
                           "variable in the environ to force it")
| anaran/olympia | lib/es/utils.py | Python | bsd-3-clause | 1,403 |
from django.http import Http404
from django.shortcuts import render_to_response
from django.conf import settings
from django.template import RequestContext
from django.core.cache import parse_backend_uri
from django_memcached.util import get_memcached_stats
from django.contrib.auth.decorators import user_passes_test
# Parse the configured cache backend once at import time so both views
# query the same set of memcached servers.
_, hosts, _ = parse_backend_uri(settings.CACHE_BACKEND)
SERVERS = hosts.split(';')
def server_list(request):
    """Render a page listing every configured memcached server with its
    index and current stats (None when the server is unreachable)."""
    # enumerate() replaces the old zip(range(len(...)), ...) anti-idiom.
    statuses = [
        (position, server, get_memcached_stats(server))
        for position, server in enumerate(SERVERS)
    ]
    context = {
        'statuses': statuses,
    }
    return render_to_response(
        'memcached/server_list.html',
        context,
        context_instance=RequestContext(request)
    )
def server_status(request, index):
    """Render the stats page for the memcached server at position `index`.

    Any invalid condition (bad index, non-memcached backend, unreachable
    server) is reported uniformly as a 404.
    """
    try:
        index = int(index)
    except ValueError:
        raise Http404
    # Only meaningful when the cache backend actually is memcached.
    if 'memcached' not in settings.CACHE_BACKEND:
        raise Http404
    if not SERVERS:
        raise Http404
    try:
        server = SERVERS[index]
    except IndexError:
        raise Http404
    stats = get_memcached_stats(server)
    if not stats:
        raise Http404
    context = {
        'server': server,
        'stats': stats.items(),
    }
    return render_to_response(
        'memcached/server_status.html',
        context,
        context_instance=RequestContext(request)
    )
# Optionally restrict both views to staff users via a settings flag.
if getattr(settings, 'DJANGO_MEMCACHED_REQUIRE_STAFF', False):
    server_list = user_passes_test(lambda u: u.is_staff)(server_list)
    server_status = user_passes_test(lambda u: u.is_staff)(server_status)
| alaindomissy/django-memcached | django_memcached/views.py | Python | bsd-3-clause | 1,535 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
# Module under test
import bokeh.util.string as bus
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_escape(object):
    """HTML-escaping: &, <, > always escaped; quote escaping is configurable."""
    def test_default_quote(self):
        assert bus.escape("foo'bar") == "foo&#x27;bar"
        assert bus.escape('foo"bar') == "foo&quot;bar"
    def test_quote_False(self):
        assert bus.escape("foo'bar", quote=False) == "foo'bar"
        assert bus.escape('foo"bar', quote=False) == 'foo"bar'
    def test_quote_custom(self):
        # Only the quote characters listed in `quote` are escaped.
        assert bus.escape("foo'bar", quote=('"'),) == "foo'bar"
        assert bus.escape("foo'bar", quote=("'"),) == "foo&#x27;bar"
        assert bus.escape('foo"bar', quote=("'"),) == 'foo"bar'
        assert bus.escape('foo"bar', quote=('"'),) == "foo&quot;bar"
    def test_amp(self):
        assert bus.escape("foo&bar") == "foo&amp;bar"
    def test_lt(self):
        assert bus.escape("foo<bar") == "foo&lt;bar"
    def test_gt(self):
        assert bus.escape("foo>bar") == "foo&gt;bar"
# NOTE(review): class name misspells "docstring"; kept as-is for
# compatibility with existing test selection/filters.
class Test_format_doctring(object):
    """format_docstring: a None docstring stays None; otherwise str.format
    is applied with the given positional/keyword arguments."""
    def test_no_argument(self):
        doc__ = "hello world"
        assert bus.format_docstring(doc__) == doc__
        doc__ = None
        assert bus.format_docstring(doc__) == None
    def test_arguments_unused(self):
        doc__ = "hello world"
        assert bus.format_docstring(doc__, 'hello ', not_used='world') == doc__
        doc__ = None
        assert bus.format_docstring(doc__, 'hello ', not_used='world') == None
    def test_arguments(self):
        doc__ = "-- {}{as_parameter} --"
        assert bus.format_docstring(doc__, 'hello ', as_parameter='world') == "-- hello world --"
        doc__ = None
        assert bus.format_docstring(doc__, 'hello ', as_parameter='world') == None
class Test_indent(object):
    """indent: prefixes every line with n copies of the character ch."""
    # Sample with mixed existing indentation to show prefixes accumulate.
    TEXT = "some text\nto indent\n  goes here"
    def test_default_args(self):
        assert bus.indent(self.TEXT) == "  some text\n  to indent\n    goes here"
    def test_with_n(self):
        assert bus.indent(self.TEXT, n=3) == "   some text\n   to indent\n     goes here"
    def test_with_ch(self):
        assert bus.indent(self.TEXT, ch="-") == "--some text\n--to indent\n--  goes here"
class Test_nice_join(object):
    """nice_join: human-readable list joining with a final conjunction."""
    def test_default(self):
        assert bus.nice_join(["one"]) == "one"
        assert bus.nice_join(["one", "two"]) == "one or two"
        assert bus.nice_join(["one", "two", "three"]) == "one, two or three"
        assert bus.nice_join(["one", "two", "three", "four"]) == "one, two, three or four"
    def test_string_conjunction(self):
        assert bus.nice_join(["one"], conjuction="and") == "one"
        assert bus.nice_join(["one", "two"], conjuction="and") == "one and two"
        assert bus.nice_join(["one", "two", "three"], conjuction="and") == "one, two and three"
        assert bus.nice_join(["one", "two", "three", "four"], conjuction="and") == "one, two, three and four"
    def test_None_conjunction(self):
        # No conjunction: a plain separator-joined list.
        assert bus.nice_join(["one"], conjuction=None) == "one"
        assert bus.nice_join(["one", "two"], conjuction=None) == "one, two"
        assert bus.nice_join(["one", "two", "three"], conjuction=None) == "one, two, three"
        assert bus.nice_join(["one", "two", "three", "four"], conjuction=None) == "one, two, three, four"
    def test_sep(self):
        assert bus.nice_join(["one"], sep='; ') == "one"
        assert bus.nice_join(["one", "two"], sep='; ') == "one or two"
        assert bus.nice_join(["one", "two", "three"], sep='; ') == "one; two or three"
        assert bus.nice_join(["one", "two", "three", "four"], sep="; ") == "one; two; three or four"
def test_snakify():
    # CamelCase -> snake_case, with digit runs and acronyms handled.
    assert bus.snakify("MyClassName") == "my_class_name"
    assert bus.snakify("My1Class23Name456") == "my1_class23_name456"
    assert bus.snakify("MySUPERClassName") == "my_super_class_name"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| stonebig/bokeh | bokeh/util/tests/test_string.py | Python | bsd-3-clause | 5,444 |
from morepath import compat
def test_text_type():
    # text_type is unicode text on both Python 2 and 3; bytes never match.
    assert isinstance(u'foo', compat.text_type)
    assert not isinstance(b'foo', compat.text_type)
def test_string_types():
    # On Python 2 bytes is str and therefore a string type; on Python 3
    # bytes is excluded.
    assert isinstance('foo', compat.string_types)
    assert isinstance(u'foo', compat.string_types)
    if compat.PY3:
        assert not isinstance(b'foo', compat.string_types)
    else:
        assert isinstance(b'foo', compat.string_types)
def test_bytes_():
    # bytes_ encodes text to bytes and passes bytes through unchanged.
    text = u'Z\N{latin small letter u with diaeresis}rich'
    code = compat.bytes_(text)
    assert isinstance(code, bytes)
    assert code == compat.bytes_(code)
def test_withclass():
    # with_metaclass must apply the metaclass without injecting an extra
    # base class into the MRO.
    class Meta(type):
        pass
    class Class(compat.with_metaclass(Meta)):
        pass
    assert type(Class) == Meta
    assert Class.__bases__ == (object,)
| taschini/morepath | morepath/tests/test_compat.py | Python | bsd-3-clause | 799 |
# pylint: disable=too-many-lines
"""
Component to interface with cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera/
"""
import asyncio
import logging
from aiohttp import web
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView
DOMAIN = 'camera'
DEPENDENCIES = ['http']
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
STATE_RECORDING = 'recording'
STATE_STREAMING = 'streaming'
STATE_IDLE = 'idle'
ENTITY_IMAGE_URL = '/api/camera_proxy/{0}?token={1}'
@asyncio.coroutine
def async_setup(hass, config):
    """Setup the camera component.

    Registers the image and MJPEG-stream HTTP views, then sets up every
    configured camera platform through the entity component.
    """
    component = EntityComponent(
        logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
    hass.http.register_view(CameraImageView(hass, component.entities))
    hass.http.register_view(CameraMjpegStream(hass, component.entities))
    yield from component.async_setup(config)
    return True
class Camera(Entity):
    """The base class for camera entities.

    Subclasses implement camera_image(); streaming and state handling are
    provided here.
    """
    def __init__(self):
        """Initialize a camera."""
        self.is_streaming = False
    @property
    def access_token(self):
        """Access token for this camera.

        NOTE(review): derived from id(self), so it is stable only for the
        process lifetime and is predictable — not cryptographically strong.
        """
        return str(id(self))
    @property
    def should_poll(self):
        """No need to poll cameras."""
        return False
    @property
    def entity_picture(self):
        """Return a link to the camera feed as entity picture."""
        return ENTITY_IMAGE_URL.format(self.entity_id, self.access_token)
    @property
    def is_recording(self):
        """Return true if the device is recording."""
        return False
    @property
    def brand(self):
        """Camera brand."""
        return None
    @property
    def model(self):
        """Camera model."""
        return None
    def camera_image(self):
        """Return bytes of camera image.

        Subclasses must override; may block, so it is run in an executor.
        """
        raise NotImplementedError()
    @asyncio.coroutine
    def async_camera_image(self):
        """Return bytes of camera image.
        This method must be run in the event loop.
        """
        # Offload the potentially blocking camera_image() to a thread.
        image = yield from self.hass.loop.run_in_executor(
            None, self.camera_image)
        return image
    @asyncio.coroutine
    def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from camera images.
        This method must be run in the event loop.
        """
        response = web.StreamResponse()
        response.content_type = ('multipart/x-mixed-replace; '
                                 'boundary=--jpegboundary')
        yield from response.prepare(request)
        def write(img_bytes):
            """Write image to stream."""
            response.write(bytes(
                '--jpegboundary\r\n'
                'Content-Type: image/jpeg\r\n'
                'Content-Length: {}\r\n\r\n'.format(
                    len(img_bytes)), 'utf-8') + img_bytes + b'\r\n')
        last_image = None
        try:
            # Poll roughly twice per second; only emit frames that changed.
            while True:
                img_bytes = yield from self.async_camera_image()
                if not img_bytes:
                    break
                if img_bytes is not None and img_bytes != last_image:
                    write(img_bytes)
                    # Chrome seems to always ignore first picture,
                    # print it twice.
                    if last_image is None:
                        write(img_bytes)
                    last_image = img_bytes
                    yield from response.drain()
                yield from asyncio.sleep(.5)
        finally:
            # Always terminate the HTTP response, even on client disconnect.
            yield from response.write_eof()
    @property
    def state(self):
        """Camera state."""
        if self.is_recording:
            return STATE_RECORDING
        elif self.is_streaming:
            return STATE_STREAMING
        else:
            return STATE_IDLE
    @property
    def state_attributes(self):
        """Camera state attributes."""
        attr = {
            'access_token': self.access_token,
        }
        if self.model:
            attr['model_name'] = self.model
        if self.brand:
            attr['brand'] = self.brand
        return attr
class CameraView(HomeAssistantView):
    """Base CameraView.

    Resolves the camera entity, performs token-or-session authentication,
    then delegates to handle().
    """
    # Auth is handled manually below so the ?token= query parameter works.
    requires_auth = False
    def __init__(self, hass, entities):
        """Initialize a basic camera view."""
        super().__init__(hass)
        self.entities = entities
    @asyncio.coroutine
    def get(self, request, entity_id):
        """Start a get request."""
        camera = self.entities.get(entity_id)
        if camera is None:
            return web.Response(status=404)
        # Accept either a normally authenticated request or the camera's
        # own access token passed as a query parameter.
        authenticated = (request.authenticated or
                         request.GET.get('token') == camera.access_token)
        if not authenticated:
            return web.Response(status=401)
        response = yield from self.handle(request, camera)
        return response
    @asyncio.coroutine
    def handle(self, request, camera):
        """Handle the camera request."""
        raise NotImplementedError()
class CameraImageView(CameraView):
    """Camera view to serve a single still image."""
    url = "/api/camera_proxy/{entity_id}"
    name = "api:camera:image"
    @asyncio.coroutine
    def handle(self, request, camera):
        """Serve camera image."""
        image = yield from camera.async_camera_image()
        if image is None:
            # Camera could not produce a frame.
            return web.Response(status=500)
        return web.Response(body=image)
class CameraMjpegStream(CameraView):
    """Camera View to serve an MJPEG stream."""
    url = "/api/camera_proxy_stream/{entity_id}"
    name = "api:camera:stream"
    @asyncio.coroutine
    def handle(self, request, camera):
        """Serve the camera's MJPEG stream until it ends or the client leaves."""
        yield from camera.handle_async_mjpeg_stream(request)
| srcLurker/home-assistant | homeassistant/components/camera/__init__.py | Python | mit | 5,960 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.messages import CMerkleBlock, FromHex, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.wallet import MiniWallet
class MerkleBlockTest(BitcoinTestFramework):
    """Exercise gettxoutproof/verifytxoutproof, including -txindex behavior
    and rejection of a tampered proof."""
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Node 1 runs with -txindex so proofs work for fully spent txs.
        self.extra_args = [
            [],
            ["-txindex"],
        ]
    def run_test(self):
        miniwallet = MiniWallet(self.nodes[0])
        # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
        miniwallet.generate(5)
        self.nodes[0].generate(100)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        txid1 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
        txid2 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
        # This will raise an exception because the transaction is not yet in a block
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()
        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2], blockhash)), txlist)
        txin_spent = miniwallet.get_utxo()  # Get the change from txid2
        tx3 = miniwallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=txin_spent)
        txid3 = tx3['txid']
        self.nodes[0].generate(1)
        self.sync_all()
        txid_spent = txin_spent["txid"]
        txid_unspent = txid1  # Input was change from txid2, so txid1 should be unspent
        # Invalid txids
        assert_raises_rpc_error(-8, "txid must be of length 64 (not 32, for '00000000000000000000000000000000')", self.nodes[0].gettxoutproof, ["00000000000000000000000000000000"], blockhash)
        assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].gettxoutproof, ["ZZZ0000000000000000000000000000000000000000000000000000000000000"], blockhash)
        # Invalid blockhashes
        assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 32, for '00000000000000000000000000000000')", self.nodes[0].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
        assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].gettxoutproof, [txid_spent], "ZZZ0000000000000000000000000000000000000000000000000000000000000")
        # We can't find the block from a fully-spent tx
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid_spent])
        # We can get the proof if we specify the block
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # We can't get the proof if we specify a non-existent block
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].gettxoutproof, [txid_spent], "0000000000000000000000000000000000000000000000000000000000000000")
        # We can get the proof if the transaction is unspent
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_unspent])), [txid_unspent])
        # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
        assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2]))), sorted(txlist))
        assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid2, txid1]))), sorted(txlist))
        # We can always get a proof if we have a -txindex
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[1].gettxoutproof([txid_spent])), [txid_spent])
        # We can't get a proof if we specify transactions from different blocks
        assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[0].gettxoutproof, [txid1, txid3])
        # Test empty list
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [])
        # Test duplicate txid
        assert_raises_rpc_error(-8, 'Invalid parameter, duplicated txid', self.nodes[0].gettxoutproof, [txid1, txid1])
        # Now we'll try tweaking a proof.
        proof = self.nodes[1].gettxoutproof([txid1, txid2])
        assert txid1 in self.nodes[0].verifytxoutproof(proof)
        assert txid2 in self.nodes[1].verifytxoutproof(proof)
        tweaked_proof = FromHex(CMerkleBlock(), proof)
        # Make sure that our serialization/deserialization is working
        assert txid1 in self.nodes[0].verifytxoutproof(ToHex(tweaked_proof))
        # Check to see if we can go up the merkle tree and pass this off as a
        # single-transaction block
        tweaked_proof.txn.nTransactions = 1
        tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot]
        tweaked_proof.txn.vBits = [True] + [False]*7
        # Both nodes must reject the forged single-transaction proof.
        for n in self.nodes:
            assert not n.verifytxoutproof(ToHex(tweaked_proof))
        # TODO: try more variants, eg transactions at different depths, and
        # verify that the proofs are invalid
# Script entry point: run the functional test via the framework harness.
if __name__ == '__main__':
    MerkleBlockTest().main()
| ElementsProject/elements | test/functional/rpc_txoutproof.py | Python | mit | 6,163 |
from unittest import TestCase
import validictory
class TestItems(TestCase):
    """Checks that schema-declared defaults are filled into validated data."""

    def test_property(self):
        """A missing object property picks up its schema default."""
        schema = {
            "type": "object",
            "properties": {
                "foo": {
                    "default": "bar"
                },
                "baz": {
                    "type": "integer"
                }
            }
        }
        data = {'baz': 2}
        result = validictory.validate(data, schema, required_by_default=False)
        self.assertEqual(result, {"foo": "bar", "baz": 2})

    def test_item(self):
        """A missing positional array item picks up its schema default."""
        schema = {
            # The original dict also carried a dead 'type': 'object' entry
            # that was silently overwritten by this duplicate key; removed.
            'type': 'array',
            'items': [
                {
                    'type': 'any'
                },
                {
                    'type': 'string'
                },
                {
                    'default': 'baz'
                },
            ]
        }
        data = ['foo', 'bar']
        result = validictory.validate(data, schema, required_by_default=False)
        self.assertEqual(result, ["foo", "bar", "baz"])
| jalaziz/validictory | validictory/tests/test_defaults.py | Python | mit | 1,074 |