code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is used for exporting data in the database for all hosts
to a BIND readable text form.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import bz2
import ConfigParser
import datetime
import iscpy
import os
import StringIO
import shutil
import tarfile
from roster_core import punycode_lib
from roster_core import audit_log
from roster_core import config
from roster_core import constants
from roster_core import core
from roster_core import errors
from roster_core import helpers_lib
from roster_config_manager import config_lib
from roster_config_manager import zone_exporter_lib
core.CheckCoreVersionMatches(__version__)
class Error(errors.CoreError):
  """Base error class for all tree exporter errors."""
  pass
class MaintenanceError(Error):
  """Raised when an export is attempted while the database is flagged as
  under maintenance."""
  pass
class ChangesNotFoundError(Error):
  """Raised when no database changes are found since the last export and
  the export was not forced."""
  pass
class BindTreeExport(object):
"""This class exports zones"""
def __init__(self, config_file_name):
"""Sets self.db_instance
Inputs:
config_file_name: name of config file to load db info from
"""
self.tar_file_name = ''
config_instance = config.Config(file_name=config_file_name)
self.db_instance = config_instance.GetDb()
self.config_lib_instance = config_lib.ConfigLib(config_file_name)
self.raw_data = {}
self.cooked_data = {}
self.root_config_dir = config_instance.config_file['exporter'][
'root_config_dir']
self.backup_dir = os.path.abspath(os.path.expanduser(
config_instance.config_file['exporter']['backup_dir']))
self.root_hint_file = os.path.abspath(os.path.expanduser(
config_instance.config_file['exporter']['root_hint_file']))
self.log_instance = audit_log.AuditLog(log_to_syslog=True, log_to_db=True,
db_instance=self.db_instance)
def NamedHeaderChangeDirectory(self, named_conf_header, new_directory):
"""Adds/Changes directory in named.conf header
Inputs:
named_conf_header: string of namedconf header
new_directory: {}
Outputs:
string: string of namedconf header
"""
named_conf_header_contents = iscpy.ParseISCString(named_conf_header)
if( 'options' not in named_conf_header_contents ):
named_conf_header_contents['options'] = {}
named_conf_header_contents['options']['directory'] = '"%s"' % new_directory
return iscpy.MakeISC(named_conf_header_contents)
def AddToTarFile(self, tar_file, file_name, file_string):
"""Adds file string to tarfile object
Inputs:
tarfile: tarfile object
file_name: string of filename to add
file_string: string of file
"""
info = tarfile.TarInfo(name=file_name)
info.size = len(file_string)
tar_file.addfile(info, StringIO.StringIO(file_string))
def ListRecordArgumentDefinitions(self, record_arguments):
"""Lists record argument definitions given table from database
This function is duplicated in roster-core/roster_core/core.py
Inputs:
record_arguments: record arguments from database
Outputs:
dictionary keyed by record type with values of lists
of lists of record arguments sorted by argument order.
example: {'mx': [{'argument_name': u'priority',
'record_arguments_type': u'mx',
'argument_data_type': u'UnsignedInt',
'argument_order': 0},
{'argument_name': u'mail_server',
'record_arguments_type': u'mx',
'argument_data_type': u'Hostname',
'argument_order': 1}]}
"""
sorted_record_arguments = {}
for record_argument in record_arguments:
current_record_type = record_argument['record_arguments_type']
del record_argument['record_arguments_type']
del record_argument['argument_data_type']
if( not current_record_type in sorted_record_arguments ):
sorted_record_arguments[current_record_type] = []
sorted_record_arguments[current_record_type].append(record_argument)
for record_argument in sorted_record_arguments:
sorted_record_arguments[record_argument] = sorted(
sorted_record_arguments[record_argument],
key=lambda k: k['argument_order'])
return sorted_record_arguments
  def ExportAllBindTrees(self, force=False):
    """Exports bind trees to files

    Inputs:
      force: boolean of if the export should continue if no changes are found
             in the database

    Raises:
      MaintenanceError: the database is flagged as under maintenance.
      ChangesNotFoundError: force is False and nothing has changed since the
                            last successful export.
      Error: exported data is inconsistent (no server sets, views or zones).
    """
    function_name, current_args = helpers_lib.GetFunctionNameAndArgs()
    success = False
    try:
      self.db_instance.StartTransaction()
      try:
        # Lock the db so the exported tree is a consistent snapshot.
        self.db_instance.LockDb()
        try:
          if( not force ):
            if( self.db_instance.CheckMaintenanceFlag() ):
              raise MaintenanceError('Database currently under maintenance.')
            audit_log_dict = self.db_instance.GetEmptyRowDict('audit_log')
            audit_log_dict['action'] = u'ExportAllBindTrees'
            audit_log_dict['success'] = 1
            audit_rows = self.db_instance.ListRow('audit_log', audit_log_dict)
            if( audit_rows ):
              # Fetch everything logged since the last successful export; if
              # only ExportAllBindTrees actions are found, nothing changed.
              audit_rows = self.db_instance.ListRow(
                  'audit_log', self.db_instance.GetEmptyRowDict('audit_log'),
                  column='audit_log_timestamp',
                  range_values=(audit_rows[-1]['audit_log_timestamp'],
                                datetime.datetime.now()),
                  is_date=True)
              for row in audit_rows:
                if( row['action'] != u'ExportAllBindTrees' ):
                  break
              else:
                # for/else: loop completed without finding a non-export
                # action, so there is nothing new to export.
                raise ChangesNotFoundError('No changes have been made to the '
                                           'database since last export, '
                                           'no export needed.')
          data, raw_dump = self.GetRawData()
          current_time = self.db_instance.GetCurrentTime()
        finally:
          self.db_instance.UnlockDb()
      finally:
        self.db_instance.EndTransaction()
      cooked_data = self.CookData(data)
      # Map each zone name to the list of view names it is assigned to.
      zone_view_assignments = {}
      for zone_view_assignment in data['zone_view_assignments']:
        if( not zone_view_assignment['zone_view_assignments_zone_name']
            in zone_view_assignments):
          zone_view_assignments[zone_view_assignment[
              'zone_view_assignments_zone_name']] = []
        zone_view_assignments[zone_view_assignment[
            'zone_view_assignments_zone_name']].append(zone_view_assignment[
                'zone_view_assignments_view_dependency'].split('_dep')[0])
      for zone_view_assignment in zone_view_assignments:
        # A zone assigned only to 'any' has no concrete view assignment.
        if( zone_view_assignments[zone_view_assignment] == [u'any'] ):
          raise Error('Zone "%s" has no view assignments.' %
                      zone_view_assignment)
      record_arguments = data['record_arguments']
      record_argument_definitions = self.ListRecordArgumentDefinitions(
          record_arguments)
      if( len(cooked_data['dns_server_sets']) == 0 ):
        raise Error('No dns server sets found.')
      for dns_server_set in cooked_data['dns_server_sets']:
        for dns_server in cooked_data['dns_server_sets'][dns_server_set][
            'dns_servers']:
          # NOTE(review): these two locals appear unused in this method -
          # confirm they can be removed.
          dummy_config_file = StringIO.StringIO()
          config_parser = ConfigParser.SafeConfigParser()
          ## Make Files
          named_directory = '%s/%s' % (
              self.root_config_dir.rstrip('/'), dns_server)
          if( not os.path.exists(named_directory) ):
            os.makedirs(named_directory)
          dns_server_directory = ('%s/%s/named' % (
              self.root_config_dir.rstrip('/'), dns_server))
          if( not os.path.exists(dns_server_directory) ):
            os.makedirs(dns_server_directory)
          # Write server info file
          bind_dir = cooked_data['dns_servers'][dns_server][
              'dns_server_remote_bind_directory']
          info_file_dict = {
              'server_info': {
                  'server_name': dns_server,
                  'server_user': cooked_data['dns_servers'][dns_server][
                      'dns_server_ssh_username'],
                  'bind_dir': bind_dir,
                  'test_dir': cooked_data['dns_servers'][dns_server][
                      'dns_server_remote_test_directory'],
                  'bind_version': 'undetermined'},
              'tools': {
                  'tar': 'True'}}
          self.config_lib_instance.WriteDnsServerInfo(info_file_dict)
          if( len(cooked_data['dns_server_sets'][dns_server_set][
              'views']) == 0 ):
            raise Error('Server set %s has no views.' % dns_server_set)
          for view in cooked_data['dns_server_sets'][dns_server_set]['views']:
            view_directory = '%s/%s' % (dns_server_directory, view)
            if( not os.path.exists(view_directory) ):
              os.makedirs(view_directory)
            if( len(cooked_data['dns_server_sets'][dns_server_set]['views'][
                view]['zones']) == 0 ):
              raise Error('Server set %s has no zones in %s view.' % (
                  dns_server_set, view))
            for zone in cooked_data['dns_server_sets'][dns_server_set]['views'][
                view]['zones']:
              # Skip zones not assigned to this view, or with no records.
              if( view not in zone_view_assignments[zone] ):
                continue
              if(not cooked_data['dns_server_sets'][dns_server_set]['views'][
                  view]['zones'][zone]['records']):
                continue
              zone_file = '%s/%s/%s.db' % (dns_server_directory, view, zone)
              zone_file_string = zone_exporter_lib.MakeZoneString(
                  cooked_data['dns_server_sets'][dns_server_set]['views'][view][
                      'zones'][zone]['records'],
                  cooked_data['dns_server_sets'][dns_server_set]['views'][view][
                      'zones'][zone]['zone_origin'],
                  record_argument_definitions, zone, view)
              zone_file_handle = open(zone_file, 'w')
              zone_file_handle.write(zone_file_string)
              zone_file_handle.close()
          # Write named conf files
          named_conf_file = os.path.join(named_directory, 'named.conf.a')
          named_conf_binary_file = os.path.join(named_directory, 'named.conf.b')
          named_conf_a_file_string = self.MakeNamedConf(data, cooked_data,
                                                        dns_server_set, 'db',
                                                        bind_dir)
          named_conf_b_file_string = self.MakeNamedConf(data, cooked_data,
                                                        dns_server_set, 'aa',
                                                        bind_dir, binary=True)
          root_hint_file = os.path.join(named_directory, 'named/named.ca')
          root_hint_file_string = open(self.root_hint_file, 'r').read()
          root_hint_file_handle = open(root_hint_file, 'w')
          named_conf_binary_file_handle = open(named_conf_binary_file, 'w')
          named_conf_file_handle = open(named_conf_file, 'w')
          try:
            named_conf_file_handle.write(named_conf_a_file_string)
            named_conf_binary_file_handle.write(named_conf_b_file_string)
            root_hint_file_handle.write(root_hint_file_string)
          finally:
            named_conf_file_handle.close()
            named_conf_binary_file_handle.close()
            root_hint_file_handle.close()
      audit_log_replay_dump, full_database_dump = self.CookRawDump(raw_dump)
      success = True
    finally:
      # NOTE(review): if an exception is raised before current_time or the
      # dump lists are assigned, this block raises NameError - confirm.
      log_id = self.log_instance.LogAction(u'tree_export_user',
                                           function_name,
                                           current_args,
                                           success)
      self.tar_file_name = '%s/dns_tree_%s-%s.tar.bz2' % (
          self.backup_dir, current_time.strftime("%d_%m_%yT%H_%M"), log_id)
      if( not os.path.exists(self.backup_dir) ):
        os.makedirs(self.backup_dir)
      audit_log_replay_dump_file = bz2.BZ2File(
          '%s/audit_log_replay_dump-%s.bz2' % (self.backup_dir, log_id), 'w')
      try:
        # BZ2File wants encoded bytes; encode each dump line in place.
        for audit_index, audit_entry in enumerate(audit_log_replay_dump):
          audit_log_replay_dump[audit_index] = audit_entry.encode('utf-8')
        audit_log_replay_dump_file.writelines(audit_log_replay_dump)
      finally:
        audit_log_replay_dump_file.close()
      full_dump_file = bz2.BZ2File('%s/full_database_dump-%s.bz2' %
                                   (self.backup_dir, log_id), 'w')
      try:
        for full_dump_index, full_dump_entry in enumerate(full_database_dump):
          full_database_dump[full_dump_index] = full_dump_entry.encode('utf-8')
        full_dump_file.writelines(full_database_dump)
      finally:
        full_dump_file.close()
      self.config_lib_instance.TarDnsTree(log_id)
def CookRawDump(self, raw_dump):
"""This takes raw data from the database and turns it into a
mysqldump-like output.
Inputs:
raw_dump: list of dictionaries that contain all of the tables
and their associated metadata
Outputs:
list: tuple of list of strings to be concatenated into mysql dump files
"""
# Stole these lines from mysqldump output, not sure all are needed
header = ['SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT;\n',
'SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS;\n',
'SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION;\n',
'SET NAMES utf8;\n'
'SET @OLD_TIME_ZONE=@@TIME_ZONE;\n',
"SET TIME_ZONE='+00:00';\n",
'SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS;\n',
'SET UNIQUE_CHECKS=0;\n',
'SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS;\n',
'SET FOREIGN_KEY_CHECKS=0;\n',
'SET @OLD_SQL_MODE=@@SQL_MODE;\n',
"SET SQL_MODE='NO_AUTO_VALUE_ON_ZERO';\n",
'SET @OLD_SQL_NOTES=@@SQL_NOTES;\n'
'SET SQL_NOTES=0;\n']
footer = ['SET SQL_MODE=@OLD_SQL_MODE;\n',
'SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;\n',
'SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;\n',
'SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT;\n',
'SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS;\n',
'SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION;\n',
'SET SQL_NOTES=@OLD_SQL_NOTES;\n']
full_database_dump = []
full_database_dump.extend(header)
audit_log_replay_dump = []
audit_log_replay_dump.extend(header)
for table_name, table_data in raw_dump.iteritems():
table_lines = []
table_lines.append('DROP TABLE IF EXISTS `%s`;\n' % table_name)
table_lines.append(table_data['schema'])
table_lines[-1] = '%s;' % table_lines[-1]
for row in table_data['rows']:
insert_row = "INSERT INTO %s (%s) VALUES (%%(%s)s);\n" % (
table_name, ','.join(table_data['columns']),
")s, %(".join(table_data['columns']))
table_lines.append(insert_row % row)
full_database_dump.extend(table_lines)
if( table_name not in constants.TABLES_NOT_AUDIT_LOGGED ):
audit_log_replay_dump.extend(table_lines)
full_database_dump.extend(footer)
audit_log_replay_dump.extend(footer)
return (audit_log_replay_dump, full_database_dump)
def ListLatestNamedConfGlobalOptions(self, data, dns_server_set,
binary=False):
"""Lists latest named.conf global options
This function is duplicated in roster-core/roster_core/core_helpers.py
Inputs:
data: data from GetRawData
dns_server_set: string of dns server set name
binary: True if named.conf file format is binary, false otherwise
Outputs:
string: string of latest named.conf global options
"""
current_timestamp = datetime.datetime.now()
smallest_time_differential = datetime.timedelta(weeks=100000)
newest_config = None
for named_config in data['named_conf_global_options']:
time_differential = current_timestamp - named_config['options_created']
if( named_config['named_conf_global_options_dns_server_set_name'] == (
dns_server_set) ):
if( time_differential < smallest_time_differential ):
smallest_time_differential = time_differential
newest_config = named_config['global_options']
if( newest_config is not None ):
deserialized_config = iscpy.Deserialize(newest_config)
if( binary ):
if( len(deserialized_config) == 0 ):
return (u'options {\n'
u'masterfile-format raw;\n'
u'};')
deserialized_part = deserialized_config.partition('options {')
new_options = '\nmasterfile-format raw; '
deserialized_config = '%s%s%s%s' % (
deserialized_part[0],
deserialized_part[1],
new_options,
deserialized_part[2])
return deserialized_config
return newest_config
def MakeNamedConf(self, data, cooked_data, dns_server_set, extension,
remote_bind_dir, binary=False):
"""Makes named.conf file strings
Inputs:
data: data from GetRawData
cooked_data: data from cooked_data
dns_server_set: string of dns_server_set
extension: bind extension, e.g., .db, .aa (binary), etc
remote_bind_dir: string of remote server bind dir
binary: True if named.conf file format is binary, false otherwise
Outputs:
string: string of named.conf file
"""
acl_dict = {}
named_conf_lines = ['#This named.conf file is autogenerated. DO NOT EDIT']
named_conf_header = self.ListLatestNamedConfGlobalOptions(
data, dns_server_set, binary)
if( named_conf_header is None ):
raise Error('Named conf global options missing for server set "%s"' % (
dns_server_set))
named_conf_header = self.NamedHeaderChangeDirectory(
named_conf_header, '%s/named' % remote_bind_dir.rstrip('/'))
named_conf_lines.append(named_conf_header)
for acl_range in data['acl_ranges']:
if( not acl_range['acl_ranges_acl_name'] in acl_dict ):
acl_dict[acl_range['acl_ranges_acl_name']] = {}
if( acl_range['acl_range_cidr_block'] is None ):
acl_dict[acl_range['acl_range_cidr_block']] = None
else:
if( not acl_range['acl_range_cidr_block'] in
acl_dict[acl_range['acl_ranges_acl_name']] ):
acl_dict[
acl_range['acl_ranges_acl_name']][acl_range[
'acl_range_cidr_block']] = {}
for acl in acl_dict:
if( acl_dict[acl] is not None and acl != 'any' ):
named_conf_lines.append('acl %s {' % acl)
for cidr in acl_dict[acl]:
named_conf_lines.append('\t%s;' % cidr)
named_conf_lines.append('};\n')
view_orders = cooked_data['dns_server_sets'][dns_server_set][
'view_order'].keys()
view_orders.sort()
for view_order in view_orders:
view_name = cooked_data['dns_server_sets'][dns_server_set][
'view_order'][view_order]
named_conf_lines.append('view "%s" {' % view_name)
clients = []
found_acl = False
for acl_name in cooked_data['dns_server_sets'][dns_server_set]['views'][
view_name]['acls']:
for view_acl_assignment in data['view_acl_assignments']:
if( view_acl_assignment['view_acl_assignments_view_name'] ==
view_name and
view_acl_assignment['view_acl_assignments_dns_server_set_name'] ==
dns_server_set and
view_acl_assignment['view_acl_assignments_acl_name'] ==
acl_name ):
found_acl = True
if( view_acl_assignment['view_acl_assignments_range_allowed'] ==
True ):
clients.append('%s;' % acl_name)
else:
clients.append('!%s;' % acl_name)
if( clients == [] and found_acl ):
clients = [u'any;']
# sort the acls with the negatives first
sorted_clients = []
for client in clients:
if( client.startswith('!') ):
sorted_clients.insert(0, client)
else:
sorted_clients.append(client)
named_conf_lines.append('\tmatch-clients { \n\t\t%s\n\t };' % (
'\n\t\t'.join(sorted_clients)))
if( cooked_data['dns_server_sets'][dns_server_set]['views'][view_name][
'view_options'] ):
named_conf_lines.append('\t%s' % cooked_data['dns_server_sets'][
dns_server_set]['views'][view_name]['view_options'])
# add root hint file
named_conf_lines.append('\tzone "." {')
named_conf_lines.append('\t\ttype hint;')
named_conf_lines.append('\t\tfile "named.ca";')
named_conf_lines.append('\t};')
for zone in cooked_data['dns_server_sets'][dns_server_set]['views'][
view_name]['zones']:
records = cooked_data['dns_server_sets'][dns_server_set]['views'][
view_name]['zones'][zone]['records']
if( cooked_data['dns_server_sets'][dns_server_set]['views'][
view_name]['zones'][zone]['zone_type'] != 'slave' ):
#If there is no SOA record for this zone, a zone file won't be generated
#so we don't put a reference to a non-existent file in named.conf
for record in records:
if record['record_type'] == u'soa':
break
else:
continue
named_conf_lines.append('\tzone "%s" {' % (
cooked_data['dns_server_sets'][dns_server_set]['views'][view_name][
'zones'][zone]['zone_origin'].rstrip('.')))
named_conf_lines.append('\t\ttype %s;' % cooked_data['dns_server_sets'][
dns_server_set]['views'][view_name]['zones'][zone]['zone_type'])
named_conf_lines.append('\t\tfile "%s/%s.%s";' % (
view_name, zone, extension))
zone_options = cooked_data['dns_server_sets'][dns_server_set]['views'][
view_name]['zones'][zone]['zone_options'].replace('\n', '\n\t\t')
named_conf_lines.append('\t\t%s' % zone_options)
named_conf_lines.append('\t};')
named_conf_lines.append('};')
return '\n'.join(named_conf_lines)
def ListACLNamesByView(self, data, view):
"""Lists acl names
Inputs:
data: data from GetRawData
view: string of view name
Outputs:
list: list of acl names ex:
['private', 'public']
"""
acl_list = []
for view_acl_assignment in data['view_acl_assignments']:
if( view_acl_assignment['view_acl_assignments_view_name'] == view and
view_acl_assignment['view_acl_assignments_acl_name'] not in
acl_list ):
acl_list.append(view_acl_assignment['view_acl_assignments_acl_name'])
return acl_list
def GetRawData(self):
"""Gets raw data from database
Outputs:
tuple of two dictionaries:
dictionary of raw data keyed by data name with values of dicts
containing values of that type's attributes
dictionary of the raw dump keyed by data name with values of
dicts containing the db dump keyed by row, column, and schema
example:
({'view_acl_assignments': ({
'view_acl_assignments_view_name': u'external',
'view_acl_assignments_dns_server_set_name': u'external_dns',
'view_acl_assignments_acl_name': u'public',
'view_acl_assignments_acl_range_allowed': 1})},
{ u'zones':
{'rows':[{}],
'columns': [u'zones_id', u'zone_name'],
'schema':(
u'CREATE TABLE `zones` (\n `zones_id` mediumint(8) ',
'unsigned NOT NULL auto_increment,\n `zone_name` varchar(255) ',
'NOT NULL,\n PRIMARY KEY (`zones_id`),\n UNIQUE KEY ;
'`zone_name` (`zone_name`),\n KEY `zone_name_1` '
'(`zone_name`)\n) ENGINE=InnoDB AUTO_INCREMENT=9 DEFAULT ',
'CHARSET=utf8')}}),
"""
data = {}
named_conf_global_options_dict = self.db_instance.GetEmptyRowDict(
'named_conf_global_options')
data['named_conf_global_options'] = self.db_instance.ListRow(
'named_conf_global_options', named_conf_global_options_dict)
dns_server_set_view_assignments_dict = self.db_instance.GetEmptyRowDict(
'dns_server_set_view_assignments')
data['dns_server_set_view_assignments'] = self.db_instance.ListRow(
'dns_server_set_view_assignments', dns_server_set_view_assignments_dict)
dns_server_set_assignments_dict = self.db_instance.GetEmptyRowDict(
'dns_server_set_assignments')
data['dns_server_set_assignments'] = self.db_instance.ListRow(
'dns_server_set_assignments', dns_server_set_assignments_dict)
dns_server_set_dict = self.db_instance.GetEmptyRowDict('dns_server_sets')
data['dns_server_sets'] = self.db_instance.ListRow('dns_server_sets',
dns_server_set_dict)
dns_servers_dict = self.db_instance.GetEmptyRowDict('dns_servers')
data['dns_servers'] = self.db_instance.ListRow('dns_servers',
dns_servers_dict)
view_dependency_assignments_dict = self.db_instance.GetEmptyRowDict(
'view_dependency_assignments')
data['view_dependency_assignments'] = self.db_instance.ListRow(
'view_dependency_assignments', view_dependency_assignments_dict)
views_dict = self.db_instance.GetEmptyRowDict('views')
data['views'] = self.db_instance.ListRow('views', views_dict)
view_acl_assignments_dict = self.db_instance.GetEmptyRowDict(
'view_acl_assignments')
data['view_acl_assignments'] = self.db_instance.ListRow(
'view_acl_assignments', view_acl_assignments_dict)
acl_ranges_dict = self.db_instance.GetEmptyRowDict('acl_ranges')
data['acl_ranges'] = self.db_instance.ListRow('acl_ranges', acl_ranges_dict)
record_arguments_records_assignments_dict = (
self.db_instance.GetEmptyRowDict(
'record_arguments_records_assignments'))
data['record_arguments_records_assignments'] = self.db_instance.ListRow(
'record_arguments_records_assignments',
record_arguments_records_assignments_dict)
records_dict = self.db_instance.GetEmptyRowDict('records')
data['records'] = self.db_instance.ListRow(
'records', records_dict, 'record_arguments_records_assignments',
record_arguments_records_assignments_dict)
zone_view_assignments_dict = self.db_instance.GetEmptyRowDict(
'zone_view_assignments')
data['zone_view_assignments'] = self.db_instance.ListRow(
'zone_view_assignments', zone_view_assignments_dict)
record_arguments_dict = self.db_instance.GetEmptyRowDict('record_arguments')
data['record_arguments'] = self.db_instance.ListRow('record_arguments',
record_arguments_dict)
raw_dump = self.db_instance.DumpDatabase()
return (data, raw_dump)
def SortRecords(self, records):
"""Sorts records for zone exporter
Inputs:
records: list of records
Outputs:
dict: dictionary keyed by tuple (zone, view_dep)
ex:
{(u'university.edu', u'internal_dep'):
{11: {'target': u'computer4', 'ttl': 3600, 'record_type': u'a',
'view_name': u'internal', 'last_user': u'sharrell',
'zone_name': u'university.edu',
u'assignment_ip': u'192.168.1.4'},
12: {u'serial_number': 20091225, u'refresh_seconds': 5,
'target': u'university.edu.',
u'name_server': u'ns1.university.edu.', u'retry_seconds': 5,
'ttl': 3600, u'minimum_seconds': 5, 'record_type': u'soa',
'view_name': u'internal', 'last_user': u'sharrell',
'zone_name': u'university.edu',
u'admin_email': u'admin@university.edu.',
u'expiry_seconds': 5}}}
"""
sorted_records = {}
for record in records:
zone_name = record['record_zone_name']
view_dep = record['record_view_dependency']
record_id = record['record_arguments_records_assignments_record_id']
arg_name = record[
'record_arguments_records_assignments_argument_name']
if( not sorted_records.has_key((zone_name, view_dep)) ):
sorted_records[(zone_name, view_dep)] = {}
if( not sorted_records[(zone_name, view_dep)].has_key(record_id) ):
sorted_records[(zone_name, view_dep)][record_id] = {}
sorted_records[(zone_name, view_dep)][record_id]['record_type'] = (
record['record_type'])
sorted_records[(zone_name, view_dep)][record_id]['zone_name'] = (
record['record_zone_name'])
sorted_records[(zone_name, view_dep)][record_id]['view_name'] = (
record['record_view_dependency'].rsplit('_dep', 1)[0])
sorted_records[(zone_name, view_dep)][record_id]['target'] = (
record['record_target'])
sorted_records[(zone_name, view_dep)][record_id]['ttl'] = (
record['record_ttl'])
sorted_records[(zone_name, view_dep)][record_id]['last_user'] = (
record['record_last_user'])
sorted_records[(zone_name, view_dep)][record_id][arg_name] = record[
'argument_value']
if( sorted_records[(zone_name, view_dep)][record_id][
arg_name].isdigit() ):
sorted_records[(zone_name, view_dep)][record_id][arg_name] = int(
sorted_records[(zone_name, view_dep)][record_id][arg_name])
return sorted_records
  def CookData(self, data):
    """Cooks data for zone exporter

    Builds a nested structure keyed by dns server set, then view, then zone,
    carrying the records, origin, type and options for each zone, plus a
    'dns_servers' dict of per-server connection metadata.

    Inputs:
      data: dictionary of raw data from database (see GetRawData)

    Outputs:
      dict: dictionary with 'dns_server_sets' and 'dns_servers' keys, keyed
            below those by dns_server_set and dns_server respectively; each
            server set holds 'dns_servers', 'view_order' and 'views', and
            each view holds 'view_options', 'acls' and 'zones' with
            per-zone 'records', 'zone_origin', 'zone_options' and
            'zone_type'.
    """
    cooked_data = {}
    cooked_data['dns_server_sets'] = {}
    cooked_data['dns_servers'] = {}
    # Group the flat record rows by (zone_name, view_dependency).
    sorted_records = self.SortRecords(data['records'])
    for dns_server_set in data['dns_server_sets']:
      dns_server_set_name = dns_server_set['dns_server_set_name']
      if( not dns_server_set_name in cooked_data['dns_server_sets'] ):
        cooked_data['dns_server_sets'][dns_server_set_name] = {}
      if( not 'dns_servers' in cooked_data['dns_server_sets'][
          dns_server_set_name] ):
        cooked_data['dns_server_sets'][dns_server_set_name]['dns_servers'] = []
      if( not 'views' in cooked_data['dns_server_sets'][dns_server_set_name] ):
        cooked_data['dns_server_sets'][dns_server_set_name]['views'] = {}
      # Attach each assigned dns server to this server set (deduplicated).
      for dns_server_set_assignment in data['dns_server_set_assignments']:
        if( dns_server_set_assignment[
            'dns_server_set_assignments_dns_server_set_name'] ==
            dns_server_set['dns_server_set_name'] and
            dns_server_set_assignment[
                'dns_server_set_assignments_dns_server_name']
            not in cooked_data['dns_server_sets'][dns_server_set_name][
                'dns_servers'] ):
          cooked_data['dns_server_sets'][dns_server_set_name][
              'dns_servers'].append(dns_server_set_assignment[
                  'dns_server_set_assignments_dns_server_name'])
      cooked_data['dns_server_sets'][dns_server_set_name]['view_order'] = {}
      for dns_server_set_view_assignment in data[
          'dns_server_set_view_assignments']:
        # NOTE(review): this rebinds dns_server_set_name from the outer
        # loop; the equality check below restores correctness, but the
        # shadowing is fragile - confirm intent.
        dns_server_set_name = dns_server_set_view_assignment[
            'dns_server_set_view_assignments_dns_server_set_name']
        view_name = dns_server_set_view_assignment[
            'dns_server_set_view_assignments_view_name']
        view_order = dns_server_set_view_assignment['view_order']
        view_options = dns_server_set_view_assignment['view_options']
        if( dns_server_set_name == dns_server_set['dns_server_set_name'] ):
          cooked_data['dns_server_sets'][dns_server_set_name]['view_order'][
              view_order] = view_name
          for view_dependency in data['view_dependency_assignments']:
            if( view_name == view_dependency[
                'view_dependency_assignments_view_name'] ):
              if( not view_name in cooked_data['dns_server_sets'][
                  dns_server_set_name]['views'] ):
                cooked_data['dns_server_sets'][dns_server_set_name][
                    'views'][view_name] = {}
              for view_names in data['views']:
                if( view_names['view_name'] == view_name ):
                  # Indent deserialized options for nesting in named.conf.
                  cooked_data['dns_server_sets'][dns_server_set_name][
                      'views'][view_name]['view_options'] = (
                          iscpy.Deserialize(view_options).replace(
                              '\n', '\n\t'))
                  break
              if( not 'acls' in cooked_data['dns_server_sets'][
                  dns_server_set_name]['views'][view_name] ):
                cooked_data['dns_server_sets'][dns_server_set_name]['views'][
                    view_name][
                        'acls'] = self.ListACLNamesByView(data, view_name)
              if( not 'zones' in cooked_data['dns_server_sets'][
                  dns_server_set_name]['views'][view_name] ):
                cooked_data['dns_server_sets'][dns_server_set_name]['views'][
                    view_name]['zones'] = {}
              for zone in data['zone_view_assignments']:
                view_dependency_name = view_dependency[
                    'view_dependency_assignments_view_dependency']
                zone_name = zone['zone_view_assignments_zone_name']
                # Include the zone when it has records in this view
                # dependency, or when it is a slave zone (slaves have no
                # local records).
                if( view_dependency_name == zone[
                    'zone_view_assignments_view_dependency'] and
                    ((zone_name, view_dependency_name) in sorted_records or
                     zone['zone_view_assignments_zone_type'] == 'slave') ):
                  if( not zone_name in cooked_data['dns_server_sets'][
                      dns_server_set_name]['views'][view_name]['zones'] ):
                    cooked_data['dns_server_sets'][dns_server_set_name][
                        'views'][view_name]['zones'][zone_name] = {}
                  if( 'records' not in cooked_data['dns_server_sets'][
                      dns_server_set_name]['views'][view_name]['zones'][
                          zone_name] ):
                    cooked_data['dns_server_sets'][dns_server_set_name][
                        'views'][view_name]['zones'][zone_name][
                            'records'] = []
                  cooked_data['dns_server_sets'][dns_server_set_name][
                      'views'][view_name]['zones'][zone_name][
                          'zone_origin'] = (
                              punycode_lib.Uni2Puny(zone['zone_origin']))
                  cooked_data['dns_server_sets'][dns_server_set_name][
                      'views'][view_name]['zones'][zone_name][
                          'zone_options'] = iscpy.Deserialize(zone[
                              'zone_options'])
                  cooked_data['dns_server_sets'][dns_server_set_name][
                      'views'][view_name]['zones'][zone_name][
                          'zone_type'] = zone[
                              'zone_view_assignments_zone_type']
                  # if the zone is a slave
                  if((zone_name, view_dependency_name) not in sorted_records):
                    continue
                  # Convert unicode names to punycode before export; missing
                  # keys are expected for record types without those args.
                  for record in sorted_records[(
                      zone_name, view_dependency_name)].values():
                    try:
                      record['target'] = punycode_lib.Uni2Puny(
                          record['target'])
                    except (KeyError):
                      pass
                    try:
                      record['assignment_host'] = punycode_lib.Uni2Puny(
                          record['assignment_host'])
                    except (KeyError):
                      pass
                  cooked_data['dns_server_sets'][dns_server_set_name][
                      'views'][view_name]['zones'][zone_name][
                          'records'].extend(sorted_records[(
                              zone_name, view_dependency_name)].values())
    # Insert dns_servers into cooked_data
    for dns_server in data['dns_servers']:
      dns_server_name = dns_server['dns_server_name']
      if( not dns_server_name in cooked_data['dns_servers'] ):
        cooked_data['dns_servers'][dns_server_name] = {}
      cooked_data['dns_servers'][dns_server_name][
          'dns_server_ssh_username'] = (
              dns_server['dns_server_ssh_username'])
      cooked_data['dns_servers'][dns_server_name][
          'dns_server_remote_test_directory'] = dns_server[
              'dns_server_remote_test_directory']
      cooked_data['dns_servers'][dns_server_name][
          'dns_server_remote_bind_directory'] = dns_server[
              'dns_server_remote_bind_directory']
    return cooked_data
| stephenlienharrell/roster-dns-management | roster-config-manager/roster_config_manager/tree_exporter.py | Python | bsd-3-clause | 40,841 |
from datetime import datetime, timedelta
from django.db import models
class InventoryManager(models.Manager):
    """Manager that restricts the default queryset to unshipped items."""

    def get_queryset(self):
        """Return only items that have no shipment assigned yet."""
        base = super(InventoryManager, self).get_queryset()
        return base.filter(shipment__isnull=True).distinct()

    def days_0_30(self):
        """Example method: unshipped items whose batch was created within
        the last 30 days (naive local time)."""
        cutoff = datetime.now() + timedelta(-30)
        base = super(InventoryManager, self).get_queryset()
        return base.filter(shipment__isnull=True,
                           batch__creation_date__gte=cutoff)
| vandorjw/django-traceability | traceability/managers/item.py | Python | bsd-3-clause | 518 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action handler for the ``raw`` module: run the raw command line on
    the remote host directly, without transferring a module file."""

    TRANSFERS_FILES = False  # raw never copies anything to the target

    def run(self, tmp=None, task_vars=dict()):
        """Execute the raw command and return the low-level result dict.

        :arg tmp: remote temporary directory (passed through unchanged).
        :arg task_vars: task variables; unused here.  NOTE(review): the
            mutable default is kept only for signature compatibility --
            the dict is never read or mutated in this method.
        """
        # BUGFIX: 're' is used below to scrub the sudo marker but was never
        # imported at module level, so that branch raised NameError.  Import
        # locally to keep the fix contained to this block.
        import re

        # FIXME: need to rework the noop stuff still
        #if self.runner.noop_on_check(inject):
        #    # in --check mode, always skip this module execution
        #    return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True))

        executable = self._task.args.get('executable')
        result = self._low_level_execute_command(self._task.args.get('_raw_params'), tmp=tmp, executable=executable)

        # for some modules (script, raw), the sudo success key
        # may leak into the stdout due to the way the sudo/su
        # command is constructed, so we filter that out here
        if result.get('stdout','').strip().startswith('SUDO-SUCCESS-'):
            result['stdout'] = re.sub(r'^((\r)?\n)?SUDO-SUCCESS.*(\r)?\n', '', result['stdout'])

        return result
| TeutoNet-Netzdienste/ansible | v2/ansible/plugins/action/raw.py | Python | gpl-3.0 | 1,666 |
#!/usr/bin/python
# --------------------------------------------------------------------------
# Class definition of LedController - utility functions for the LED-strip
#
# This class also takes care of updating the LEDs during idle time
# (e.g. for clock-simulation)
#
# Please edit /etc/nerd-alarmclock.conf to configure this thread
#
# Author: Bernhard Bablok
# License: GPL3
#
# Website: https://github.com/bablokb/nerd-alarmclock
#
# --------------------------------------------------------------------------
import time, threading, colorsys, json
# Try to import the Blinkt! LED library.  It is only present on the target
# hardware; when the import fails we flip the 'simulate' flag so every
# method that would touch the physical strip becomes a no-op.
try:
  import blinkt
  simulate=False
except:
  # we assume our test-environment
  # NOTE(review): bare except also swallows unrelated import errors inside
  # blinkt itself -- 'except ImportError' would be safer.
  simulate=True
class LedController(object):
  """ Utility functions for the LED-strip.

  This class owns the LED state machine: it registers listeners for
  settings changes (brightness, mode, date, day-mode, lamp state,
  lights-off) and provides runnable alarm threads (sunrise simulation,
  running lights).  All writes to the physical strip are serialized
  through self._lock; display updates use a non-blocking acquire so that
  a running alarm keeps exclusive ownership of the strip.
  """

  # Usable brightness levels; index 0 means "off".
  BNESS = [0.0, 0.05, 0.1, 0.2, 0.4] # usable brightness-values
  RED = (255,0,0)
  YELLOW = (255,255,0)
  GREEN = (0,255,0)
  BLUE = (0,0,255)
  VIOLET = (176,52,207)
  COLORS = [RED,VIOLET,GREEN,BLUE]
  # initialize object ----------------------------------------------------
  def __init__(self,settings):
    """ Constructor """
    self._settings = settings
    self._lock = threading.Lock()
    # set time and brightness
    settings.set("_lights_off",False)
    self._brightness = 2 # next time-change will overwrite this
    self._day_of_month = time.localtime().tm_mday
    self._set_leds()
    settings.add_settings_listener('led.brightness.day',self.on_brightness)
    settings.add_settings_listener('led.brightness.night',self.on_brightness)
    settings.add_settings_listener('led.mode',self.on_mode)
    settings.add_settings_listener('_current_date',self.on_date)
    settings.add_settings_listener('_day_mode',self.on_day_mode)
    settings.add_settings_listener('_led.lamp.state',self.on_lamp_state)
    settings.add_settings_listener('_lights_off',self.on_lights_off)
    settings.add_alarm_provider(self.get_alarm)
    # save list of alarms in settings
    # (discovered by introspection: every method named alarm_* is an alarm)
    alarms = []
    for method in dir(self):
      if method.startswith("alarm_"):
        alarms.append(method[6:])
    settings.set("_led_alarms",alarms)
    # save list of modes in settings
    # (same introspection scheme: every method named mode_* is a mode)
    modes = []
    for method in dir(self):
      if method.startswith("mode_"):
        modes.append(method[5:])
    settings.set("_led_modes",modes)
    settings.log.msg("LedController: available modes: %r" % (modes,))
  # --- set the LEDs depending on the mode -------------------------------
  def _set_leds(self):
    """ set LEDs depending on the mode """
    # reset brightness to saved value
    self._set_brightness()
    # execute current led-mode function
    mode = self._settings.get("led.mode")
    func_name = "mode_%s" % mode
    if hasattr(self,func_name):
      func = getattr(self,func_name)
      self._settings.log.msg("LedController: executing %s" % func_name)
    else:
      func = getattr(self,"mode_dom") # fallback, should not happen
      self._settings.log.msg(
        "LedController: no function %s, falling back to mode_dom" % func_name)
    func()
  # --- mode lamp: turn on LEDs -----------------------------------------
  def mode_lamp(self):
    """ turn on all LEDs """
    state = self._settings.get("_led.lamp.state")
    if state:
      self._settings.log.msg("LedController: lamp-mode: lamp on")
    else:
      self._settings.log.msg("LedController: lamp-mode: lamp off")
    if not simulate:
      rgb = self._settings.get("led.lamp.rgb")
      brightness = self._settings.get("led.lamp.brightness")
      # non-blocking acquire: skip the update if an alarm owns the strip
      if self._lock.acquire(False):
        blinkt.clear()
        if state:
          blinkt.set_all(rgb[0],rgb[1],rgb[2],LedController.BNESS[brightness])
        blinkt.show()
        self._lock.release()
  # --- mode dom: set the day of month ----------------------------------
  def mode_dom(self):
    """ show the given day of month on the led """
    self._settings.log.msg("LedController: setting day of month to: %d" % self._day_of_month)
    if not simulate:
      # days 1..8 -> 1..8 LEDs in COLORS[0], 9..16 -> COLORS[1], etc.
      num_leds = (self._day_of_month - 1) % 8 + 1
      col_index = (self._day_of_month - 1) // 8
      color = LedController.COLORS[col_index]
      # could be during an alarm, so ignore if we can't get the lock
      # (at the end of the alarm this method will be called anyhow)
      if self._lock.acquire(False):
        blinkt.clear()
        for i in range(num_leds):
          blinkt.set_pixel(i,color[0],color[1],color[2])
        blinkt.show()
        self._lock.release()
  # --- set the brightness of the LEDs -----------------------------------
  def _set_brightness(self,value=None,force=False):
    """ Set the brightness of the leds.

    With force=True the given value is applied directly (bypassing the
    lights-off check); otherwise the value is saved as the new default
    and the effective brightness honors the _lights_off setting.
    """
    # check if we set an explicit value or the saved default
    if not force and not value is None:
      self._brightness = value
    # check if lights are turned off
    if force:
      target_brightness = value
    elif self._settings.get("_lights_off"):
      target_brightness = 0
    else:
      target_brightness = self._brightness
    self._settings.log.msg(
      "LedController: setting brightness to: %d" % target_brightness)
    if not simulate:
      # could be during an alarm, so ignore if we can't get the lock
      # (at the end of the alarm this method will be called anyhow)
      if self._lock.acquire(False):
        # we only use off and four levels, so scale new appropriately
        blinkt.set_brightness(LedController.BNESS[target_brightness])
        blinkt.show()
        self._lock.release()
  # --- LED-mode change listener -----------------------------------------
  def on_mode(self,name,old,new):
    """ process LED-mode changes """
    self._settings.log.msg("LedController: changing mode to %s" % new)
    self._set_leds()
  # --- lights_off change listener ---------------------------------------
  def on_lights_off(self,name,old,new):
    """ process lights_off-change events (update LEDs)"""
    self._settings.log.msg("LedController: on_lights_off(%s,%s)" % (old,new))
    self._set_leds()
  # --- day-mode change listener -----------------------------------------
  def on_day_mode(self,name,old,new):
    """ process day-mode changes (pick day/night default brightness) """
    value = self._settings.get("led.brightness."+new)
    self._set_brightness(value)
  # --- lamp state listener ----------------------------------------------
  def on_lamp_state(self,name,old,new):
    """ process lamp-state changes """
    # we jut call _set_leds(), since mode_lamp will pick up the new state
    # if not in lamp-mode, state will be ignored
    self._set_leds()
  # --- date change listener ---------------------------------------------
  def on_date(self,name,old,new):
    """ process date-change events (e.g. change LEDs)"""
    self._settings.log.msg("LedController: on_date(%s,%s)" % (old,new))
    # set day-of-month (date string format appears to be y:m:d -- the day
    # is field index 2)
    self._day_of_month = int(new.split(':')[2])
    self._set_leds()
  # --- brightness change listener ---------------------------------------
  def on_brightness(self,name,old,new):
    """ process brightness-changes """
    self._settings.log.msg("LedController: on_brightness(%s,%s)" % (old,new))
    self._set_brightness(new)
  # --- get runnable alarm -----------------------------------------------
  def get_alarm(self,nr,alarm):
    """ return runnable alarm thread for alarm number nr, or None """
    self._settings.log.msg("LedController: creating alarm-thread for alarm %s" % nr)
    # query alarm-settings
    try:
      cfg = json.loads(self._settings.get("alarm.%s.led" % nr))
    except:
      # NOTE(review): fallback presumably for settings already stored as a
      # dict instead of a JSON string; the bare except also hides other
      # errors -- verify and narrow if possible.
      cfg = self._settings.get("alarm.%s.led" % nr,deep=True)
    # has_key() makes this module Python-2 only
    if not cfg.has_key('name') or not cfg['name']:
      return None
    if not hasattr(self,"alarm_"+cfg['name']):
      self._settings.log.msg("LedController: unsupported alarm: %s" % cfg['name'])
      return None
    alarm_func=getattr(self,"alarm_"+cfg['name'])
    # remaining cfg entries become keyword arguments of the alarm function
    del cfg['name']
    # return alarm
    t = threading.Thread(target=alarm_func,args=(alarm.event,),kwargs=cfg)
    return t
  # sunrise simulation ---------------------------------------------------
  def alarm_sunrise(self,stop_me,duration=10,delay=0):
    """ sunrise simulation for the light-alarm.

    Fades hue red->yellow and lightness low->high over 'duration' minutes,
    optionally after 'delay' minutes.  stop_me is an Event that aborts the
    alarm when set.
    """
    duration = int(duration)
    self._settings.log.msg(
      "LedController: running sunrise-alarm for %s minutes" % duration)
    # honor delay
    delay = int(delay)
    if delay > 0:
      self._settings.log.msg(
        "LedController: delaying alarm for %d minutes" % delay)
      if stop_me.wait(60*delay):
        return
    if simulate:
      stop_me.wait(60*duration)
      self._settings.log.msg("LedController: sunrise-alarm ended")
      return
    # hue is in degrees
    HUE_START = 0 # red
    HUE_END = 50 # yellow
    L_START = 0.2
    delta = 1.0/(duration*60) # fraction per second
    H_delta = (HUE_END - HUE_START)*delta
    L_delta = (1.0-L_START)*delta
    # iterate from led-brightness low->high, hue start->end,
    # saturation high->low, lightness low->high
    H = HUE_START
    brightness = delta
    S = 1.0
    L = L_START
    sec = 0
    # hold the lock for the whole alarm so display updates are blocked
    with self._lock:
      while sec <= duration*60:
        # wait one second and bail out if stopped
        if stop_me.wait(1):
          break
        # convert HSL to RGB ...
        (R,G,B) = colorsys.hls_to_rgb(H/360.0,L,S)
        R = 255*R
        G = 255*G
        B = 255*B
        # ... and show
        blinkt.set_all(R,G,B,brightness)
        blinkt.show()
        # update values
        sec = sec + 1
        brightness = brightness + delta
        H = H + H_delta
        L = L + L_delta
    # reset display to standard
    self._settings.log.msg("LedController: sunrise-alarm ended")
    self._set_leds()
  # police signal --------------------------------------------------------
  def alarm_bluelights(self,stop_me,duration=5,delay=0):
    """ simulate police signal """
    self._running_light(stop_me,duration,rgb=[0,0,255],delay=delay)
  # fire signal ----------------------------------------------------------
  def alarm_redlights(self,stop_me,duration=5,delay=0):
    """ simulate fire signal """
    self._running_light(stop_me,duration,rgb=[255,0,0],delay=delay)
  # light signal ---------------------------------------------------------
  def alarm_whitelights(self,stop_me,duration=5,delay=0):
    """ simulate white running-light signal """
    self._running_light(stop_me,duration,rgb=[255,255,255],delay=delay)
  # running light --------------------------------------------------------
  def _running_light(self,stop_me,duration=5,rgb=[0,0,255],delay=0):
    """ running light of defined color (modified from blinkt-example larson.py) """
    # NOTE: the mutable default for rgb is harmless here -- it is only
    # unpacked, never mutated.
    duration = int(duration)
    self._settings.log.msg(
      "LedController: executing running_light-alarm for %s minutes" % duration)
    (r,g,b) = rgb
    # 16-entry intensity window; a moving 8-LED slice of it produces the
    # back-and-forth "Larson scanner" effect
    values = [(0,0,0), (0,0,0), (0,0,0), (0,0,0), (0,0,0),
              (r//16,g//16,b//16),
              (r//4,g//4,b//4),
              (r,g,b),
              (r//4,g//4,b//4),
              (r//16,g//16,b//16),
              (0,0,0), (0,0,0), (0,0,0), (0,0,0), (0,0,0), (0,0,0)]
    start_time = time.time()
    end_time = start_time + 60*duration
    now = start_time
    # honor delay
    delay = int(delay)
    if delay > 0:
      self._settings.log.msg(
        "LedController: delaying alarm for %d minutes" % delay)
      if stop_me.wait(60*delay):
        return
    if simulate:
      stop_me.wait(60*duration)
      self._settings.log.msg("LedController: running_light-alarm finished")
    else:
      # hold the lock for the whole alarm so display updates are blocked
      with self._lock:
        blinkt.set_brightness(1.0)
        while now <= end_time:
          delta = (now - start_time) * 16
          offset = int(abs((delta % 16) - 8))
          for i in range(8):
            (rv,gv,bv) = values[offset + i]
            blinkt.set_pixel(i,rv,gv,bv)
          blinkt.show()
          if stop_me.wait(0.1):
            break
          now = time.time()
    # reset display to standard
    self._settings.log.msg("LedController: running_light-alarm finished")
    self._set_leds()
| bablokb/nerd-alarmclock | files/usr/local/lib/python2.7/site-packages/nclock/LedController.py | Python | gpl-3.0 | 12,106 |
import wx
from gooey.gui.util import wx_util
from gooey.gui.windows.advanced_config import ConfigPanel
from gooey.gui.windows.sidebar import Sidebar
# Default widget configuration used when no argparse spec is supplied:
# a single free-form command field.
basic_config = {
  'widgets': [{
    'type': 'CommandField',
    'required': True,
    'data': {
      'display_name': 'Enter Commands',
      'help': 'Enter command line arguments',
      'nargs': '',
      'commands': '',
      'choices': [],
      'default': None,
    }
  }],
}

# Layout identifiers.
# NOTE(review): FLAT maps to the string 'standard' (not 'flat') -- other
# modules presumably compare against this value; confirm before renaming.
FLAT = 'standard'
COLUMN = 'column'
class FlatLayout(wx.Panel):
  """Single-pane layout: the config panel fills the entire window."""

  def __init__(self, *args, **kwargs):
    super(FlatLayout, self).__init__(*args, **kwargs)
    self.SetDoubleBuffered(True)
    parent = args[0]
    self.main_content = ConfigPanel(self, opt_cols=3,
                                    use_tabs=parent.use_tabs)
    layout = wx.BoxSizer(wx.HORIZONTAL)
    layout.Add(self.main_content, 3, wx.EXPAND)
    self.SetSizer(layout)
class ColumnLayout(wx.Panel):
  """Two-pane layout: navigation sidebar on the left, separated from the
  config panel by a vertical rule."""

  def __init__(self, *args, **kwargs):
    super(ColumnLayout, self).__init__(*args, **kwargs)
    self.SetDoubleBuffered(True)
    parent = args[0]
    self.sidebar = Sidebar(self)
    self.main_content = ConfigPanel(self, opt_cols=2,
                                    use_tabs=parent.use_tabs)
    layout = wx.BoxSizer(wx.HORIZONTAL)
    layout.Add(self.sidebar, 1, wx.EXPAND)
    layout.Add(wx_util.vertical_rule(self), 0, wx.EXPAND)
    layout.Add(self.main_content, 3, wx.EXPAND)
    self.SetSizer(layout)
| jschultz/Gooey | gooey/gui/windows/layouts.py | Python | mit | 1,386 |
"""
Django settings for server project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the DJANGO_SECRET_KEY environment variable; the hard-coded value
# remains only as a development fallback so existing setups keep working.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY', '4e0!oxm9h_s=-c58ypaj(e^l8tp+a7oh2_y)x3p-1)#odlf&lq')

# SECURITY WARNING: don't run with debug turned on in production!
# Defaults to True (original behavior); set DJANGO_DEBUG=False in production.
DEBUG = os.environ.get('DJANGO_DEBUG', 'True') == 'True'

ALLOWED_HOSTS = ["cloudcb.herokuapp.com", "localhost", "127.0.0.1", "::1"]
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'clipboard.apps.ClipboardConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'server.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'server.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite is the local fallback; on Heroku it is overridden below.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Update database configuration with $DATABASE_URL.
# NOTE: mid-file third-party import is the documented Heroku/dj-database-url
# convention; when DATABASE_URL is unset, config() returns an empty dict and
# the SQLite default above stays in effect.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')

# DRF: rate-limit anonymous clients to 2 requests per minute.
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_RATES': {
        'anon': '2/minute',
    }
}
| krsoninikhil/cloud-clipboard | server/server/settings.py | Python | mit | 3,593 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routes for the affiliations app.
# NOTE(review): patterns() with dotted-string view names is the old
# (pre-1.8) Django style and was removed in Django 1.10 -- fine for the
# Django version this project targets, but flag for any upgrade.
urlpatterns = patterns('',
    url(r'^$', 'affiliations.views.affiliations', name = 'affiliations_url'),
    url(r'^fetch', 'affiliations.views.fetch', name = 'aff_fetch_url'),
    url(r'^add', 'affiliations.views.add', name = 'aff_add_url'),
    url(r'^confirm', 'affiliations.views.confirm', name = 'aff_confirm_url'),
    url(r'^resendcode', 'affiliations.views.resendcode', name = 'aff_resendcode_url'),
)
) | salimm/django-affiliations | build/lib/affiliations/urls.py | Python | mit | 518 |
# Copyright (c) 2013 Zelin.io
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import mock
from oslo_concurrency import processutils
from oslo_utils import importutils
from oslo_utils import units
from cinder.backup import driver as backup_driver
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import sheepdog
# Address/port of the fake sheep daemon used throughout these tests.
SHEEP_ADDR = '127.0.0.1'
SHEEP_PORT = 7000
class SheepdogDriverTestDataGenerator(object):
    """Builds fake volumes/snapshots/backups and the expected dog/qemu-img
    command lines and outputs used by the sheepdog driver tests."""

    def __init__(self):
        self.TEST_VOLUME = self._make_fake_volume(self.TEST_VOL_DATA)
        self.TEST_CLONED_VOLUME = self._make_fake_volume(
            self.TEST_CLONED_VOL_DATA)
        self.TEST_SNAPSHOT = self._make_fake_snapshot(
            self.TEST_SNAPSHOT_DATA, self.TEST_VOLUME)
        self.TEST_BACKUP_VOLUME = self._make_fake_backup_volume(
            self.TEST_BACKUP_VOL_DATA)

    def sheepdog_cmd_error(self, cmd, exit_code, stdout, stderr):
        # Render an error message in the same format SheepdogCmdError uses,
        # with newlines escaped for single-line comparison.
        return (('(Command: %(cmd)s) '
                 '(Return Code: %(exit_code)s) '
                 '(Stdout: %(stdout)s) '
                 '(Stderr: %(stderr)s)') %
                {'cmd': cmd,
                 'exit_code': exit_code,
                 'stdout': stdout.replace('\n', '\\n'),
                 'stderr': stderr.replace('\n', '\\n')})

    def _make_fake_volume(self, volume_data):
        return fake_volume.fake_volume_obj(context.get_admin_context(),
                                           **volume_data)

    def _make_fake_snapshot(self, snapshot_data, src_volume):
        snapshot_obj = fake_snapshot.fake_snapshot_obj(
            context.get_admin_context(), **snapshot_data)
        snapshot_obj.volume = src_volume
        return snapshot_obj

    def _make_fake_backup_volume(self, backup_data):
        return fake_backup.fake_backup_obj(context.get_admin_context(),
                                           **backup_data)

    # --- expected command-line builders (mirror SheepdogClient) ----------
    def cmd_dog_vdi_create(self, name, size):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'create', name,
                '%sG' % size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT)

    def cmd_dog_vdi_delete(self, name):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'delete', name,
                '-a', SHEEP_ADDR, '-p', SHEEP_PORT)

    def cmd_dog_vdi_create_snapshot(self, vdiname, snapname):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'snapshot', '-s',
                snapname, '-a', SHEEP_ADDR, '-p', SHEEP_PORT, vdiname)

    def cmd_dog_vdi_delete_snapshot(self, vdiname, snapname):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'delete', '-s',
                snapname, '-a', SHEEP_ADDR, '-p', SHEEP_PORT, vdiname)

    def cmd_qemuimg_vdi_clone(self, src_vdiname, src_snapname, dst_vdiname,
                              size):
        return ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-b',
                'sheepdog:%(addr)s:%(port)s:%(src_vdiname)s:%(src_snapname)s' %
                {'addr': SHEEP_ADDR, 'port': SHEEP_PORT,
                 'src_vdiname': src_vdiname, 'src_snapname': src_snapname},
                'sheepdog:%(addr)s:%(port)s:%(dst_vdiname)s' %
                {'addr': SHEEP_ADDR, 'port': SHEEP_PORT,
                 'dst_vdiname': dst_vdiname}, '%sG' % size)

    def cmd_dog_vdi_resize(self, name, size):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'resize', name,
                size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT)

    def cmd_dog_vdi_list(self, name):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'list', name,
                '-r', '-a', SHEEP_ADDR, '-p', SHEEP_PORT)

    def cmd_dog_node_info(self):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'node', 'info',
                '-a', SHEEP_ADDR, '-p', SHEEP_PORT, '-r')

    def cmd_dog_node_list(self):
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'node', 'list',
                '-a', SHEEP_ADDR, '-p', SHEEP_PORT, '-r')

    CMD_DOG_CLUSTER_INFO = ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'cluster',
                            'info', '-a', SHEEP_ADDR, '-p', SHEEP_PORT)

    # --- fake DB records passed to the fake_* object factories ------------
    TEST_VOL_DATA = {
        'size': 1,
        'id': fake.VOLUME_ID,
        'provider_auth': None,
        'host': 'host@backendsec#unit_test_pool',
        'project_id': fake.PROJECT_ID,
        'provider_location': 'location',
        'display_name': 'vol1',
        'display_description': 'unit test volume',
        'volume_type_id': None,
        'consistencygroup_id': None,
    }

    TEST_CLONED_VOL_DATA = {
        'size': 2,
        'id': fake.VOLUME2_ID,
        'provider_auth': None,
        'host': 'host@backendsec#unit_test_pool',
        'project_id': fake.PROJECT_ID,
        'provider_location': 'location',
        'display_name': 'vol3',
        'display_description': 'unit test cloned volume',
        'volume_type_id': None,
        'consistencygroup_id': None,
    }

    TEST_SNAPSHOT_DATA = {
        'id': fake.SNAPSHOT_ID,
    }

    TEST_BACKUP_VOL_DATA = {
        'volume_id': fake.VOLUME_ID,
    }

    # --- canned dog/qemu-img stdout/stderr fixtures -----------------------
    COLLIE_NODE_INFO = """
0 107287605248 3623897354 3%
Total 107287605248 3623897354 3% 54760833024
"""

    COLLIE_NODE_LIST = """
0 127.0.0.1:7000 128 1
"""

    COLLIE_VDI_LIST = """
= testvolume 0 0 0 0 1467037106 fd32fc 3
"""

    COLLIE_CLUSTER_INFO_0_5 = """\
Cluster status: running
Cluster created at Tue Jun 25 19:51:41 2013
Epoch Time Version
2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002]
"""

    COLLIE_CLUSTER_INFO_0_6 = """\
Cluster status: running, auto-recovery enabled
Cluster created at Tue Jun 25 19:51:41 2013
Epoch Time Version
2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002]
"""

    DOG_CLUSTER_RUNNING = """\
Cluster status: running, auto-recovery enabled
Cluster created at Thu Jun 18 17:24:56 2015
Epoch Time Version [Host:Port:V-Nodes,,,]
2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128,\
 127.0.0.1:7002:128]
"""

    DOG_CLUSTER_INFO_TO_BE_FORMATTED = """\
Cluster status: Waiting for cluster to be formatted
"""

    DOG_CLUSTER_INFO_WAITING_OTHER_NODES = """\
Cluster status: Waiting for other nodes to join cluster
Cluster created at Thu Jun 18 17:24:56 2015
Epoch Time Version [Host:Port:V-Nodes,,,]
2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128]
"""

    DOG_CLUSTER_INFO_SHUTTING_DOWN = """\
Cluster status: System is shutting down
"""

    DOG_VDI_CREATE_VDI_ALREADY_EXISTS = """\
Failed to create VDI %(vdiname)s: VDI exists already
"""

    DOG_VDI_SNAPSHOT_VDI_NOT_FOUND = """\
Failed to create snapshot for volume-00000000-0000-0000-0000-000000000001: \
No VDI found
"""

    DOG_VDI_SNAPSHOT_ALREADY_EXISTED = """\
Failed to create snapshot for volume-00000000-0000-0000-0000-000000000001, \
maybe snapshot id (0) or tag (snapshot-00000000-0000-0000-0000-000000000002) \
is existed
"""

    DOG_VDI_SNAPSHOT_TAG_NOT_FOUND = """\
Failed to open VDI volume-00000000-0000-0000-0000-000000000001 \
(snapshot id: 0 snapshot tag: snapshot-00000000-0000-0000-0000-000000000002): \
Failed to find requested tag
"""

    DOG_VDI_SNAPSHOT_VOLUME_NOT_FOUND = """\
Failed to open VDI volume-00000000-0000-0000-0000-000000000001 \
(snapshot id: 0 snapshot tag: snapshot-00000000-0000-0000-0000-000000000002): \
No VDI found
"""

    DOG_VDI_RESIZE_SIZE_SHRINK = """\
Shrinking VDIs is not implemented
"""

    DOG_VDI_RESIZE_TOO_LARGE = """\
New VDI size is too large. This volume's max size is 4398046511104
"""

    DOG_COMMAND_ERROR_VDI_NOT_EXISTS = """\
Failed to open VDI %(vdiname)s (snapshot id: 0 snapshot tag: ): No VDI found
"""

    DOG_COMMAND_ERROR_FAIL_TO_CONNECT = """\
failed to connect to 127.0.0.1:7000: Connection refused
failed to connect to 127.0.0.1:7000: Connection refused
Failed to get node list
"""

    QEMU_IMG_VDI_ALREADY_EXISTS = """\
qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000001: \
VDI exists already,
"""

    QEMU_IMG_VDI_NOT_FOUND = """\
qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000003: \
cannot get vdi info, No vdi found, \
volume-00000000-0000-0000-0000-000000000001 \
snapshot-00000000-0000-0000-0000-000000000002
"""

    QEMU_IMG_SNAPSHOT_NOT_FOUND = """\
qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000003: \
cannot get vdi info, Failed to find the requested tag, \
volume-00000000-0000-0000-0000-000000000001 \
snapshot-00000000-0000-0000-0000-000000000002
"""

    QEMU_IMG_SIZE_TOO_LARGE = """\
qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000001: \
An image is too large. The maximum image size is 4096GB
"""

    QEMU_IMG_FAILED_TO_CONNECT = """\
qemu-img: sheepdog::volume-00000000-0000-0000-0000-000000000001: \
Failed to connect socket: Connection refused
"""
class FakeImageService(object):
    """Stub of the image service used by the driver tests.

    ``download`` is intentionally a no-op: the tests only need the call to
    succeed, not to produce image data.
    """

    def download(self, context, image_id, path):
        # Pretend to fetch *image_id*; nothing is written to *path*.
        return None
class SheepdogIOWrapperTestCase(test.TestCase):
    """Unit tests for SheepdogIOWrapper; processutils.execute is mocked so
    no dog processes are spawned."""

    def setUp(self):
        super(SheepdogIOWrapperTestCase, self).setUp()
        self.volume = {'name': 'volume-2f9b2ff5-987b-4412-a91c-23caaf0d5aff'}
        self.snapshot_name = 'snapshot-bf452d80-068a-43d7-ba9f-196cf47bd0be'

        self.vdi_wrapper = sheepdog.SheepdogIOWrapper(
            SHEEP_ADDR, SHEEP_PORT, self.volume)
        # second wrapper targets a snapshot of the same volume
        self.snapshot_wrapper = sheepdog.SheepdogIOWrapper(
            SHEEP_ADDR, SHEEP_PORT, self.volume, self.snapshot_name)

        self.execute = mock.MagicMock()
        self.mock_object(processutils, 'execute', self.execute)

    def test_init(self):
        self.assertEqual(self.volume['name'], self.vdi_wrapper._vdiname)
        self.assertIsNone(self.vdi_wrapper._snapshot_name)
        self.assertEqual(0, self.vdi_wrapper._offset)

        self.assertEqual(self.snapshot_name,
                         self.snapshot_wrapper._snapshot_name)

    def test_execute(self):
        cmd = ('cmd1', 'arg1')
        data = 'data1'

        self.vdi_wrapper._execute(cmd, data)

        self.execute.assert_called_once_with(*cmd, process_input=data)

    def test_execute_error(self):
        cmd = ('cmd1', 'arg1')
        data = 'data1'
        # OSError from execute must surface as VolumeDriverException
        self.mock_object(processutils, 'execute',
                         mock.MagicMock(side_effect=OSError))

        args = (cmd, data)
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper._execute,
                          *args)

    def test_read_vdi(self):
        self.vdi_wrapper.read()
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT,
            self.volume['name'], 0, process_input=None)

    def test_read_vdi_invalid(self):
        # once a wrapper is invalidated, all I/O must fail
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.read)

    def test_write_vdi(self):
        data = 'data1'

        self.vdi_wrapper.write(data)

        self.execute.assert_called_once_with(
            'dog', 'vdi', 'write', '-a', SHEEP_ADDR, '-p', SHEEP_PORT,
            self.volume['name'], 0, len(data),
            process_input=data)
        # a successful write advances the internal offset
        self.assertEqual(len(data), self.vdi_wrapper.tell())

    def test_write_vdi_invalid(self):
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.write, 'dummy_data')

    def test_read_snapshot(self):
        self.snapshot_wrapper.read()
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT,
            '-s', self.snapshot_name, self.volume['name'], 0,
            process_input=None)

    def test_seek(self):
        self.vdi_wrapper.seek(12345)
        self.assertEqual(12345, self.vdi_wrapper.tell())

        self.vdi_wrapper.seek(-2345, whence=1)
        self.assertEqual(10000, self.vdi_wrapper.tell())

        # This results in negative offset.
        self.assertRaises(IOError, self.vdi_wrapper.seek, -20000, whence=1)

    def test_seek_invalid(self):
        seek_num = 12345
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.seek, seek_num)

    def test_flush(self):
        # flush does nothing.
        self.vdi_wrapper.flush()
        self.assertFalse(self.execute.called)

    def test_fileno(self):
        # wrapper has no OS-level file descriptor
        self.assertRaises(IOError, self.vdi_wrapper.fileno)
class SheepdogClientTestCase(test.TestCase):
def setUp(self):
super(SheepdogClientTestCase, self).setUp()
self._cfg = conf.Configuration(None)
self._cfg.sheepdog_store_address = SHEEP_ADDR
self._cfg.sheepdog_store_port = SHEEP_PORT
self.driver = sheepdog.SheepdogDriver(configuration=self._cfg)
db_driver = self.driver.configuration.db_driver
self.db = importutils.import_module(db_driver)
self.driver.db = self.db
self.driver.do_setup(None)
self.test_data = SheepdogDriverTestDataGenerator()
node_list = [SHEEP_ADDR]
self.client = sheepdog.SheepdogClient(node_list, SHEEP_PORT)
self._addr = SHEEP_ADDR
self._port = SHEEP_PORT
self._vdiname = self.test_data.TEST_VOLUME.name
self._vdisize = self.test_data.TEST_VOLUME.size
self._src_vdiname = self.test_data.TEST_SNAPSHOT.volume_name
self._snapname = self.test_data.TEST_SNAPSHOT.name
self._dst_vdiname = self.test_data.TEST_CLONED_VOLUME.name
self._dst_vdisize = self.test_data.TEST_CLONED_VOLUME.size
@mock.patch.object(utils, 'execute')
def test_run_dog_success(self, fake_execute):
args = ('cluster', 'info')
expected_cmd = self.test_data.CMD_DOG_CLUSTER_INFO
fake_execute.return_value = ('', '')
self.client._run_dog(*args)
fake_execute.assert_called_once_with(*expected_cmd)
@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_dog_command_not_found(self, fake_logger, fake_execute):
args = ('cluster', 'info')
expected_msg = 'No such file or directory'
expected_errno = errno.ENOENT
fake_execute.side_effect = OSError(expected_errno, expected_msg)
self.assertRaises(OSError, self.client._run_dog, *args)
self.assertTrue(fake_logger.error.called)
@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_dog_operation_not_permitted(self, fake_logger, fake_execute):
args = ('cluster', 'info')
expected_msg = 'Operation not permitted'
expected_errno = errno.EPERM
fake_execute.side_effect = OSError(expected_errno, expected_msg)
self.assertRaises(OSError, self.client._run_dog, *args)
self.assertTrue(fake_logger.error.called)
@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_dog_fail_to_connect(self, fake_logger, fake_execute):
    """A 'failed to connect' stderr raises SheepdogError with addr/port."""
    args = ('cluster', 'info')
    cmd = self.test_data.CMD_DOG_CLUSTER_INFO
    exit_code = 2
    stdout = 'stdout dummy'
    stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT
    expected_reason = (_('Failed to connect to sheep daemon. '
                         'addr: %(addr)s, port: %(port)s'),
                       {'addr': SHEEP_ADDR, 'port': SHEEP_PORT})
    fake_execute.side_effect = processutils.ProcessExecutionError(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    ex = self.assertRaises(exception.SheepdogError,
                           self.client._run_dog, *args)
    self.assertEqual(expected_reason, ex.kwargs['reason'])

@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_dog_fail_to_connect_bugcase(self, fake_logger, fake_execute):
    # NOTE(zhangsong): Sheepdog's bug case.
    # details are written to Sheepdog driver code.
    """Connection failure reported on stderr with exit code 0 still raises."""
    args = ('node', 'list')
    stdout = ''
    stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT
    expected_reason = (_('Failed to connect to sheep daemon. '
                         'addr: %(addr)s, port: %(port)s'),
                       {'addr': SHEEP_ADDR, 'port': SHEEP_PORT})
    fake_execute.return_value = (stdout, stderr)
    ex = self.assertRaises(exception.SheepdogError,
                           self.client._run_dog, *args)
    self.assertEqual(expected_reason, ex.kwargs['reason'])

@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_dog_unknown_error(self, fake_logger, fake_execute):
    """Any other ProcessExecutionError is wrapped in SheepdogCmdError."""
    args = ('cluster', 'info')
    cmd = self.test_data.CMD_DOG_CLUSTER_INFO
    exit_code = 1
    stdout = 'stdout dummy'
    stderr = 'stderr dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    fake_execute.side_effect = processutils.ProcessExecutionError(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client._run_dog, *args)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(utils, 'execute')
def test_run_qemu_img_success(self, fake_execute):
    # multiple part of args match the prefix and
    # volume name is matched the prefix unfortunately
    """_run_qemu_img rewrites 'sheepdog:' prefixes to include addr:port."""
    expected_cmd = ('env', 'LC_ALL=C', 'LANG=C',
                    'qemu-img', 'create', '-b',
                    'sheepdog:%(addr)s:%(port)s:sheepdog:snap' %
                    {'addr': SHEEP_ADDR, 'port': SHEEP_PORT},
                    'sheepdog:%(addr)s:%(port)s:clone' %
                    {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}, '10G')
    fake_execute.return_value = ('', '')
    self.client._run_qemu_img('create', '-b', 'sheepdog:sheepdog:snap',
                              'sheepdog:clone', '10G')
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_qemu_img_command_not_found(self, fake_logger, fake_execute):
    """A missing qemu-img binary (ENOENT) is logged and OSError re-raised."""
    args = ('create', 'dummy')
    expected_msg = 'No such file or directory'
    expected_errno = errno.ENOENT
    fake_execute.side_effect = OSError(expected_errno, expected_msg)
    self.assertRaises(OSError, self.client._run_qemu_img, *args)
    self.assertTrue(fake_logger.error.called)

@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_qemu_img_unknown_os_error(self, fake_logger, fake_execute):
    """Any other OSError is logged and re-raised unchanged."""
    args = ('create', 'dummy')
    expected_msg = 'unknown'
    expected_errno = errno.EPERM
    fake_execute.side_effect = OSError(expected_errno, expected_msg)
    self.assertRaises(OSError, self.client._run_qemu_img, *args)
    self.assertTrue(fake_logger.error.called)

@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_qemu_img_fail_to_connect(self, fake_logger, fake_execute):
    """A qemu-img connection failure raises SheepdogError with addr/port."""
    args = ('create', 'dummy')
    cmd = ('qemu-img', 'create', 'dummy')
    exit_code = 1
    stdout = 'stdout dummy'
    stderr = self.test_data.QEMU_IMG_FAILED_TO_CONNECT
    expected_reason = (_('Failed to connect to sheep daemon. '
                         'addr: %(addr)s, port: %(port)s'),
                       {'addr': SHEEP_ADDR, 'port': SHEEP_PORT})
    fake_execute.side_effect = processutils.ProcessExecutionError(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    ex = self.assertRaises(exception.SheepdogError,
                           self.client._run_qemu_img, *args)
    self.assertEqual(expected_reason, ex.kwargs['reason'])

@mock.patch.object(utils, 'execute')
@mock.patch.object(sheepdog, 'LOG')
def test_run_qemu_img_unknown_execution_error(self, fake_logger,
                                              fake_execute):
    """Other ProcessExecutionErrors are wrapped in SheepdogCmdError."""
    args = ('create', 'dummy')
    cmd = ('qemu-img', 'create', 'dummy')
    exit_code = 1
    stdout = 'stdout dummy'
    stderr = 'stderr dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    fake_execute.side_effect = processutils.ProcessExecutionError(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client._run_qemu_img, *args)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_check_cluster_status_success(self, fake_logger, fake_execute):
    """A running cluster passes the check and logs at debug level."""
    stdout = self.test_data.DOG_CLUSTER_RUNNING
    stderr = ''
    expected_cmd = ('cluster', 'info')
    fake_execute.return_value = (stdout, stderr)
    self.client.check_cluster_status()
    fake_execute.assert_called_once_with(*expected_cmd)
    self.assertTrue(fake_logger.debug.called)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_check_cluster_status_v0_5(self, fake_execute):
    """Output from sheepdog 0.5 (collie era) is accepted as running."""
    stdout = self.test_data.COLLIE_CLUSTER_INFO_0_5
    stderr = ''
    fake_execute.return_value = (stdout, stderr)
    self.client.check_cluster_status()

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_check_cluster_status_v0_6(self, fake_execute):
    """Output from sheepdog 0.6 (collie era) is accepted as running."""
    stdout = self.test_data.COLLIE_CLUSTER_INFO_0_6
    stderr = ''
    fake_execute.return_value = (stdout, stderr)
    self.client.check_cluster_status()

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_check_cluster_status_not_formatted(self, fake_logger,
                                            fake_execute):
    """An unformatted cluster raises SheepdogError with a format hint."""
    stdout = self.test_data.DOG_CLUSTER_INFO_TO_BE_FORMATTED
    stderr = ''
    expected_reason = _('Cluster is not formatted. '
                        'You should probably perform '
                        '"dog cluster format".')
    fake_execute.return_value = (stdout, stderr)
    ex = self.assertRaises(exception.SheepdogError,
                           self.client.check_cluster_status)
    self.assertEqual(expected_reason, ex.kwargs['reason'])

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_check_cluster_status_waiting_to_join_cluster(self, fake_logger,
                                                      fake_execute):
    """A cluster waiting for nodes raises SheepdogError accordingly."""
    stdout = self.test_data.DOG_CLUSTER_INFO_WAITING_OTHER_NODES
    stderr = ''
    expected_reason = _('Waiting for all nodes to join cluster. '
                        'Ensure all sheep daemons are running.')
    fake_execute.return_value = (stdout, stderr)
    ex = self.assertRaises(exception.SheepdogError,
                           self.client.check_cluster_status)
    self.assertEqual(expected_reason, ex.kwargs['reason'])

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_check_cluster_status_shutting_down(self, fake_logger,
                                            fake_execute):
    """Any unrecognized cluster status is reported as invalid."""
    stdout = self.test_data.DOG_CLUSTER_INFO_SHUTTING_DOWN
    stderr = ''
    expected_reason = _('Invalid sheepdog cluster status.')
    fake_execute.return_value = (stdout, stderr)
    ex = self.assertRaises(exception.SheepdogError,
                           self.client.check_cluster_status)
    self.assertEqual(expected_reason, ex.kwargs['reason'])
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_check_cluster_status_unknown_error(self, fake_logger,
                                            fake_execute):
    """A SheepdogCmdError from _run_dog propagates with its message intact.

    Fix: ``stderr`` was set to the string ``'stdout_dummy'`` — a copy-paste
    slip; every sibling test uses distinct ``'stdout_dummy'``/
    ``'stderr_dummy'`` markers so the assertion can tell the streams apart.
    """
    cmd = self.test_data.CMD_DOG_CLUSTER_INFO
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.check_cluster_status)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_create_success(self, fake_execute):
    """create() issues 'dog vdi create <name> <size>G'."""
    expected_cmd = ('vdi', 'create', self._vdiname, '%sG' % self._vdisize)
    fake_execute.return_value = ('', '')
    self.client.create(self._vdiname, self._vdisize)
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_create_vdi_already_exists(self, fake_logger, fake_execute):
    """Creating a VDI that already exists logs and re-raises the error."""
    cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize)
    exit_code = 1
    stdout = ''
    stderr = (self.test_data.DOG_VDI_CREATE_VDI_ALREADY_EXISTS %
              {'vdiname': self._vdiname})
    expected_msg = self.test_data.sheepdog_cmd_error(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError, self.client.create,
                           self._vdiname, self._vdisize)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_create_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized create failures are logged and re-raised."""
    cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize)
    exit_code = 1
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(
        cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError, self.client.create,
                           self._vdiname, self._vdisize)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_delete_success(self, fake_execute):
    """delete() issues 'dog vdi delete <name>'."""
    expected_cmd = ('vdi', 'delete', self._vdiname)
    fake_execute.return_value = ('', '')
    self.client.delete(self._vdiname)
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_delete_vdi_not_found(self, fake_logger, fake_execute):
    """Deleting a missing VDI only warns; it must not raise."""
    stdout = ''
    stderr = (self.test_data.DOG_COMMAND_ERROR_VDI_NOT_EXISTS %
              {'vdiname': self._vdiname})
    fake_execute.return_value = (stdout, stderr)
    self.client.delete(self._vdiname)
    self.assertTrue(fake_logger.warning.called)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_delete_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized delete failures are logged and re-raised."""
    cmd = self.test_data.cmd_dog_vdi_delete(self._vdiname)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.delete, self._vdiname)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_create_snapshot_success(self, fake_execute):
    """create_snapshot() issues 'dog vdi snapshot -s <snap> <vdi>'."""
    args = (self._src_vdiname, self._snapname)
    expected_cmd = ('vdi', 'snapshot', '-s', self._snapname,
                    self._src_vdiname)
    fake_execute.return_value = ('', '')
    self.client.create_snapshot(*args)
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_create_snapshot_vdi_not_found(self, fake_logger, fake_execute):
    """Snapshotting a missing source VDI logs and re-raises the error."""
    args = (self._src_vdiname, self._snapname)
    cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args)
    exit_code = 1
    stdout = ''
    stderr = self.test_data.DOG_VDI_SNAPSHOT_VDI_NOT_FOUND
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.create_snapshot, *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_create_snapshot_snap_name_already_used(self, fake_logger,
                                                fake_execute):
    """A duplicate snapshot name logs and re-raises the error."""
    args = (self._src_vdiname, self._snapname)
    cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args)
    exit_code = 1
    stdout = 'stdout_dummy'
    stderr = self.test_data.DOG_VDI_SNAPSHOT_ALREADY_EXISTED
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.create_snapshot, *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_create_snapshot_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized snapshot failures are logged and re-raised."""
    args = (self._src_vdiname, self._snapname)
    cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args)
    exit_code = 1
    stdout = 'stdout_dummy'
    stderr = 'unknown_error'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.create_snapshot, *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_delete_snapshot_success(self, fake_logger, fake_execute):
    """delete_snapshot() issues 'dog vdi delete -s <snap> <vdi>'."""
    args = (self._src_vdiname, self._snapname)
    expected_cmd = ('vdi', 'delete', '-s', self._snapname,
                    self._src_vdiname)
    fake_execute.return_value = ('', '')
    self.client.delete_snapshot(*args)
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_delete_snapshot_not_found(self, fake_logger, fake_execute):
    """Deleting a missing snapshot tag only warns; it must not raise."""
    args = (self._src_vdiname, self._snapname)
    stdout = ''
    stderr = self.test_data.DOG_VDI_SNAPSHOT_TAG_NOT_FOUND
    fake_execute.return_value = (stdout, stderr)
    self.client.delete_snapshot(*args)
    self.assertTrue(fake_logger.warning.called)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_delete_snapshot_vdi_not_found(self, fake_logger, fake_execute):
    """Deleting a snapshot of a missing VDI only warns; it must not raise."""
    args = (self._src_vdiname, self._snapname)
    stdout = ''
    stderr = self.test_data.DOG_VDI_SNAPSHOT_VOLUME_NOT_FOUND
    fake_execute.return_value = (stdout, stderr)
    self.client.delete_snapshot(*args)
    self.assertTrue(fake_logger.warning.called)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_delete_snapshot_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized snapshot-delete failures are logged and re-raised."""
    args = (self._src_vdiname, self._snapname)
    cmd = self.test_data.cmd_dog_vdi_delete_snapshot(*args)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'unknown_error'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.delete_snapshot, *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img')
def test_clone_success(self, fake_execute):
    """clone() issues 'qemu-img create -b sheepdog:<src>:<snap> <dst> <size>G'.

    Fix: the original set ``fake_execute.return_code`` — a typo for
    ``return_value`` that silently created an unused attribute on the mock.
    """
    args = (self._src_vdiname, self._snapname,
            self._dst_vdiname, self._dst_vdisize)
    src_volume = 'sheepdog:%(src_vdiname)s:%(snapname)s' % {
        'src_vdiname': self._src_vdiname, 'snapname': self._snapname}
    dst_volume = 'sheepdog:%s' % self._dst_vdiname
    expected_cmd = ('create', '-b', src_volume, dst_volume,
                    '%sG' % self._dst_vdisize)
    fake_execute.return_value = ('', '')
    self.client.clone(*args)
    fake_execute.assert_called_once_with(*expected_cmd)
@mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img')
@mock.patch.object(sheepdog, 'LOG')
def test_clone_dst_vdi_already_exists(self, fake_logger, fake_execute):
    """Cloning onto an existing destination VDI logs and re-raises."""
    args = (self._src_vdiname, self._snapname,
            self._dst_vdiname, self._dst_vdisize)
    cmd = self.test_data.cmd_qemuimg_vdi_clone(*args)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = self.test_data.QEMU_IMG_VDI_ALREADY_EXISTS
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone,
                           *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img')
@mock.patch.object(sheepdog, 'LOG')
def test_clone_src_vdi_not_found(self, fake_logger, fake_execute):
    """Cloning from a missing source VDI logs and re-raises."""
    args = (self._src_vdiname, self._snapname,
            self._dst_vdiname, self._dst_vdisize)
    cmd = self.test_data.cmd_qemuimg_vdi_clone(*args)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = self.test_data.QEMU_IMG_VDI_NOT_FOUND
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone,
                           *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img')
@mock.patch.object(sheepdog, 'LOG')
def test_clone_src_snapshot_not_found(self, fake_logger, fake_execute):
    """Cloning from a missing source snapshot logs and re-raises."""
    args = (self._src_vdiname, self._snapname,
            self._dst_vdiname, self._dst_vdisize)
    cmd = self.test_data.cmd_qemuimg_vdi_clone(*args)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = self.test_data.QEMU_IMG_SNAPSHOT_NOT_FOUND
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone,
                           *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img')
@mock.patch.object(sheepdog, 'LOG')
def test_clone_too_large_volume_size(self, fake_logger, fake_execute):
    """Cloning to a size beyond sheepdog's limit logs and re-raises."""
    args = (self._src_vdiname, self._snapname,
            self._dst_vdiname, self._dst_vdisize)
    cmd = self.test_data.cmd_qemuimg_vdi_clone(*args)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = self.test_data.QEMU_IMG_SIZE_TOO_LARGE
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone,
                           *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img')
@mock.patch.object(sheepdog, 'LOG')
def test_clone_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized clone failures are logged and re-raised."""
    args = (self._src_vdiname, self._snapname,
            self._dst_vdiname, self._dst_vdisize)
    cmd = self.test_data.cmd_qemuimg_vdi_clone(*args)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone,
                           *args)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_resize_success(self, fake_execute):
    """resize() converts GiB to bytes and issues 'dog vdi resize'."""
    expected_cmd = ('vdi', 'resize', self._vdiname, 10 * 1024 ** 3)
    fake_execute.return_value = ('', '')
    self.client.resize(self._vdiname, 10)
    fake_execute.assert_called_once_with(*expected_cmd)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_resize_vdi_not_found(self, fake_logger, fake_execute):
    """Resizing a missing VDI logs and re-raises the error.

    Fix: ``cmd`` is built for a 10 GiB resize, but ``resize()`` was invoked
    with ``1``.  The mismatch only went unnoticed because the mock raises
    unconditionally; the call now uses 10 to match the expected command
    (consistent with test_resize_unknown_error).
    """
    cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 10 * 1024 ** 3)
    exit_code = 1
    stdout = 'stdout_dummy'
    stderr = (self.test_data.DOG_COMMAND_ERROR_VDI_NOT_EXISTS %
              {'vdiname': self._vdiname})
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.resize, self._vdiname, 10)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_resize_shrinking_not_supported(self, fake_logger, fake_execute):
    """Shrinking a VDI is rejected by dog; the error is logged and raised."""
    cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 1 * 1024 ** 3)
    exit_code = 1
    stdout = 'stdout_dummy'
    stderr = self.test_data.DOG_VDI_RESIZE_SIZE_SHRINK
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.resize, self._vdiname, 1)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_resize_too_large_size(self, fake_logger, fake_execute):
    """Growing a VDI beyond sheepdog's maximum is logged and raised."""
    cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 5 * 1024 ** 4)
    exit_code = 64
    stdout = 'stdout_dummy'
    stderr = self.test_data.DOG_VDI_RESIZE_TOO_LARGE
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.resize, self._vdiname, 5120)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_resize_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized resize failures are logged and re-raised."""
    cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 10 * 1024 ** 3)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.resize, self._vdiname, 10)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_get_volume_stats_success(self, fake_execute):
    """get_volume_stats() issues 'dog node info -r'."""
    expected_cmd = ('node', 'info', '-r')
    fake_execute.return_value = (self.test_data.COLLIE_NODE_INFO, '')
    self.client.get_volume_stats()
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_get_volume_stats_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized 'node info' failures are logged and re-raised."""
    cmd = self.test_data.cmd_dog_node_info()
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.get_volume_stats)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_get_vdi_info_success(self, fake_execute):
    """get_vdi_info() issues 'dog vdi list <name> -r'."""
    expected_cmd = ('vdi', 'list', self._vdiname, '-r')
    fake_execute.return_value = (self.test_data.COLLIE_VDI_LIST, '')
    self.client.get_vdi_info(self._vdiname)
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_get_vdi_info_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized 'vdi list' failures are logged and re-raised."""
    cmd = self.test_data.cmd_dog_vdi_list(self._vdiname)
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.get_vdi_info, self._vdiname)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_update_node_list_success(self, fake_execute):
    """update_node_list() issues 'dog node list -r'."""
    expected_cmd = ('node', 'list', '-r')
    fake_execute.return_value = (self.test_data.COLLIE_NODE_LIST, '')
    self.client.update_node_list()
    fake_execute.assert_called_once_with(*expected_cmd)

@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
@mock.patch.object(sheepdog, 'LOG')
def test_update_node_list_unknown_error(self, fake_logger, fake_execute):
    """Unrecognized 'node list' failures are logged and re-raised."""
    cmd = self.test_data.cmd_dog_node_list()
    exit_code = 2
    stdout = 'stdout_dummy'
    stderr = 'stderr_dummy'
    expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                     exit_code=exit_code,
                                                     stdout=stdout,
                                                     stderr=stderr)
    fake_execute.side_effect = exception.SheepdogCmdError(
        cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
        stderr=stderr.replace('\n', '\\n'))
    ex = self.assertRaises(exception.SheepdogCmdError,
                           self.client.update_node_list)
    self.assertTrue(fake_logger.error.called)
    self.assertEqual(expected_msg, ex.msg)
class SheepdogDriverTestCase(test.TestCase):
def setUp(self):
    """Build a SheepdogDriver wired to test config and cache test fixtures."""
    super(SheepdogDriverTestCase, self).setUp()
    self._cfg = conf.Configuration(None)
    self._cfg.sheepdog_store_address = SHEEP_ADDR
    self._cfg.sheepdog_store_port = SHEEP_PORT
    self.driver = sheepdog.SheepdogDriver(configuration=self._cfg)
    db_driver = self.driver.configuration.db_driver
    self.db = importutils.import_module(db_driver)
    self.driver.db = self.db
    self.driver.do_setup(None)
    self.test_data = SheepdogDriverTestDataGenerator()
    self.client = self.driver.client
    # Convenience aliases for the fixture volume/snapshot identities used
    # throughout this test case.
    self._addr = SHEEP_ADDR
    self._port = SHEEP_PORT
    self._vdiname = self.test_data.TEST_VOLUME.name
    self._vdisize = self.test_data.TEST_VOLUME.size
    self._src_vdiname = self.test_data.TEST_SNAPSHOT.volume_name
    self._snapname = self.test_data.TEST_SNAPSHOT.name
    self._dst_vdiname = self.test_data.TEST_CLONED_VOLUME.name
    self._dst_vdisize = self.test_data.TEST_CLONED_VOLUME.size
@mock.patch.object(sheepdog.SheepdogClient, 'update_node_list')
@mock.patch.object(sheepdog.SheepdogClient, 'check_cluster_status')
def test_check_for_setup_error(self, fake_check, fake_update):
    """Setup check validates cluster status and refreshes the node list."""
    self.driver.check_for_setup_error()
    fake_check.assert_called_once_with()
    fake_update.assert_called_once_with()

@mock.patch.object(sheepdog.SheepdogClient, 'create')
def test_create_volume(self, fake_execute):
    """create_volume delegates to client.create with name and size."""
    self.driver.create_volume(self.test_data.TEST_VOLUME)
    fake_execute.assert_called_once_with(self._vdiname, self._vdisize)

@mock.patch.object(sheepdog.SheepdogClient, 'delete')
def test_delete_volume(self, fake_execute):
    """delete_volume delegates to client.delete with the VDI name."""
    self.driver.delete_volume(self.test_data.TEST_VOLUME)
    fake_execute.assert_called_once_with(self._vdiname)

@mock.patch.object(sheepdog.SheepdogClient, 'get_volume_stats')
def test_update_volume_stats(self, fake_execute):
    """Stats are parsed from 'node info' output into the standard dict."""
    fake_execute.return_value = self.test_data.COLLIE_NODE_INFO
    expected = dict(
        volume_backend_name='sheepdog',
        vendor_name='Open Source',
        driver_version=self.driver.VERSION,
        storage_protocol='sheepdog',
        # Totals taken from the COLLIE_NODE_INFO fixture, converted to GiB.
        total_capacity_gb=float(107287605248) / units.Gi,
        free_capacity_gb=float(107287605248 - 3623897354) / units.Gi,
        reserved_percentage=0,
        QoS_support=False)
    actual = self.driver.get_volume_stats(True)
    self.assertDictMatch(expected, actual)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_copy_image_to_volume(self, fake_run_dog):
    """copy_image_to_volume succeeds with fetch/convert/execute stubbed out."""
    @contextlib.contextmanager
    def fake_temp_file():
        # Minimal stand-in for image_utils.temporary_file that yields a name.
        class FakeTmp(object):
            def __init__(self, name):
                self.name = name
        yield FakeTmp('test').name

    def fake_try_execute(obj, *command, **kwargs):
        return True

    self.mock_object(image_utils, 'temporary_file', fake_temp_file)
    self.mock_object(image_utils, 'fetch_verify_image',
                     return_value=None)
    self.mock_object(image_utils, 'convert_image',
                     return_value=None)
    self.mock_object(sheepdog.SheepdogDriver, '_try_execute',
                     fake_try_execute)
    fake_run_dog.return_value = ('fake_stdout', 'fake_stderr')
    self.driver.copy_image_to_volume(None, self.test_data.TEST_VOLUME,
                                     FakeImageService(), None)
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.temporary_file')
def test_copy_volume_to_image(self, mock_temp, mock_open):
    """copy_volume_to_image converts the VDI via qemu-img and uploads it.

    Verifies the exact qemu-img convert command line, that the temporary
    file is reopened read-only for the upload, and that the image service
    receives the update.

    Fix: removed the dead ``fake_image_service_update = mock.Mock()``
    assignment — it was immediately shadowed by the ``with patch(...) as
    fake_image_service_update`` binding below.
    """
    fake_context = {}
    fake_volume = {'name': 'volume-00000001'}
    fake_image_service = mock.Mock()
    fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
    temp_file = mock_temp.return_value.__enter__.return_value
    patch = mock.patch.object
    with patch(self.driver, '_try_execute') as fake_try_execute:
        with patch(fake_image_service,
                   'update') as fake_image_service_update:
            self.driver.copy_volume_to_image(fake_context,
                                             fake_volume,
                                             fake_image_service,
                                             fake_image_meta)
            expected_cmd = ('qemu-img',
                            'convert',
                            '-f', 'raw',
                            '-t', 'none',
                            '-O', 'raw',
                            'sheepdog:%s:%s:%s' % (
                                self._addr,
                                self._port,
                                fake_volume['name']),
                            mock.ANY)
            mock_open.assert_called_once_with(temp_file, 'rb')
            fake_try_execute.assert_called_once_with(*expected_cmd)
            fake_image_service_update.assert_called_once_with(
                fake_context, fake_image_meta['id'], mock.ANY, mock.ANY)
@mock.patch('os.makedirs')
def test_copy_volume_to_image_nonexistent_volume(self, mock_make):
    """Exporting a volume that does not exist surfaces the execution error."""
    fake_context = {}
    fake_volume = {
        'name': 'nonexistent-volume-82c4539e-c2a5-11e4-a293-0aa186c60fe0'}
    fake_image_service = mock.Mock()
    fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
    # The command is expected to fail, so we don't want to retry it.
    self.driver._try_execute = self.driver._execute
    args = (fake_context, fake_volume, fake_image_service, fake_image_meta)
    # OSError covers the case where qemu-img itself is not installed.
    expected_errors = (processutils.ProcessExecutionError, OSError)
    self.assertRaises(expected_errors,
                      self.driver.copy_volume_to_image,
                      *args)
@mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
@mock.patch.object(sheepdog.SheepdogClient, 'clone')
@mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
def test_create_cloned_volume(self, fake_delete_snapshot,
fake_clone, fake_create_snapshot):
src_vol = self.test_data.TEST_VOLUME
cloned_vol = self.test_data.TEST_CLONED_VOLUME
self.driver.create_cloned_volume(cloned_vol, src_vol)
snapshot_name = src_vol.name + '-temp-snapshot'
fake_create_snapshot.assert_called_once_with(src_vol.name,
snapshot_name)
fake_clone.assert_called_once_with(src_vol.name, snapshot_name,
cloned_vol.name, cloned_vol.size)
fake_delete_snapshot.assert_called_once_with(src_vol.name,
snapshot_name)
@mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
@mock.patch.object(sheepdog.SheepdogClient, 'clone')
@mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
@mock.patch.object(sheepdog, 'LOG')
def test_create_cloned_volume_failure(self, fake_logger,
fake_delete_snapshot,
fake_clone, fake_create_snapshot):
src_vol = self.test_data.TEST_VOLUME
cloned_vol = self.test_data.TEST_CLONED_VOLUME
snapshot_name = src_vol.name + '-temp-snapshot'
fake_clone.side_effect = exception.SheepdogCmdError(
cmd='dummy', exit_code=1, stdout='dummy', stderr='dummy')
self.assertRaises(exception.SheepdogCmdError,
self.driver.create_cloned_volume,
cloned_vol, src_vol)
fake_delete_snapshot.assert_called_once_with(src_vol.name,
snapshot_name)
self.assertTrue(fake_logger.error.called)
    @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
    def test_create_snapshot(self, fake_create_snapshot):
        """Snapshot creation is delegated to the client."""
        snapshot = self.test_data.TEST_SNAPSHOT
        self.driver.create_snapshot(snapshot)
        fake_create_snapshot.assert_called_once_with(snapshot.volume_name,
                                                     snapshot.name)
    @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
    def test_delete_snapshot(self, fake_delete_snapshot):
        """Snapshot deletion is delegated to the client."""
        snapshot = self.test_data.TEST_SNAPSHOT
        self.driver.delete_snapshot(snapshot)
        fake_delete_snapshot.assert_called_once_with(snapshot.volume_name,
                                                     snapshot.name)
    def test_clone_image_success(self):
        """A cloneable sheepdog-backed image is cloned and the model update
        carries the new sheepdog provider location."""
        context = {}
        image_id = "caa4ffd0-fake-fake-fake-f8631a807f5a"
        image_location = ('sheepdog://192.168.1.111:7000:%s' % image_id, None)
        image_meta = {'id': image_id, 'size': 1, 'disk_format': 'raw'}
        image_service = ''
        patch = mock.patch.object
        with patch(self.driver, '_is_cloneable', return_value=True):
            with patch(self.driver, 'create_cloned_volume'):
                with patch(self.client, 'resize'):
                    model_updated, cloned = self.driver.clone_image(
                        context, self.test_data.TEST_CLONED_VOLUME,
                        image_location, image_meta, image_service)
        self.assertTrue(cloned)
        self.assertEqual("sheepdog:%s:%s:%s" % (
                         self._addr,
                         self._port,
                         self.test_data.TEST_CLONED_VOLUME.name),
                         model_updated['provider_location'])
    def test_clone_image_failure(self):
        """A non-cloneable image makes clone_image return ({}, False)."""
        context = {}
        fake_vol = {}
        image_location = ('image_location', None)
        image_meta = {}
        image_service = ''
        with mock.patch.object(self.driver, '_is_cloneable',
                               lambda *args: False):
            result = self.driver.clone_image(
                context, fake_vol, image_location, image_meta, image_service)
        self.assertEqual(({}, False), result)
    def test_is_cloneable(self):
        """_is_cloneable needs a sheepdog:// location, a VDI present in the
        cluster, and a 'raw' disk format."""
        uuid = '87f1b01c-f46c-4537-bd5d-23962f5f4316'
        location = 'sheepdog://127.0.0.1:7000:%s' % uuid
        image_meta = {'id': uuid, 'size': 1, 'disk_format': 'raw'}
        invalid_image_meta = {'id': uuid, 'size': 1, 'disk_format': 'iso'}
        with mock.patch.object(self.client, 'get_vdi_info') as fake_execute:
            fake_execute.return_value = self.test_data.COLLIE_VDI_LIST
            self.assertTrue(
                self.driver._is_cloneable(location, image_meta))
            # Test for invalid location
            self.assertFalse(
                self.driver._is_cloneable('invalid-location', image_meta))
            # Test for image not exist in sheepdog cluster
            fake_execute.return_value = ''
            self.assertFalse(
                self.driver._is_cloneable(location, image_meta))
            # Test for invalid image meta
            self.assertFalse(
                self.driver._is_cloneable(location, invalid_image_meta))
    def test_create_volume_from_snapshot(self):
        """Creating from a snapshot clones the snapshot into the new VDI at
        the requested size."""
        dst_volume = self.test_data.TEST_CLONED_VOLUME
        snapshot = self.test_data.TEST_SNAPSHOT
        with mock.patch.object(self.client, 'clone') as fake_execute:
            self.driver.create_volume_from_snapshot(dst_volume, snapshot)
            fake_execute.assert_called_once_with(self._src_vdiname,
                                                 self._snapname,
                                                 self._dst_vdiname,
                                                 self._dst_vdisize)
    def test_initialize_connection(self):
        """Connection info advertises the sheepdog volume name plus the
        configured host/port."""
        fake_volume = self.test_data.TEST_VOLUME
        expected = {
            'driver_volume_type': 'sheepdog',
            'data': {
                'name': fake_volume.name,
                'hosts': ["127.0.0.1"],
                'ports': ["7000"],
            }
        }
        actual = self.driver.initialize_connection(fake_volume, None)
        self.assertDictMatch(expected, actual)
    @mock.patch.object(sheepdog.SheepdogClient, 'resize')
    @mock.patch.object(sheepdog, 'LOG')
    def test_extend_volume(self, fake_logger, fake_execute):
        """Extending resizes the VDI via the client and logs at debug."""
        self.driver.extend_volume(self.test_data.TEST_VOLUME, 10)
        fake_execute.assert_called_once_with(self._vdiname, 10)
        self.assertTrue(fake_logger.debug.called)
    @mock.patch.object(db, 'volume_get')
    @mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
    @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
    @mock.patch.object(backup_driver, 'BackupDriver')
    @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
    def test_backup_volume_success(self, fake_delete_snapshot,
                                   fake_backup_service, fake_create_snapshot,
                                   fake_execute, fake_volume_get):
        """Backup snapshots the volume, hands a SheepdogIOWrapper to the
        backup service, and cleans up (create called once, delete twice)."""
        fake_context = {}
        fake_volume = self.test_data.TEST_VOLUME
        fake_backup = self.test_data.TEST_BACKUP_VOLUME
        # Shadows the patched-in BackupDriver mock: the test drives its own.
        fake_backup_service = mock.Mock()
        fake_volume_get.return_value = fake_volume
        self.driver.backup_volume(fake_context,
                                  fake_backup,
                                  fake_backup_service)
        self.assertEqual(1, fake_create_snapshot.call_count)
        self.assertEqual(2, fake_delete_snapshot.call_count)
        self.assertEqual(fake_create_snapshot.call_args,
                         fake_delete_snapshot.call_args)
        call_args, call_kwargs = fake_backup_service.backup.call_args
        call_backup, call_sheepdog_fd = call_args
        self.assertEqual(fake_backup, call_backup)
        self.assertIsInstance(call_sheepdog_fd, sheepdog.SheepdogIOWrapper)
    @mock.patch.object(db, 'volume_get')
    @mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
    @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
    @mock.patch.object(backup_driver, 'BackupDriver')
    @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
    def test_backup_volume_fail_to_create_snap(self, fake_delete_snapshot,
                                               fake_backup_service,
                                               fake_create_snapshot,
                                               fake_execute, fake_volume_get):
        """A failing snapshot creation surfaces as SheepdogError; the
        cleanup delete still runs once with matching arguments."""
        fake_context = {}
        fake_volume = self.test_data.TEST_VOLUME
        fake_backup = self.test_data.TEST_BACKUP_VOLUME
        fake_volume_get.return_value = fake_volume
        fake_create_snapshot.side_effect = exception.SheepdogCmdError(
            cmd='dummy', exit_code=1, stdout='dummy', stderr='dummy')
        self.assertRaises(exception.SheepdogError,
                          self.driver.backup_volume,
                          fake_context,
                          fake_backup,
                          fake_backup_service)
        self.assertEqual(1, fake_create_snapshot.call_count)
        self.assertEqual(1, fake_delete_snapshot.call_count)
        self.assertEqual(fake_create_snapshot.call_args,
                         fake_delete_snapshot.call_args)
    @mock.patch.object(db, 'volume_get')
    @mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
    @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
    @mock.patch.object(backup_driver, 'BackupDriver')
    @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
    def test_backup_volume_fail_to_backup_vol(self, fake_delete_snapshot,
                                              fake_backup_service,
                                              fake_create_snapshot,
                                              fake_execute, fake_volume_get):
        """Errors raised by the backup service itself propagate unchanged,
        and the temporary snapshot is still cleaned up (deleted twice)."""
        fake_context = {}
        fake_volume = self.test_data.TEST_VOLUME
        fake_backup = self.test_data.TEST_BACKUP_VOLUME
        fake_volume_get.return_value = fake_volume
        # Local error type: only its propagation matters to the test.
        class BackupError(Exception):
            pass
        fake_backup_service.backup.side_effect = BackupError()
        self.assertRaises(BackupError,
                          self.driver.backup_volume,
                          fake_context,
                          fake_backup,
                          fake_backup_service)
        self.assertEqual(1, fake_create_snapshot.call_count)
        self.assertEqual(2, fake_delete_snapshot.call_count)
        self.assertEqual(fake_create_snapshot.call_args,
                         fake_delete_snapshot.call_args)
    @mock.patch.object(backup_driver, 'BackupDriver')
    def test_restore_backup(self, fake_backup_service):
        """Restore hands (backup, volume id, SheepdogIOWrapper) to the
        backup service."""
        fake_context = {}
        fake_backup = self.test_data.TEST_BACKUP_VOLUME
        fake_volume = self.test_data.TEST_VOLUME
        self.driver.restore_backup(
            fake_context, fake_backup, fake_volume, fake_backup_service)
        call_args, call_kwargs = fake_backup_service.restore.call_args
        call_backup, call_volume_id, call_sheepdog_fd = call_args
        self.assertEqual(fake_backup, call_backup)
        self.assertEqual(fake_volume.id, call_volume_id)
        self.assertIsInstance(call_sheepdog_fd, sheepdog.SheepdogIOWrapper)
| Nexenta/cinder | cinder/tests/unit/volume/drivers/test_sheepdog.py | Python | apache-2.0 | 68,464 |
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2006 Jorgen Stenarson. <jorgen.stenarson@bostream.nu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from __future__ import print_function, unicode_literals, absolute_import
import re, operator
def str_find_all(str, ch):
    """Return the 0-based indices of every occurrence of *ch* in *str*."""
    positions = []
    pos = str.find(ch)
    while pos != -1:
        positions.append(pos)
        pos = str.find(ch, pos + 1)
    return positions
# Capturing group keeps the runs of 'x' in the split output.
word_pattern = re.compile("(x*)")
def markwords(str, iswordfun):
    """Encode *str* as a mask: 'x' where iswordfun(ch) holds, 'o' elsewhere."""
    markers = {True: "x", False: "o"}
    return "".join(markers[iswordfun(ch)] for ch in str)
def split_words(str, iswordfun):
    """Split the x/o mask of *str* into maximal runs, dropping empty pieces."""
    pieces = word_pattern.split(markwords(str, iswordfun))
    return [piece for piece in pieces if piece != ""]
def mark_start_segment(str, is_segment):
    """Return the x/o mask of *str* with the first 'x' of each run replaced
    by 's' (marking segment starts)."""
    def tag_first(seg):
        if seg.startswith("x"):
            return "s" + seg[1:]
        return seg
    return "".join(tag_first(seg) for seg in split_words(str, is_segment))
def mark_end_segment(str, is_segment):
    """Return the x/o mask of *str* with the last 'x' of each run replaced
    by 's' (marking segment ends)."""
    def tag_last(seg):
        if seg.startswith("x"):
            return seg[:-1] + "s"
        return seg
    return "".join(tag_last(seg) for seg in split_words(str, is_segment))
def mark_start_segment_index(str, is_segment):
    # 0-based positions where segments begin.
    return str_find_all(mark_start_segment(str, is_segment), "s")
def mark_end_segment_index(str, is_segment):
    # Positions one past the last character of each segment (exclusive ends).
    return [x + 1 for x in str_find_all(
        mark_end_segment(str, is_segment), "s")]
################ Following are used in lineobj ###########################
def is_word_token(str):
    """True when *str* is a single non-whitespace character."""
    return not is_non_word_token(str)
def is_non_word_token(str):
    """True unless *str* is exactly one character outside space/tab/newline."""
    return len(str) != 1 or str in " \t\n"
def next_start_segment(str, is_segment):
    """For every cursor position, the index of the next segment start.

    Positions at or after the last segment start map to len(str). The result
    has len(str) + 1 entries, covering a cursor placed after the last char.
    """
    str = "".join(str)
    result = []
    for start in mark_start_segment_index(str, is_segment):
        # Every position before this start jumps forward to it.
        result[len(result):start] = [start for x in range(start - len(result))]
    result[len(result):len(str)] = [len(str)
                                    for x in range(len(str) - len(result) + 1)]
    return result
def next_end_segment(str, is_segment):
    """For every cursor position, the index of the next segment end
    (exclusive). Positions past the last end map to len(str); the result has
    len(str) + 1 entries."""
    str = "".join(str)
    result = []
    for start in mark_end_segment_index(str, is_segment):
        # Every position before this end jumps forward to it.
        result[len(result):start] = [start for x in range(start - len(result))]
    result[len(result):len(str)] = [len(str)
                                    for x in range(len(str) - len(result) + 1)]
    return result
def prev_start_segment(str, is_segment):
    """For every cursor position, the index of the previous segment start.

    A position at a segment start maps to the start *before* it (0 when
    there is none). The result has len(str) + 1 entries.
    """
    str = "".join(str)
    result = []
    prev = 0
    for start in mark_start_segment_index(str, is_segment):
        # Positions up to and including this start jump back to the
        # previously seen start.
        result[len(result):start + 1] = [
            prev for x in range(start - len(result) + 1)
        ]
        prev = start
    result[len(result):len(str)] = [prev
                                    for x in range(len(str) - len(result) + 1)]
    return result
def prev_end_segment(str, is_segment):
    """For every cursor position, the index of the previous segment end.

    NOTE(review): unlike prev_start_segment, the trailing fill uses len(str)
    rather than the last seen end -- looks deliberate for end-of-line motion,
    but verify against the lineobj callers.
    """
    str = "".join(str)
    result = []
    prev = 0
    for start in mark_end_segment_index(str, is_segment):
        result[len(result):start + 1] = [
            prev for x in range(start - len(result) + 1)
        ]
        prev = start
    result[len(result):len(str)] = [len(str)
                                    for x in range(len(str) - len(result) + 1)]
    return result
| zscproject/OWASP-ZSC | module/readline_windows/pyreadline/lineeditor/wordmatcher.py | Python | gpl-3.0 | 3,535 |
def setUp(self):
import os
ov_binany_path=os.environ['OV_BINARY_PATH']
self.terminal = App.open("xterm -e " + ov_binany_path +"/openvibe-designer.sh --no-session-management")
while not self.terminal.window():
wait(1)
wait("StartInterface.png",10)
    def test_boxSetAttributes(self):
        """Drag a Sinus Oscillator box into the scenario, rename it via the
        context menu, and check the new name is shown."""
        click("SearchBoxBar.png")
        paste("sinus")
        dragDrop("Sinusoscilla-1.png",Pattern("DesignerDataGenOpen.png").similar(0.40).targetOffset(-233,-163))
        rightClick("SinusOscillatorBoxSelected.png")
        click(Pattern("contextualBoxMenu.png").targetOffset(-51,16))
        assert(exists("renameBoxPopUp.png"))
        type("XXXX XXXX XXXX"+ Key.ENTER)
        assert(exists(Pattern("SinusOscillatorNewNameXXX.png").similar(0.50)))
    def tearDown(self):
        """Close the designer and give the window manager time to settle."""
        App.close(self.terminal)
        self.terminal= None
        wait(2)
| avilleret/openvibe | applications/platform/designer/test/testBoxSetAttribute.UNIX.sikuli/testBoxSetAttribute.UNIX.py | Python | agpl-3.0 | 825 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.base import XmlResponse
class AWSBaseResponse(XmlResponse):
    """Marker base class for AWS responses; currently identical to
    XmlResponse."""
    pass
| ninefold/libcloud | libcloud/common/aws.py | Python | apache-2.0 | 874 |
"""Generic, built-in renderers."""
from dagny.action import Action
from dagny.utils import camel_to_underscore, resource_name
@Action.RENDERER.html
def render_html(action, resource, content_type=None, status=None,
                current_app=None):
    """
    Render an HTML response for an action.

    The template name is assembled as
    `<template_path_prefix><resource_label>/<action>.html`, where the
    resource label is the resource's class name (a trailing `Resource` is
    removed first) converted from CamelCase to lowercase_underscore:

        User => user
        UserResource => user
        NameXYZ => name_xyz
        XYZName => xyz_name

    A resource may set `template_path_prefix` (e.g. `'auth/'`), so a `User`
    resource with that prefix and a `show` action renders `auth/user/show.html`.

    Rendering goes through `django.shortcuts.render()`, which uses a
    `RequestContext` (configured context processors apply) and exposes the
    resource instance to the template as `self`, making action attribute
    assignments available.
    """
    from django.shortcuts import render

    label = camel_to_underscore(resource_name(resource))
    prefix = getattr(resource, 'template_path_prefix', "")
    template = "%s%s/%s.html" % (prefix, label, action.name)

    return render(resource.request, template, {'self': resource},
                  content_type=content_type, status=status,
                  current_app=current_app)
| zacharyvoase/dagny | src/dagny/renderers.py | Python | unlicense | 2,106 |
# Generate some random 64-bit values to use as constants for random array.
# Output is a C-style initializer list printed to stdout.
import numpy as np
import binascii

np.random.seed(0)  # deterministic output across runs

# TODO: check that hashes are optimal. This should be straightforward, but I'm
# checking this in early because even a potentially non-optimal version is a
# huge improvement.
num_entries = 64

print("{")
for _ in range(num_entries):
    print("0x%s," % binascii.hexlify(np.random.bytes(8)).decode())
print("}")
| BitFunnel/BitFunnel | src/Scripts/get-random-hashes.py | Python | mit | 451 |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pytest
from scipy.spatial import geometric_slerp
def _generate_spherical_points(ndim=3, n_pts=2):
# generate uniform points on sphere
# see: https://stackoverflow.com/a/23785326
# tentatively extended to arbitrary dims
# for 0-sphere it will always produce antipodes
np.random.seed(123)
points = np.random.normal(size=(n_pts, ndim))
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
return points[0], points[1]
class TestGeometricSlerp(object):
    """Unit tests for geometric_slerp: shapes, endpoints, input validation,
    known-value interpolations, ordering, and numerical stability."""
    # Test various properties of the geometric slerp code
    @pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
    @pytest.mark.parametrize("n_pts", [0, 3, 17])
    def test_shape_property(self, n_dims, n_pts):
        # geometric_slerp output shape should match
        # input dimensionality & requested number
        # of interpolation points
        start, end = _generate_spherical_points(n_dims, 2)
        actual = geometric_slerp(start=start,
                                 end=end,
                                 t=np.linspace(0, 1, n_pts))
        assert actual.shape == (n_pts, n_dims)
    @pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
    @pytest.mark.parametrize("n_pts", [3, 17])
    def test_include_ends(self, n_dims, n_pts):
        # geometric_slerp should return a data structure
        # that includes the start and end coordinates
        # when t includes 0 and 1 ends
        # this is convenient for plotting surfaces represented
        # by interpolations for example
        # the generator doesn't work so well for the unit
        # sphere (it always produces antipodes), so use
        # custom values there
        start, end = _generate_spherical_points(n_dims, 2)
        actual = geometric_slerp(start=start,
                                 end=end,
                                 t=np.linspace(0, 1, n_pts))
        assert_allclose(actual[0], start)
        assert_allclose(actual[-1], end)
    @pytest.mark.parametrize("start, end", [
        # both arrays are not flat
        (np.zeros((1, 3)), np.ones((1, 3))),
        # only start array is not flat
        (np.zeros((1, 3)), np.ones(3)),
        # only end array is not flat
        (np.zeros(1), np.ones((3, 1))),
        ])
    def test_input_shape_flat(self, start, end):
        # geometric_slerp should handle input arrays that are
        # not flat appropriately
        with pytest.raises(ValueError, match='one-dimensional'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 10))
    @pytest.mark.parametrize("start, end", [
        # 7-D and 3-D ends
        (np.zeros(7), np.ones(3)),
        # 2-D and 1-D ends
        (np.zeros(2), np.ones(1)),
        # empty, "3D" will also get caught this way
        (np.array([]), np.ones(3)),
        ])
    def test_input_dim_mismatch(self, start, end):
        # geometric_slerp must appropriately handle cases where
        # an interpolation is attempted across two different
        # dimensionalities
        with pytest.raises(ValueError, match='dimensions'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 10))
    @pytest.mark.parametrize("start, end", [
        # both empty
        (np.array([]), np.array([])),
        ])
    def test_input_at_least1d(self, start, end):
        # empty inputs to geometric_slerp must
        # be handled appropriately when not detected
        # by mismatch
        with pytest.raises(ValueError, match='at least two-dim'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 10))
    @pytest.mark.parametrize("start, end, expected", [
        # North and South Poles are definitely antipodes
        # but should be handled gracefully now
        (np.array([0, 0, 1.0]), np.array([0, 0, -1.0]), "warning"),
        # this case will issue a warning & be handled
        # gracefully as well;
        # North Pole was rotated very slightly
        # using r = R.from_euler('x', 0.035, degrees=True)
        # to achieve Euclidean distance offset from diameter by
        # 9.328908379124812e-08, within the default tol
        (np.array([0.00000000e+00,
                   -6.10865200e-04,
                   9.99999813e-01]), np.array([0, 0, -1.0]), "warning"),
        # this case should succeed without warning because a
        # sufficiently large
        # rotation was applied to North Pole point to shift it
        # to a Euclidean distance of 2.3036691931821451e-07
        # from South Pole, which is larger than tol
        (np.array([0.00000000e+00,
                   -9.59930941e-04,
                   9.99999539e-01]), np.array([0, 0, -1.0]), "success"),
        ])
    def test_handle_antipodes(self, start, end, expected):
        # antipodal points must be handled appropriately;
        # there are an infinite number of possible geodesic
        # interpolations between them in higher dims
        if expected == "warning":
            with pytest.warns(UserWarning, match='antipodes'):
                res = geometric_slerp(start=start,
                                      end=end,
                                      t=np.linspace(0, 1, 10))
        else:
            res = geometric_slerp(start=start,
                                  end=end,
                                  t=np.linspace(0, 1, 10))
        # antipodes or near-antipodes should still produce
        # slerp paths on the surface of the sphere (but they
        # may be ambiguous):
        assert_allclose(np.linalg.norm(res, axis=1), 1.0)
    @pytest.mark.parametrize("start, end, expected", [
        # 2-D with n_pts=4 (two new interpolation points)
        # this is an actual circle
        (np.array([1, 0]),
         np.array([0, 1]),
         np.array([[1, 0],
                   [np.sqrt(3) / 2, 0.5],  # 30 deg on unit circle
                   [0.5, np.sqrt(3) / 2],  # 60 deg on unit circle
                   [0, 1]])),
        # likewise for 3-D (add z = 0 plane)
        # this is an ordinary sphere
        (np.array([1, 0, 0]),
         np.array([0, 1, 0]),
         np.array([[1, 0, 0],
                   [np.sqrt(3) / 2, 0.5, 0],
                   [0.5, np.sqrt(3) / 2, 0],
                   [0, 1, 0]])),
        # for 5-D, pad more columns with constants
        # zeros are easiest--non-zero values on unit
        # circle are more difficult to reason about
        # at higher dims
        (np.array([1, 0, 0, 0, 0]),
         np.array([0, 1, 0, 0, 0]),
         np.array([[1, 0, 0, 0, 0],
                   [np.sqrt(3) / 2, 0.5, 0, 0, 0],
                   [0.5, np.sqrt(3) / 2, 0, 0, 0],
                   [0, 1, 0, 0, 0]])),
        ])
    def test_straightforward_examples(self, start, end, expected):
        # some straightforward interpolation tests, sufficiently
        # simple to use the unit circle to deduce expected values;
        # for larger dimensions, pad with constants so that the
        # data is N-D but simpler to reason about
        actual = geometric_slerp(start=start,
                                 end=end,
                                 t=np.linspace(0, 1, 4))
        assert_allclose(actual, expected, atol=1e-16)
    @pytest.mark.parametrize("t", [
        # both interval ends clearly violate limits
        np.linspace(-20, 20, 300),
        # only one interval end violating limit slightly
        np.linspace(-0.0001, 0.0001, 17),
        ])
    def test_t_values_limits(self, t):
        # geometric_slerp() should appropriately handle
        # interpolation parameters < 0 and > 1
        with pytest.raises(ValueError, match='interpolation parameter'):
            actual = geometric_slerp(start=np.array([1, 0]),
                                     end=np.array([0, 1]),
                                     t=t)
    @pytest.mark.parametrize("start, end", [
        (np.array([1]),
         np.array([0])),
        (np.array([0]),
         np.array([1])),
        (np.array([-17.7]),
         np.array([165.9])),
        ])
    def test_0_sphere_handling(self, start, end):
        # it does not make sense to interpolate the set of
        # two points that is the 0-sphere
        with pytest.raises(ValueError, match='at least two-dim'):
            actual = geometric_slerp(start=start,
                                     end=end,
                                     t=np.linspace(0, 1, 4))
    @pytest.mark.parametrize("tol", [
        # an integer currently raises
        5,
        # string raises
        "7",
        # list and arrays also raise
        [5, 6, 7], np.array(9.0),
        ])
    def test_tol_type(self, tol):
        # geometric_slerp() should raise if tol is not
        # a suitable float type
        with pytest.raises(ValueError, match='must be a float'):
            actual = geometric_slerp(start=np.array([1, 0]),
                                     end=np.array([0, 1]),
                                     t=np.linspace(0, 1, 5),
                                     tol=tol)
    @pytest.mark.parametrize("tol", [
        -5e-6,
        -7e-10,
        ])
    def test_tol_sign(self, tol):
        # geometric_slerp() currently handles negative
        # tol values, as long as they are floats
        actual = geometric_slerp(start=np.array([1, 0]),
                                 end=np.array([0, 1]),
                                 t=np.linspace(0, 1, 5),
                                 tol=tol)
    @pytest.mark.parametrize("start, end", [
        # 1-sphere (circle) with one point at origin
        # and the other on the circle
        (np.array([1, 0]), np.array([0, 0])),
        # 2-sphere (normal sphere) with both points
        # just slightly off sphere by the same amount
        # in different directions
        (np.array([1 + 1e-6, 0, 0]),
         np.array([0, 1 - 1e-6, 0])),
        # same thing in 4-D
        (np.array([1 + 1e-6, 0, 0, 0]),
         np.array([0, 1 - 1e-6, 0, 0])),
        ])
    def test_unit_sphere_enforcement(self, start, end):
        # geometric_slerp() should raise on input that clearly
        # cannot be on an n-sphere of radius 1
        with pytest.raises(ValueError, match='unit n-sphere'):
            geometric_slerp(start=start,
                            end=end,
                            t=np.linspace(0, 1, 5))
    @pytest.mark.parametrize("start, end", [
        # 1-sphere 45 degree case
        (np.array([1, 0]),
         np.array([np.sqrt(2) / 2.,
                   np.sqrt(2) / 2.])),
        # 2-sphere 135 degree case
        (np.array([1, 0]),
         np.array([-np.sqrt(2) / 2.,
                   np.sqrt(2) / 2.])),
        ])
    @pytest.mark.parametrize("t_func", [
        np.linspace, np.logspace])
    def test_order_handling(self, start, end, t_func):
        # geometric_slerp() should handle scenarios with
        # ascending and descending t value arrays gracefully;
        # results should simply be reversed
        # for scrambled / unsorted parameters, the same values
        # should be returned, just in scrambled order
        num_t_vals = 20
        np.random.seed(789)
        forward_t_vals = t_func(0, 10, num_t_vals)
        # normalize to max of 1
        forward_t_vals /= forward_t_vals.max()
        reverse_t_vals = np.flipud(forward_t_vals)
        shuffled_indices = np.arange(num_t_vals)
        np.random.shuffle(shuffled_indices)
        scramble_t_vals = forward_t_vals.copy()[shuffled_indices]
        forward_results = geometric_slerp(start=start,
                                          end=end,
                                          t=forward_t_vals)
        reverse_results = geometric_slerp(start=start,
                                          end=end,
                                          t=reverse_t_vals)
        scrambled_results = geometric_slerp(start=start,
                                            end=end,
                                            t=scramble_t_vals)
        # check fidelity to input order
        assert_allclose(forward_results, np.flipud(reverse_results))
        assert_allclose(forward_results[shuffled_indices],
                        scrambled_results)
    @pytest.mark.parametrize("t", [
        # string:
        "15, 5, 7",
        # complex numbers currently produce a warning
        # but not sure we need to worry about it too much:
        # [3 + 1j, 5 + 2j],
        ])
    def test_t_values_conversion(self, t):
        # non-numeric t must be rejected
        with pytest.raises(ValueError):
            scrambled_results = geometric_slerp(start=np.array([1]),
                                                end=np.array([0]),
                                                t=t)
    def test_accept_arraylike(self):
        # array-like support requested by reviewer
        # in gh-10380
        actual = geometric_slerp([1, 0], [0, 1], [0, 1/3, 0.5, 2/3, 1])
        # expected values are based on visual inspection
        # of the unit circle for the progressions along
        # the circumference provided in t
        expected = np.array([[1, 0],
                             [np.sqrt(3) / 2, 0.5],
                             [np.sqrt(2) / 2,
                              np.sqrt(2) / 2],
                             [0.5, np.sqrt(3) / 2],
                             [0, 1]], dtype=np.float64)
        # Tyler's original Cython implementation of geometric_slerp
        # can pass at atol=0 here, but on balance we will accept
        # 1e-16 for an implementation that avoids Cython and
        # makes up accuracy ground elsewhere
        assert_allclose(actual, expected, atol=1e-16)
    def test_scalar_t(self):
        # when t is a scalar, return value is a single
        # interpolated point of the appropriate dimensionality
        # requested by reviewer in gh-10380
        actual = geometric_slerp([1, 0], [0, 1], 0.5)
        expected = np.array([np.sqrt(2) / 2,
                             np.sqrt(2) / 2], dtype=np.float64)
        assert actual.shape == (2,)
        assert_allclose(actual, expected)
    @pytest.mark.parametrize('start', [
        np.array([1, 0, 0]),
        np.array([0, 1]),
        ])
    def test_degenerate_input(self, start):
        # handle start == end with repeated value
        # like np.linspace
        expected = [start] * 5
        actual = geometric_slerp(start=start,
                                 end=start,
                                 t=np.linspace(0, 1, 5))
        assert_allclose(actual, expected)
    @pytest.mark.parametrize('k', np.logspace(-10, -1, 10))
    def test_numerical_stability_pi(self, k):
        # geometric_slerp should have excellent numerical
        # stability for angles approaching pi between
        # the start and end points
        angle = np.pi - k
        ts = np.linspace(0, 1, 100)
        P = np.array([1, 0, 0, 0])
        Q = np.array([np.cos(angle), np.sin(angle), 0, 0])
        # the test should only be enforced for cases where
        # geometric_slerp determines that the input is actually
        # on the unit sphere
        with np.testing.suppress_warnings() as sup:
            sup.filter(UserWarning)
            result = geometric_slerp(P, Q, ts, 1e-18)
            norms = np.linalg.norm(result, axis=1)
            error = np.max(np.abs(norms - 1))
            assert error < 4e-15
| person142/scipy | scipy/spatial/tests/test_slerp.py | Python | bsd-3-clause | 15,574 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function
import pytest
from pytest_sftpserver.sftp.server import SFTPServer
@pytest.yield_fixture(scope="session")
def sftpserver(request):
    """Session-scoped fixture yielding a started SFTPServer; the server is
    shut down at session teardown if still alive."""
    server = SFTPServer()
    server.start()
    yield server
    if server.is_alive():
        server.shutdown()
| ulope/pytest-sftpserver | pytest_sftpserver/plugin.py | Python | mit | 334 |
"""
Copyright (c), Privacy By Design Foundation
All rights reserved.
This source code has been ported from https://github.com/privacybydesign/irmago
The authors of this file are not -in any way- affiliated with the original authors or organizations.
"""
import binascii
import calendar
import datetime
import hashlib
import time
from .....util import int2byte
ExpiryFactor = 60 * 60 * 24 * 7  # one week in seconds; granularity of dates
metadataLength = 1 + 3 + 2 + 2 + 16  # version + signing date + validity + key counter + credential id
class metadataField(object):
    """Byte window (length, offset) of one field inside the packed
    metadata attribute."""
    def __init__(self, length, offset):
        self.length = length
        self.offset = offset
versionField = metadataField(1, 0)  # metadata format version
signingDateField = metadataField(3, 1)  # signing date, in ExpiryFactor units
validityField = metadataField(2, 4)  # validity duration, in weeks
keyCounterField = metadataField(2, 6)  # issuer key counter
credentialID = metadataField(16, 8)  # first 16 bytes of SHA-256 of type id
def int_to_str(n):
    """Serialize a non-negative int to minimal big-endian bytes.

    Fix: the original used ``hex(n).lstrip('0x')``, but ``lstrip`` strips
    *any* leading '0'/'x' characters rather than the '0x' prefix, so
    ``int_to_str(0)`` produced ``b''``. Formatting with ``'%x'`` removes the
    prefix correctly (and never appends the Python 2 'L' suffix), so 0 now
    yields ``b'\\x00'``; all positive inputs are encoded as before.
    """
    hexInt = '%x' % n
    if (len(hexInt) % 2) == 1:
        hexInt = '0' + hexInt
    return binascii.unhexlify(hexInt)
def shortToByte(x):
    # Low-order two bytes of x, big-endian (fewer bytes when x < 0x100).
    return int_to_str(x)[-2:]
class MetadataAttribute(object):
    """Packed IRMA metadata attribute: version, signing date, validity,
    key counter and credential-type hash, stored big-endian in ``self.Int``.
    """
    def __init__(self, version):
        self.Int = 0
        self.pk = None
        self.Conf = None
        self.setField(versionField, version)
        self.setSigningDate()
        self.setKeyCounter(0)
        self.setExpiryDate()
    def Bytes(self):
        # Big-endian byte form of Int, zero-padded on the right up to
        # metadataLength.
        bytez = int_to_str(self.Int)
        if len(bytez) < metadataLength:
            bytez += b'\x00' * (metadataLength - len(bytez))
        return bytez
    def setField(self, field, value):
        # Write *value* right-aligned into the field's byte window, zero
        # filling the leading slack, then repack the whole buffer into Int.
        bytez_array = [int2byte(c) if isinstance(c, int) else c for c in self.Bytes()]
        startindex = field.length - len(value)
        for i in range(field.length):
            if i < startindex:
                bytez_array[i + field.offset] = b'\x00'
            else:
                bytez_array[i + field.offset] = value[i - startindex:i - startindex + 1]
        self.Int = int(binascii.hexlify(b''.join(bytez_array)), 16)
    def field(self, field):
        # Raw bytes of a single metadata field.
        return self.Bytes()[field.offset:field.offset + field.length]
    def setSigningDate(self, timestamp=None):
        # *timestamp*, when given, is already expressed in ExpiryFactor
        # (week) units; by default the current time is converted.
        if timestamp:
            self.setField(signingDateField, shortToByte(timestamp))
        else:
            self.setField(signingDateField, shortToByte(int(time.time() / ExpiryFactor)))
    def setKeyCounter(self, i):
        self.setField(keyCounterField, shortToByte(i))
    def SigningDate(self):
        # Decode the stored signing date back to epoch seconds. The first of
        # the three field bytes is dropped, matching the two-byte writes of
        # shortToByte.
        bytez_array = [int2byte(c) if isinstance(c, int) else c for c in self.field(signingDateField)]
        bytez_array = bytez_array[1:]
        timestamp = int(binascii.hexlify(b''.join(bytez_array)), 16) * ExpiryFactor
        return timestamp
    def setValidityDuration(self, weeks):
        self.setField(validityField, shortToByte(weeks))
    def setExpiryDate(self):
        # Default validity: six calendar months from now, clamping the day
        # of month to the target month's length (e.g. Aug 31 -> Feb 28).
        expiry = datetime.datetime.now()
        month = expiry.month - 1 + 6
        year = expiry.year + month // 12
        month = month % 12 + 1
        day = min(expiry.day, calendar.monthrange(year, month)[1])
        expiry = time.mktime(datetime.date(year, month, day).timetuple())
        signing = self.SigningDate()
        self.setValidityDuration(int((expiry - signing) / ExpiryFactor))
    def setExpiryDateFromTimestamp(self, expiry):
        # *expiry* is in epoch seconds; validity is stored in weeks.
        signing = self.SigningDate()
        self.setValidityDuration(int((expiry - signing) / ExpiryFactor))
    def setCredentialTypeIdentifier(self, the_id):
        # The credential type is stored as the first 16 bytes of its
        # SHA-256 hash.
        bytez = hashlib.sha256(the_id).digest()
        self.setField(credentialID, bytez[:16])
def make_attribute_list(cr, attribute_order=None, validity_signing=None):
    """
    Build the integer attribute list for a credential request.

    cr =
    {
        u'attributes': { ... "name": "value" ... },
        u'credential': u'pbdf.nijmegen.address',
        u'keyCounter': 0,
        u'validity': 1570123936
    }

    :param attribute_order: the order in which to handle the keys
    :type attribute_order: list

    Returns ([metadata_int, attr_int, ...], signing_date).
    """
    # Metadata format version 0x03.
    meta = MetadataAttribute(b'\x03')
    meta.setKeyCounter(cr[u'keyCounter'])
    meta.setCredentialTypeIdentifier(cr[u'credential'].encode('utf-8'))
    if validity_signing:
        # Caller-supplied (validity_weeks, signing_timestamp) pair.
        meta.setValidityDuration(validity_signing[0])
        meta.setSigningDate(validity_signing[1])
    else:
        meta.setSigningDate()
        meta.setExpiryDateFromTimestamp(cr[u'validity'])
    signing_date = int(binascii.hexlify(meta.field(signingDateField)), 16)
    attrs = [meta.Int]
    attr_map = cr[u"attributes"]
    attribute_order = attribute_order or attr_map.keys()
    for k in attribute_order:
        if attr_map.get(k, None) is None:
            # Absent attribute encodes to 0.
            attrs.append(0)
        elif not attr_map[k]:
            # Present-but-empty encodes to 1 (only the marker bit set).
            attrs.append(1)
        else:
            encoded = attr_map[k].encode('utf-8')
            v = int(binascii.hexlify(b''.join(int2byte(c) if isinstance(c, int) else c for c in encoded)), 16)
            # Shift left and set the low marker bit so a real value is
            # distinguishable from the absent/empty encodings above.
            v <<= 1
            v += 1
            attrs.append(v)
    return attrs, signing_date
| qstokkink/py-ipv8 | ipv8/attestation/wallet/irmaexact/gabi/attributes.py | Python | lgpl-3.0 | 4,832 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-19 14:00
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (see header): creates the Cardio,
    CardioInstance, LiftInstance, Workout and WorkoutInstance tables.
    Do not edit by hand except to fix a broken migration."""
    dependencies = [
        ('lifts', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cardio',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(verbose_name='created at')),
            ],
        ),
        migrations.CreateModel(
            name='CardioInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('type', models.IntegerField()),
                ('created_at', models.DateTimeField(verbose_name='created at')),
                ('duration', models.FloatField()),
                ('distance', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='LiftInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(verbose_name='created at')),
                ('type', models.IntegerField()),
                ('reps', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
                ('sets', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
                ('weight', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
            ],
        ),
        migrations.CreateModel(
            name='Workout',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(verbose_name='created at')),
                ('cardio_plan', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
                ('lift_plan', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
            ],
        ),
        migrations.CreateModel(
            name='WorkoutInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(verbose_name='created at')),
                ('cardio_events', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
                ('lift_events', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
            ],
        ),
    ]
| davishayden/gains | lifts/migrations/0002_cardio_cardioinstance_liftinstance_workout_workoutinstance.py | Python | mit | 3,125 |
from __future__ import absolute_import, print_function, division
import petl as etl
# Demo table: column 'bar' mixes an int with None on purpose.
table = [['foo', 'bar'],
         ['a', 1],
         ['b', None]]
# raises exception under Python 3 (None and int are not orderable there)
etl.select(table, 'bar', lambda v: v > 0)
# no error under Python 3: selectgt compares via petl's Comparable wrapper
etl.selectgt(table, 'bar', 0)
# or wrap the operand yourself so the bare lambda also works ...
etl.select(table, 'bar', lambda v: v > etl.Comparable(0))
| psnj/petl | examples/comparison.py | Python | mit | 353 |
import gc
import os
import sys
import traceback
import gtk
import pango
try:
import gtksourceview
GTK_SOURCE_VIEW = True
except:
GTK_SOURCE_VIEW = False
import events
import urk
import ui
import editor
# Preserve open editor windows across reloads of this script: if a live
# previous instance of the module exists, adopt its filename -> window map;
# otherwise start with an empty one.
try:
    editorwindows = editor.editorwindows
except AttributeError:
    editorwindows = {}
class EditorWidget(gtk.VBox):
    """Editable text area in a scrolled window.

    Uses a gtksourceview.SourceView (syntax highlighting, line numbers,
    auto-indent, undo grouping) when the gtksourceview module imported
    successfully, otherwise falls back to a plain gtk.TextView.
    """
    def goto(self, line, offset):
        # Move the cursor to the given 1-based line/offset and scroll it
        # into view (text-iter coordinates are 0-based, hence the -1s).
        buffer = self.output.get_buffer()
        cursor = buffer.get_iter_at_line_offset(
            line-1, offset-1
            )
        buffer.place_cursor(cursor)
        self.output.scroll_to_iter(cursor, 0)
    def get_text(self):
        # Reading the text also clears the modified flag, so callers that
        # save via this property see a "clean" buffer afterwards.
        buffer = self.output.get_buffer()
        buffer.set_modified(False)
        return buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter())
    def set_text(self, text):
        # Replace the buffer contents without making the load undoable or
        # leaving the buffer flagged as modified.
        buffer = self.output.get_buffer()
        if GTK_SOURCE_VIEW:
            buffer.begin_not_undoable_action()
        buffer.set_text(text)
        buffer.set_modified(False)
        if GTK_SOURCE_VIEW:
            buffer.end_not_undoable_action()
    # whole-buffer contents as a plain attribute
    text = property(get_text, set_text)
    def edit_widget(self):
        # Create self.output, preferring gtksourceview when available.
        if GTK_SOURCE_VIEW:
            self.output = gtksourceview.SourceView(gtksourceview.SourceBuffer())
        else:
            self.output = gtk.TextView()
        self.output.modify_font(pango.FontDescription('monospace 9'))
        self.output.set_wrap_mode(gtk.WRAP_WORD)
        if GTK_SOURCE_VIEW:
            # Python-friendly editing defaults: 4-space indent, 80-column
            # margin, bracket matching and python highlighting.
            self.output.set_show_line_numbers(True)
            self.output.set_show_line_markers(True)
            self.output.set_auto_indent(True)
            self.output.set_insert_spaces_instead_of_tabs(True)
            self.output.set_tabs_width(4)
            self.output.set_show_margin(True)
            self.output.set_margin(80)
            buffer = self.output.get_buffer()
            buffer.set_language(
                gtksourceview.SourceLanguagesManager()
                    .get_language_from_mime_type('text/x-python')
                )
            buffer.set_check_brackets(True)
            buffer.set_highlight(True)
    def __init__(self):
        gtk.VBox.__init__(self)
        self.edit_widget()
        # Vertical scrollbar only when needed; no horizontal scrolling.
        topbox = gtk.ScrolledWindow()
        topbox.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
        topbox.add(self.output)
        self.pack_end(topbox)
        self.show_all()
menu_ui = """
<ui>
<menubar name="MenuBar">
<menu action="ScriptMenu">
<menuitem action="Save"/>
<menuitem action="Open"/>
</menu>
</menubar>
</ui>
"""
class ConfirmCloseDialog(gtk.Dialog):
    """Modal 'save changes before closing?' dialog for a script window.

    Responses: RESPONSE_CLOSE (discard), RESPONSE_CANCEL, RESPONSE_OK (save).
    """
    def __init__(self, parent, name):
        gtk.Dialog.__init__(self, "Question", parent,
            gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
            ("Close without Saving", gtk.RESPONSE_CLOSE,
             gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
             gtk.STOCK_SAVE, gtk.RESPONSE_OK,))
        self.set_property("resizable", False)
        self.set_property("border_width", 6)
        # Standard warning layout: icon on the left, message on the right.
        image = gtk.image_new_from_stock(gtk.STOCK_DIALOG_WARNING,gtk.ICON_SIZE_DIALOG)
        image.set_property("yalign",0.0)
        label = gtk.Label()
        label.set_property("selectable", True)
        label.set_markup(
            """<span weight="bold" size="larger">Save changes to script "%s" before closing?</span>
If you don't save, changes will be permanently lost.
""" % name)
        hbox = gtk.HBox(spacing=12)
        hbox.set_property('border-width', 6)
        hbox.pack_start(image)
        hbox.pack_end(label)
        self.vbox.set_property("spacing", 12)
        self.vbox.pack_start(hbox)
        self.show_all()
# Hand-rolled overwrite confirmation: pygtk 2.6 lacks the built-in
# do-overwrite-confirmation support of newer FileChoosers.
class ConfirmOverwriteDialog(gtk.Dialog):
    """Modal 'replace existing file?' dialog used by the save chooser.

    Responses: RESPONSE_CANCEL or RESPONSE_OK (replace).
    """
    def __init__(self, parent, filename):
        gtk.Dialog.__init__(self, "Question", parent,
            gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
            (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
             "Replace", gtk.RESPONSE_OK,))
        self.set_property("resizable", False)
        self.set_property("border_width", 6)
        image = gtk.image_new_from_stock(gtk.STOCK_DIALOG_WARNING,gtk.ICON_SIZE_DIALOG)
        image.set_property("yalign",0.0)
        # Show only the file name and its immediate parent directory.
        path, shortfile = os.path.split(filename)
        path = os.path.split(path)[1] or path
        label = gtk.Label()
        label.set_property("selectable", True)
        label.set_markup(
            """<span weight="bold" size="larger">A file named "%s" already exists. Do you want to replace it?</span>
The file already exists in "%s". Replacing it will overwrite its contents.
""" % (shortfile, path))
        hbox = gtk.HBox(spacing=12)
        hbox.set_property('border-width', 6)
        hbox.pack_start(image)
        hbox.pack_end(label)
        self.vbox.set_property("spacing", 12)
        self.vbox.pack_start(hbox)
        self.show_all()
class EditorWindow(gtk.Window):
    """Top-level script-editor window wrapping an EditorWidget.

    Tracks a single file (self.filename, '' for a new script), offers
    Open/Save via a menu, reloads saved scripts through the events module,
    and registers itself in the module-level editorwindows dict so each
    file has at most one window.
    """
    def title(self, *args):
        # Window title mirrors the buffer state: '*' prefix when modified,
        # script name plus path when a file is attached.
        if self.editor.output.get_buffer().get_modified():
            modified = '*'
        else:
            modified = ''
        if self.filename:
            self.set_title(
                '%s%s (%s)' % (
                    modified,
                    events.get_scriptname(self.filename),
                    self.filename
                ))
        else:
            self.set_title('%sNew Script' % modified)
    def open(self, _action):
        # Menu handler: pick a script file and load it into this window.
        chooser = gtk.FileChooserDialog(
            "Open Script", self, gtk.FILE_CHOOSER_ACTION_OPEN,
            (
                gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                gtk.STOCK_OPEN, gtk.RESPONSE_OK
            )
            )
        chooser.set_current_folder(os.path.realpath(os.path.join(urk.userpath, 'scripts')))
        def on_response(dialog, response_id):
            if response_id == gtk.RESPONSE_OK:
                self.filename = chooser.get_filename()
                self.load()
            dialog.destroy()
        chooser.set_modal(True)
        chooser.show()
        chooser.connect("response", on_response)
    def load(self):
        # Read self.filename into the buffer; on IOError (e.g. the file
        # does not exist yet) flag the buffer modified so it gets saved.
        if self.filename:
            try:
                self.editor.text = file(self.filename).read()
            except IOError:
                self.editor.output.get_buffer().set_modified(True)
    def save(self, action=None, parent=None, on_save=lambda:None):
        # Write the buffer to self.filename, prompting for a name first if
        # there is none. on_save runs only after a successful write (used
        # by the close-confirmation dialog to close once saving is done).
        if self.filename:
            file(self.filename, "wb").write(self.editor.text)
            editorwindows[self.filename] = self
            if events.is_loaded(self.filename):
                # Reload the running script; a broken script must not kill
                # the editor, so errors land in the status bar instead.
                try:
                    events.reload(self.filename)
                    self.status.push(0, "Saved and reloaded %s" % self.filename)
                except Exception, e:
                    if isinstance(e, SyntaxError) and self.filename == e.filename:
                        # error is in this very file: jump the cursor to it
                        self.status.push(0, "SyntaxError: %s" % e.msg)
                        self.editor.goto(e.lineno, e.offset)
                    elif isinstance(e, SyntaxError):
                        self.status.push(0, "SyntaxError: %s (%s, line %s)" % (e.msg, e.filename, e.lineno))
                    else:
                        self.status.push(0, traceback.format_exception_only(sys.exc_type, sys.exc_value)[0].strip())
            else:
                self.status.push(0, "Saved %s" % self.filename)
            on_save()
        else:
            # No filename yet: run a save-as dialog, then recurse into
            # save() once self.filename has been chosen.
            parent = parent or self
            chooser = gtk.FileChooserDialog(
                "Save Script", parent, gtk.FILE_CHOOSER_ACTION_SAVE,
                (
                    gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                    gtk.STOCK_SAVE, gtk.RESPONSE_OK
                )
                )
            chooser.set_current_folder(os.path.realpath(os.path.join(urk.userpath, 'scripts')))
            chooser.set_default_response(gtk.RESPONSE_OK)
            def on_overwrite_response(confirm, response_id):
                # User answered the overwrite prompt; only adopt the name
                # and save on OK, then tear down the chooser either way.
                confirm.destroy()
                if response_id == gtk.RESPONSE_OK:
                    self.filename = chooser.get_filename()
                    self.title()
                    self.save(action, parent, on_save)
                chooser.destroy()
            def on_response(chooser, response_id):
                if response_id == gtk.RESPONSE_OK:
                    filename = chooser.get_filename()
                    if os.path.exists(filename):
                        # chooser stays alive as the confirm dialog's
                        # parent; on_overwrite_response destroys it
                        confirm = ConfirmOverwriteDialog(chooser, filename)
                        confirm.connect("response", on_overwrite_response)
                    else:
                        self.filename = filename
                        self.title()
                        self.save(action)
                        chooser.destroy()
                else:
                    chooser.destroy()
            if parent != self:
                #if we were spawned by a dialog, that dialog is modal, and so
                # must we be to get input
                chooser.set_modal(True)
            chooser.connect("response", on_response)
            chooser.show()
    def menu(self):
        # Build the menubar from menu_ui; action names here must match the
        # <menuitem action="..."> entries in that XML.
        actions = (
            ('ScriptMenu', None, '_Script'),
            ('Save', gtk.STOCK_SAVE, '_Save', '<Control>S', None, self.save),
            ('Open', gtk.STOCK_OPEN, '_Open', '<Control>O', None, self.open),
        )
        actiongroup = gtk.ActionGroup('Edit')
        actiongroup.add_actions(actions)
        uimanager = gtk.UIManager()
        uimanager.add_ui_from_string(menu_ui)
        uimanager.insert_action_group(actiongroup, 0)
        self.add_accel_group(uimanager.get_accel_group())
        return uimanager.get_widget("/MenuBar")
    def on_delete(self, event):
        # delete-event handler: with unsaved changes, ask first. Returning
        # True cancels the default close while the dialog is pending.
        if self.editor.output.get_buffer().get_modified():
            dialog = ConfirmCloseDialog(self,
                self.filename and events.get_scriptname(self.filename) or "New Script")
            def on_response(widget, response_id):
                if response_id == gtk.RESPONSE_OK: #Save
                    def on_save():
                        # close only after the save actually succeeded
                        widget.destroy()
                        self.on_destroy()
                        self.destroy()
                    self.save(parent=widget, on_save=on_save)
                elif response_id == gtk.RESPONSE_CANCEL:
                    widget.destroy()
                elif response_id == gtk.RESPONSE_CLOSE:
                    widget.destroy()
                    self.on_destroy()
                    self.destroy()
            dialog.connect("response", on_response)
            return True
        else:
            self.on_destroy()
    def on_destroy(self, *args):
        # Drop this window from the open-windows registry (no-op for a
        # never-saved new script, whose filename is '').
        editorwindows.pop(self.filename, None)
    def __init__(self, filename='', open_lineno=None):
        gtk.Window.__init__(self)
        self.filename = filename
        # Window icon is cosmetic; ignore a missing/broken svg file.
        try:
            self.set_icon(
                gtk.gdk.pixbuf_new_from_file(urk.path("urk_icon.svg"))
                )
        except:
            pass
        self.set_default_size(640, 480)
        self.editor = EditorWidget()
        # keep the title's '*' marker in sync with the buffer state
        self.editor.output.get_buffer().connect('modified-changed', self.title)
        self.load()
        self.title()
        self.status = gtk.Statusbar()
        self.status.set_has_resize_grip(True)
        menu = self.menu()
        box = gtk.VBox()
        box.pack_start(menu, expand=False)
        box.pack_start(self.editor)
        box.pack_end(self.status, expand=False)
        # connecting the unbound method makes gtk pass the window as self
        self.connect("delete-event", EditorWindow.on_delete)
        #self.connect("destroy-event", EditorWindow.on_destroy)
        # NOTE(review): duplicate of the set_icon call above — appears
        # redundant; confirm before removing.
        try:
            self.set_icon(
                gtk.gdk.pixbuf_new_from_file(urk.path("urk_icon.svg"))
                )
        except:
            pass
        self.add(box)
        self.show_all()
        if filename:
            editorwindows[filename] = self
        if open_lineno:
            #the scrolling doesn't seem to work if we use goto immediately
            ui.register_idle(self.editor.goto, open_lineno, 1)
def realfilename(filename):
    """Resolve a script name to an absolute path ending in '.py'.

    Known scripts resolve through the events module; unknown names fall
    back to the user's scripts directory.
    """
    try:
        path = os.path.abspath(events.get_filename(filename))
    except ImportError:
        # not a loaded script: assume it lives in ~userpath/scripts
        path = os.path.join(urk.userpath, 'scripts', filename)
    if not path.endswith('.py'):
        path = path + ".py"
    return path
def edit(filename=None, lineno=None):
    """Open (or focus) an editor window.

    filename -- script name or path; None/'' opens a blank editor.
    lineno -- optional line to jump to.
    """
    # collect closed windows first so stale entries don't get focused
    gc.collect()
    if filename:
        filename = realfilename(filename)
    window = editorwindows.get(filename)
    if window is None:
        EditorWindow(filename, lineno)
    else:
        if lineno:
            window.editor.goto(lineno, 1)
        window.present()
def onCommandEdit(e):
    """Handle the /edit command: open the named script, or a blank editor."""
    if not e.args:
        edit()
    else:
        edit(e.args[0])
def onMainMenu(e):
    """Add an 'Editor' entry to the client's main menu."""
    e.menu.append(('Editor', edit))
def findsubstr(text, substr):
    """findsubstr(text, substr) - yield each index where substr occurs in text.

    Overlapping matches are included: the search resumes one character
    past the previous match, not past its end.
    """
    start = 0
    while True:
        start = text.find(substr, start)
        if start == -1:
            return
        yield start
        start += 1
def get_codelink(e):
    """get_codelink(e) - check a mouse event for code links like the following:
    "/usr/lib/python2.5/os.py", line 348
    "events.py", line 44
    /usr/lib/python2.5/os.py, line 348
    os.py, line 348
    set the following variables on the event:
    e._hascodelink: True if the cursor is on a code link and the others are set
    e._codelink_file: The absolute or relative filename without quotes
    e._codelink_lineno: The line number
    e._codelink_fr, e_codelink_to: the starting and ending characters of the found link
    """
    # Scan every ', line ' occurrence left-to-right until one whose span
    # could contain the cursor position e.pos is found.
    for position in findsubstr(e.text, ', line '):
        # find the first non-digit after ', line ' -- that ends the number
        for pos_to in range(position+7, len(e.text)):
            if not e.text[pos_to].isdigit():
                break
        else:
            #we've hit the end of the string and it's all digits; that's ok
            pos_to = len(e.text)
        if pos_to == position+7:
            #a 0-digit line number -- not a real link, try the next match
            continue
        if pos_to < e.pos:
            #the cursor is to the right of this link -- try the next match
            continue
        #the filename starts just after the last space before ', line '
        pos_fr = e.text.rfind(' ', 0, position)+1
        #conveniently, if there was no space, we're at the start of the line
        if e.pos < pos_fr:
            #we're to the left of this link, and later matches only move
            # rightward, so there is nothing under the cursor: give up
            e._hascodelink = False
            return
        else:
            #whee, found a link!
            break
    else:
        # no ', line ' match fit the cursor position
        e._hascodelink = False
        return
    e._hascodelink = True
    e._codelink_file = e.text[pos_fr:position].strip('"')
    e._codelink_lineno = int(e.text[position+7:pos_to])
    e._codelink_fr = pos_fr
    e._codelink_to = pos_to
    # '<string>' marks interactively-executed code: nothing to open
    if e._codelink_file == "<string>":
        e._hascodelink = False
def onHover(e):
    """Mark the file/line reference under the mouse as a clickable link."""
    get_codelink(e)
    if not e._hascodelink:
        return
    e.tolink.add((e._codelink_fr, e._codelink_to))
def onClick(e):
    """Open the editor at the file/line named by a clicked code link."""
    get_codelink(e)
    if not e._hascodelink:
        return
    edit(e._codelink_file, e._codelink_lineno)
| madewokherd/urk | platform/gtk/scripts/editor.py | Python | gpl-2.0 | 15,495 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from keystoneclient import access
from keystoneclient import exceptions
from keystoneclient import fixture
from keystoneclient.tests.unit.v3 import utils
from keystoneclient.v3.contrib.federation import base
from keystoneclient.v3.contrib.federation import identity_providers
from keystoneclient.v3.contrib.federation import mappings
from keystoneclient.v3.contrib.federation import protocols
from keystoneclient.v3 import domains
from keystoneclient.v3 import projects
class IdentityProviderTests(utils.TestCase, utils.CrudTests):
    """CRUD tests for OS-FEDERATION identity providers."""
    def setUp(self):
        super(IdentityProviderTests, self).setUp()
        self.key = 'identity_provider'
        self.collection_key = 'identity_providers'
        self.model = identity_providers.IdentityProvider
        self.manager = self.client.federation.identity_providers
        self.path_prefix = 'OS-FEDERATION'
    def new_ref(self, **kwargs):
        # Minimal identity-provider reference with random id/description.
        kwargs.setdefault('id', uuid.uuid4().hex)
        kwargs.setdefault('description', uuid.uuid4().hex)
        kwargs.setdefault('enabled', True)
        return kwargs
    def test_positional_parameters_expect_fail(self):
        """Ensure CrudManager raises TypeError exceptions.
        After passing wrong number of positional arguments
        an exception should be raised.
        Operations to be tested:
        * create()
        * get()
        * list()
        * delete()
        * update()
        """
        POS_PARAM_1 = uuid.uuid4().hex
        POS_PARAM_2 = uuid.uuid4().hex
        POS_PARAM_3 = uuid.uuid4().hex
        PARAMETERS = {
            'create': (POS_PARAM_1, POS_PARAM_2),
            'get': (POS_PARAM_1, POS_PARAM_2),
            'list': (POS_PARAM_1, POS_PARAM_2),
            'update': (POS_PARAM_1, POS_PARAM_2, POS_PARAM_3),
            'delete': (POS_PARAM_1, POS_PARAM_2)
        }
        for f_name, args in PARAMETERS.items():
            self.assertRaises(TypeError, getattr(self.manager, f_name),
                              *args)
    def test_create(self, ref=None, req_ref=None):
        # Overrides CrudTests.test_create: identity providers are created
        # with PUT (id carried in the URL, not the body).
        ref = ref or self.new_ref()
        # req_ref argument allows you to specify a different
        # signature for the request when the manager does some
        # conversion before doing the request (e.g. converting
        # from datetime object to timestamp string)
        req_ref = (req_ref or ref).copy()
        req_ref.pop('id')
        self.stub_entity('PUT', entity=ref, id=ref['id'], status_code=201)
        returned = self.manager.create(**ref)
        self.assertIsInstance(returned, self.model)
        for attr in req_ref:
            self.assertEqual(
                getattr(returned, attr),
                req_ref[attr],
                'Expected different %s' % attr)
        self.assertEntityRequestBodyIs(req_ref)
class MappingTests(utils.TestCase, utils.CrudTests):
    """CRUD tests for OS-FEDERATION attribute mappings."""
    def setUp(self):
        super(MappingTests, self).setUp()
        self.key = 'mapping'
        self.collection_key = 'mappings'
        self.model = mappings.Mapping
        self.manager = self.client.federation.mappings
        self.path_prefix = 'OS-FEDERATION'
    def new_ref(self, **kwargs):
        # Minimal mapping reference: random id plus two opaque rules.
        kwargs.setdefault('id', uuid.uuid4().hex)
        kwargs.setdefault('rules', [uuid.uuid4().hex,
                                    uuid.uuid4().hex])
        return kwargs
    def test_create(self, ref=None, req_ref=None):
        # Overrides CrudTests.test_create: mappings are created with PUT
        # and the id is passed separately as mapping_id, not in the body.
        ref = ref or self.new_ref()
        manager_ref = ref.copy()
        mapping_id = manager_ref.pop('id')
        # req_ref argument allows you to specify a different
        # signature for the request when the manager does some
        # conversion before doing the request (e.g. converting
        # from datetime object to timestamp string)
        req_ref = (req_ref or ref).copy()
        self.stub_entity('PUT', entity=req_ref, id=mapping_id,
                         status_code=201)
        returned = self.manager.create(mapping_id=mapping_id, **manager_ref)
        self.assertIsInstance(returned, self.model)
        for attr in req_ref:
            self.assertEqual(
                getattr(returned, attr),
                req_ref[attr],
                'Expected different %s' % attr)
        self.assertEntityRequestBodyIs(manager_ref)
class ProtocolTests(utils.TestCase, utils.CrudTests):
    """CRUD tests for federation protocols nested under identity providers."""
    def setUp(self):
        super(ProtocolTests, self).setUp()
        self.key = 'protocol'
        self.collection_key = 'protocols'
        self.model = protocols.Protocol
        self.manager = self.client.federation.protocols
        self.path_prefix = 'OS-FEDERATION/identity_providers'
    def _transform_to_response(self, ref):
        """Rebuild dictionary so it can be used as a
        reference response body.
        """
        # The server echoes 'protocol_id' back as 'id' and never includes
        # the parent identity provider in the response body.
        response = copy.deepcopy(ref)
        response['id'] = response.pop('protocol_id')
        del response['identity_provider']
        return response
    def new_ref(self, **kwargs):
        # Random protocol reference; 'identity_provider' names the parent.
        kwargs.setdefault('mapping', uuid.uuid4().hex)
        kwargs.setdefault('identity_provider', uuid.uuid4().hex)
        kwargs.setdefault('protocol_id', uuid.uuid4().hex)
        return kwargs
    def build_parts(self, identity_provider, protocol_id=None):
        """Build array used to construct mocking URL.
        Construct and return array with URL parts later used
        by methods like utils.TestCase.stub_entity().
        Example of URL:
        ``OS-FEDERATION/identity_providers/{idp_id}/
        protocols/{protocol_id}``
        """
        parts = ['OS-FEDERATION', 'identity_providers',
                 identity_provider, 'protocols']
        if protocol_id:
            parts.append(protocol_id)
        return parts
    def test_build_url_provide_base_url(self):
        # An explicit base_url is used verbatim as the URL prefix.
        base_url = uuid.uuid4().hex
        parameters = {'base_url': base_url}
        url = self.manager.build_url(dict_args_in_out=parameters)
        self.assertEqual('/'.join([base_url, self.collection_key]), url)
    def test_build_url_w_idp_id(self):
        """Test whether kwargs ``base_url`` discards object's base_url
        This test shows, that when ``base_url`` is specified in the
        dict_args_in_out dictionary, values like ``identity_provider_id``
        are not taken into consideration while building the url.
        """
        base_url, identity_provider_id = uuid.uuid4().hex, uuid.uuid4().hex
        parameters = {
            'base_url': base_url,
            'identity_provider_id': identity_provider_id
        }
        url = self.manager.build_url(dict_args_in_out=parameters)
        self.assertEqual('/'.join([base_url, self.collection_key]), url)
    def test_build_url_default_base_url(self):
        # Without base_url the manager nests the collection under the
        # identity provider id.
        identity_provider_id = uuid.uuid4().hex
        parameters = {
            'identity_provider_id': identity_provider_id
        }
        url = self.manager.build_url(dict_args_in_out=parameters)
        self.assertEqual(
            '/'.join([self.manager.base_url, identity_provider_id,
                      self.manager.collection_key]), url)
    def test_create(self):
        """Test creating federation protocol tied to an Identity Provider.
        URL to be tested: PUT /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        expected = self._transform_to_response(request_args)
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('PUT', entity=expected,
                         parts=parts, status_code=201)
        returned = self.manager.create(**request_args)
        self.assertEqual(expected, returned.to_dict())
        # only the mapping id travels in the request body
        request_body = {'mapping_id': request_args['mapping']}
        self.assertEntityRequestBodyIs(request_body)
    def test_get(self):
        """Fetch federation protocol object.
        URL to be tested: GET /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        expected = self._transform_to_response(request_args)
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('GET', entity=expected,
                         parts=parts, status_code=201)
        returned = self.manager.get(request_args['identity_provider'],
                                    request_args['protocol_id'])
        self.assertIsInstance(returned, self.model)
        self.assertEqual(expected, returned.to_dict())
    def test_delete(self):
        """Delete federation protocol object.
        URL to be tested: DELETE /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('DELETE', parts=parts, status_code=204)
        self.manager.delete(request_args['identity_provider'],
                            request_args['protocol_id'])
    def test_list(self):
        """Test listing all federation protocols tied to the Identity Provider.
        URL to be tested: GET /OS-FEDERATION/identity_providers/
        $identity_provider/protocols
        """
        def _ref_protocols():
            return {
                'id': uuid.uuid4().hex,
                'mapping_id': uuid.uuid4().hex
            }
        request_args = self.new_ref()
        expected = [_ref_protocols() for _ in range(3)]
        parts = self.build_parts(request_args['identity_provider'])
        self.stub_entity('GET', parts=parts,
                         entity=expected, status_code=200)
        returned = self.manager.list(request_args['identity_provider'])
        for obj, ref_obj in zip(returned, expected):
            self.assertEqual(obj.to_dict(), ref_obj)
    def test_list_params(self):
        # Filter kwargs must be forwarded as query-string parameters.
        request_args = self.new_ref()
        filter_kwargs = {uuid.uuid4().hex: uuid.uuid4().hex}
        parts = self.build_parts(request_args['identity_provider'])
        # Return HTTP 401 as we don't accept such requests.
        self.stub_entity('GET', parts=parts, status_code=401)
        self.assertRaises(exceptions.Unauthorized,
                          self.manager.list,
                          request_args['identity_provider'],
                          **filter_kwargs)
        self.assertQueryStringContains(**filter_kwargs)
    def test_update(self):
        """Test updating federation protocol
        URL to be tested: PATCH /OS-FEDERATION/identity_providers/
        $identity_provider/protocols/$protocol
        """
        request_args = self.new_ref()
        expected = self._transform_to_response(request_args)
        parts = self.build_parts(request_args['identity_provider'],
                                 request_args['protocol_id'])
        self.stub_entity('PATCH', parts=parts,
                         entity=expected, status_code=200)
        returned = self.manager.update(request_args['identity_provider'],
                                       request_args['protocol_id'],
                                       mapping=request_args['mapping'])
        self.assertIsInstance(returned, self.model)
        self.assertEqual(expected, returned.to_dict())
        request_body = {'mapping_id': request_args['mapping']}
        self.assertEntityRequestBodyIs(request_body)
class EntityManagerTests(utils.TestCase):
    """Sanity check for the abstract federation EntityManager base class."""
    def test_create_object_expect_fail(self):
        # EntityManager is abstract; instantiating it directly must fail.
        self.assertRaises(TypeError,
                          base.EntityManager,
                          self.client)
class FederationProjectTests(utils.TestCase):
    """Tests for listing projects accessible to a federated user."""

    def setUp(self):
        super(FederationProjectTests, self).setUp()
        self.key = 'project'
        self.collection_key = 'projects'
        self.model = projects.Project
        self.manager = self.client.federation.projects
        self.URL = "%s%s" % (self.TEST_URL, '/OS-FEDERATION/projects')

    def new_ref(self, **kwargs):
        # Minimal project reference with random id/domain/name.
        kwargs.setdefault('id', uuid.uuid4().hex)
        kwargs.setdefault('domain_id', uuid.uuid4().hex)
        kwargs.setdefault('enabled', True)
        kwargs.setdefault('name', uuid.uuid4().hex)
        return kwargs

    def test_list_accessible_projects(self):
        """GET /OS-FEDERATION/projects returns the stubbed project list."""
        projects_ref = [self.new_ref(), self.new_ref()]
        # Fix: stub the response with the very references we compare
        # against; the original built a second, unrelated pair of
        # new_ref() dicts here (cf. FederationDomainTests, which already
        # reuses its reference list).
        projects_json = {
            self.collection_key: projects_ref
        }
        self.requests.get(self.URL, json=projects_json)
        returned_list = self.manager.list()
        self.assertEqual(len(projects_ref), len(returned_list))
        for project in returned_list:
            self.assertIsInstance(project, self.model)
class FederationDomainTests(utils.TestCase):
    """Tests for listing domains accessible to a federated user."""
    def setUp(self):
        super(FederationDomainTests, self).setUp()
        self.key = 'domain'
        self.collection_key = 'domains'
        self.model = domains.Domain
        self.manager = self.client.federation.domains
        self.URL = "%s%s" % (self.TEST_URL, '/OS-FEDERATION/domains')
    def new_ref(self, **kwargs):
        # Minimal domain reference with random id/name/description.
        kwargs.setdefault('id', uuid.uuid4().hex)
        kwargs.setdefault('enabled', True)
        kwargs.setdefault('name', uuid.uuid4().hex)
        kwargs.setdefault('description', uuid.uuid4().hex)
        return kwargs
    def test_list_accessible_domains(self):
        """GET /OS-FEDERATION/domains returns the stubbed domain list."""
        domains_ref = [self.new_ref(), self.new_ref()]
        domains_json = {
            self.collection_key: domains_ref
        }
        self.requests.get(self.URL, json=domains_json)
        returned_list = self.manager.list()
        self.assertEqual(len(domains_ref), len(returned_list))
        for domain in returned_list:
            self.assertIsInstance(domain, self.model)
class FederatedTokenTests(utils.TestCase):
    """Behavior of AccessInfo built from a V3 federation token fixture."""
    def setUp(self):
        super(FederatedTokenTests, self).setUp()
        # Project-scoped federation token with one role attached.
        token = fixture.V3FederationToken()
        token.set_project_scope()
        token.add_role()
        self.federated_token = access.AccessInfo.factory(body=token)
    def test_federated_property_federated_token(self):
        """Check if is_federated property returns expected value."""
        self.assertTrue(self.federated_token.is_federated)
    def test_get_user_domain_name(self):
        """Ensure a federated user's domain name does not exist."""
        self.assertIsNone(self.federated_token.user_domain_name)
    def test_get_user_domain_id(self):
        """Ensure a federated user's domain ID does not exist."""
        self.assertIsNone(self.federated_token.user_domain_id)
| ging/python-keystoneclient | keystoneclient/tests/unit/v3/test_federation.py | Python | apache-2.0 | 15,242 |
#(C) Copyright Syd Logan 2021
#(C) Copyright Thousand Smiles Foundation 2021
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
# Print the first tab-separated field of every data row (header row
# skipped) joined with '__'.
# Fixes: the original leaked the file handle (tuple(open(...))) and
# built the string with a manual count/'+=' loop; '__'.join produces the
# identical output.
with open(sys.argv[1], 'r') as infile:
    lines = infile.readlines()
print('__'.join(line.split('\t')[0] for line in lines[1:]))
| slogan621/tscharts | tools/exportcsv/headers.py | Python | apache-2.0 | 822 |
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ServicePosition import ServicePositionGauge
from Components.ActionMap import HelpableActionMap
from Components.MultiContent import MultiContentEntryText
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
from Components.VideoWindow import VideoWindow
from Components.Label import Label
from Screens.InfoBarGenerics import InfoBarSeek, InfoBarCueSheetSupport
from Components.GUIComponent import GUIComponent
from enigma import eListboxPythonMultiContent, eListbox, getDesktop, gFont, iPlayableService, RT_HALIGN_RIGHT
from Screens.FixedMenu import FixedMenu
from Screens.HelpMenu import HelpableScreen
from ServiceReference import ServiceReference
from Components.Sources.List import List
import bisect
def CutListEntry(where, what):
    """Build one display row for a cuesheet entry.

    where -- position in PTS ticks (90 kHz clock)
    what  -- cut type: 0=IN, 1=OUT, 2=MARK, 3=LAST

    Returns ((where, what), "Hh:MMm:SSs:mmm", type-label, row-color).
    """
    # Convert 90 kHz PTS ticks to milliseconds. Floor division keeps the
    # arithmetic integral: identical to '/' for ints on Python 2, and
    # avoids float results if this ever runs under Python 3.
    total_ms = where // 90
    ms = total_ms % 1000
    s = (total_ms // 1000) % 60
    m = (total_ms // 60000) % 60
    h = total_ms // 3600000
    # label/color pairs per cut type ('label' instead of the original
    # 'type', which shadowed the builtin)
    if what == 0:
        label, color = "IN", 0x004000
    elif what == 1:
        label, color = "OUT", 0x400000
    elif what == 2:
        label, color = "MARK", 0x000040
    elif what == 3:
        label, color = "LAST", 0x000000
    return ((where, what), "%dh:%02dm:%02ds:%03d" % (h, m, s, ms), label, color)
class CutListContextMenu(FixedMenu):
    """Context menu over the cutlist; close() delivers one of the RET_* codes."""
    # result codes passed to close()
    RET_STARTCUT = 0
    RET_ENDCUT = 1
    RET_DELETECUT = 2
    RET_MARK = 3
    RET_DELETEMARK = 4
    RET_REMOVEBEFORE = 5
    RET_REMOVEAFTER = 6
    RET_GRABFRAME = 7
    # 'state' argument: which cut operation is applicable at the cursor
    SHOW_STARTCUT = 0
    SHOW_ENDCUT = 1
    SHOW_DELETECUT = 2
    def __init__(self, session, state, nearmark):
        # Entries without a callback are listed but do nothing (disabled).
        menu = [(_("back"), self.close)] #, (None, )]
        if state == self.SHOW_STARTCUT:
            menu.append((_("start cut here"), self.startCut))
        else:
            menu.append((_("start cut here"), ))
        if state == self.SHOW_ENDCUT:
            menu.append((_("end cut here"), self.endCut))
        else:
            menu.append((_("end cut here"), ))
        if state == self.SHOW_DELETECUT:
            menu.append((_("delete cut"), self.deleteCut))
        else:
            menu.append((_("delete cut"), ))
        menu.append((_("remove before this position"), self.removeBefore))
        menu.append((_("remove after this position"), self.removeAfter))
#        menu.append((None, ))
        # mark entry toggles between insert and remove, depending on
        # whether the cursor is near an existing mark
        if not nearmark:
            menu.append((_("insert mark here"), self.insertMark))
        else:
            menu.append((_("remove this mark"), self.removeMark))
        menu.append((_("grab this frame as bitmap"), self.grabFrame))
        FixedMenu.__init__(self, session, _("Cut"), menu)
        self.skinName = "Menu"
    def startCut(self):
        self.close(self.RET_STARTCUT)
    def endCut(self):
        self.close(self.RET_ENDCUT)
    def deleteCut(self):
        self.close(self.RET_DELETECUT)
    def insertMark(self):
        self.close(self.RET_MARK)
    def removeMark(self):
        self.close(self.RET_DELETEMARK)
    def removeBefore(self):
        self.close(self.RET_REMOVEBEFORE)
    def removeAfter(self):
        self.close(self.RET_REMOVEAFTER)
    def grabFrame(self):
        self.close(self.RET_GRABFRAME)
class CutListEditor(Screen, InfoBarBase, InfoBarSeek, InfoBarCueSheetSupport, HelpableScreen):
skin = """
<screen position="0,0" size="720,576" title="Cutlist editor" flags="wfNoBorder">
<eLabel text="Cutlist editor" position="65,60" size="300,25" font="Regular;20" />
<widget source="global.CurrentTime" render="Label" position="268,60" size="394,20" font="Regular;20" halign="right">
<convert type="ClockToText">Format:%A %B %d, %H:%M</convert>
</widget>
<eLabel position="268,98" size="394,304" backgroundColor="#505555" />
<widget name="Video" position="270,100" zPosition="1" size="390,300" backgroundColor="transparent" />
<widget source="session.CurrentService" render="Label" position="135,405" size="450,50" font="Regular;22" halign="center" valign="center">
<convert type="ServiceName">Name</convert>
</widget>
<widget source="session.CurrentService" render="Label" position="320,450" zPosition="1" size="420,25" font="Regular;20" halign="left" valign="center">
<convert type="ServicePosition">Position,Detailed</convert>
</widget>
<widget name="SeekState" position="210,450" zPosition="1" size="100,25" halign="right" font="Regular;20" valign="center" />
<eLabel position="48,98" size="204,274" backgroundColor="#505555" />
<eLabel position="50,100" size="200,270" backgroundColor="#000000" />
<widget source="cutlist" position="50,100" zPosition="1" size="200,270" scrollbarMode="showOnDemand" transparent="1" render="Listbox" >
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(size=(125, 20), text = 1, backcolor = MultiContentTemplateColor(3)),
MultiContentEntryText(pos=(125,0), size=(50, 20), text = 2, flags = RT_HALIGN_RIGHT, backcolor = MultiContentTemplateColor(3))
],
"fonts": [gFont("Regular", 18)],
"itemHeight": 20
}
</convert>
</widget>
<widget name="Timeline" position="50,485" size="615,20" backgroundColor="#505555" pointer="skin_default/position_arrow.png:3,5" foregroundColor="black" />
<ePixmap pixmap="skin_default/icons/mp_buttons.png" position="305,515" size="109,13" alphatest="on" />
</screen>"""
tutorial_seen = False
def __init__(self, session, service):
self.skin = CutListEditor.skin
Screen.__init__(self, session)
self.setTitle(_("Cutlist editor"))
InfoBarSeek.__init__(self, actionmap = "CutlistSeekActions")
InfoBarCueSheetSupport.__init__(self)
InfoBarBase.__init__(self, steal_current_service = True)
HelpableScreen.__init__(self)
self.old_service = session.nav.getCurrentlyPlayingServiceReference()
session.nav.playService(service)
service = session.nav.getCurrentService()
cue = service and service.cueSheet()
if cue is not None:
# disable cutlists. we want to freely browse around in the movie
print "cut lists disabled!"
cue.setCutListEnable(0)
self.downloadCuesheet()
self["Timeline"] = ServicePositionGauge(self.session.nav)
self["cutlist"] = List(self.getCutlist())
self["cutlist"].onSelectionChanged.append(self.selectionChanged)
self["SeekState"] = Label()
self.onPlayStateChanged.append(self.updateStateLabel)
self.updateStateLabel(self.seekstate)
desktopSize = getDesktop(0).size()
self["Video"] = VideoWindow(decoder = 0, fb_width=desktopSize.width(), fb_height=desktopSize.height())
self["actions"] = HelpableActionMap(self, "CutListEditorActions",
{
"setIn": (self.setIn, _("Make this mark an 'in' point")),
"setOut": (self.setOut, _("Make this mark an 'out' point")),
"setMark": (self.setMark, _("Make this mark just a mark")),
"addMark": (self.__addMark, _("Add a mark")),
"removeMark": (self.__removeMark, _("Remove a mark")),
"leave": (self.exit, _("Exit editor")),
"showMenu": (self.showMenu, _("menu")),
}, prio=-4)
self.onExecBegin.append(self.showTutorial)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evCuesheetChanged: self.refillList
})
# to track new entries we save the last version of the cutlist
self.last_cuts = self.getCutlist()
self.cut_start = None
self.inhibit_seek = False
self.onClose.append(self.__onClose)
def __onClose(self):
self.session.nav.playService(self.old_service, forceRestart=True)
def updateStateLabel(self, state):
self["SeekState"].setText(state[3].strip())
def showTutorial(self):
if not CutListEditor.tutorial_seen:
CutListEditor.tutorial_seen = True
self.session.open(MessageBox,_("Welcome to the cutlist editor.\n\nSeek to the start of the stuff you want to cut away. Press OK, select 'start cut'.\n\nThen seek to the end, press OK, select 'end cut'. That's it."), MessageBox.TYPE_INFO)
def checkSkipShowHideLock(self):
    # Deliberate no-op override: the editor manages its own display and
    # must not trigger the inherited infobar show/hide behaviour.
    pass
def setType(self, index, type):
if len(self.cut_list):
self.cut_list[index] = (self.cut_list[index][0], type)
self["cutlist"].modifyEntry(index, CutListEntry(*self.cut_list[index]))
def setIn(self):
    """Turn the currently selected cutlist entry into an 'in' point (type 0)."""
    self.setType(self["cutlist"].getIndex(), 0)
    self.uploadCuesheet()
def setOut(self):
    """Turn the currently selected cutlist entry into an 'out' point (type 1)."""
    self.setType(self["cutlist"].getIndex(), 1)
    self.uploadCuesheet()
def setMark(self):
    """Turn the currently selected cutlist entry into a plain mark (type 2)."""
    self.setType(self["cutlist"].getIndex(), 2)
    self.uploadCuesheet()
def __addMark(self):
    # 90000 PTS ticks = 1 second, so marks closer than one second to an
    # existing mark are suppressed rather than added.
    self.toggleMark(onlyadd=True, tolerance=90000) # do not allow two marks in <1s
def __removeMark(self):
m = self["cutlist"].getCurrent()
m = m and m[0]
if m is not None:
self.removeMark(m)
def exit(self):
    # Leave the editor; __onClose (registered in __init__) restores the
    # previously playing service.
    self.close()
def getCutlist(self):
r = [ ]
for e in self.cut_list:
r.append(CutListEntry(*e))
return r
def selectionChanged(self):
if not self.inhibit_seek:
where = self["cutlist"].getCurrent()
if where is None:
print "no selection"
return
pts = where[0][0]
seek = self.getSeek()
if seek is None:
print "no seek"
return
seek.seekTo(pts)
def refillList(self):
print "cue sheet changed, refilling"
self.downloadCuesheet()
# get the first changed entry, counted from the end, and select it
new_list = self.getCutlist()
self["cutlist"].list = new_list
l1 = len(new_list)
l2 = len(self.last_cuts)
for i in range(min(l1, l2)):
if new_list[l1-i-1] != self.last_cuts[l2-i-1]:
self["cutlist"].setIndex(l1-i-1)
break
self.last_cuts = new_list
def getStateForPosition(self, pos):
state = -1
for (where, what) in self.cut_list:
if what in [0, 1]:
if where < pos:
state = what
elif where == pos:
state = 1
elif state == -1:
state = 1 - what
if state == -1:
state = 0
return state
def showMenu(self):
curpos = self.cueGetCurrentPosition()
if curpos is None:
return
self.setSeekState(self.SEEK_STATE_PAUSE)
self.context_position = curpos
self.context_nearest_mark = self.toggleMark(onlyreturn=True)
cur_state = self.getStateForPosition(curpos)
if cur_state == 0:
print "currently in 'IN'"
if self.cut_start is None or self.context_position < self.cut_start:
state = CutListContextMenu.SHOW_STARTCUT
else:
state = CutListContextMenu.SHOW_ENDCUT
else:
print "currently in 'OUT'"
state = CutListContextMenu.SHOW_DELETECUT
if self.context_nearest_mark is None:
nearmark = False
else:
nearmark = True
self.session.openWithCallback(self.menuCallback, CutListContextMenu, state, nearmark)
def menuCallback(self, *result):
    # Dispatch on the CutListContextMenu selection; *result is empty when
    # the menu was cancelled.
    if not len(result):
        return
    result = result[0]
    if result == CutListContextMenu.RET_STARTCUT:
        self.cut_start = self.context_position
    elif result == CutListContextMenu.RET_ENDCUT:
        # remove in/out marks between the new cut
        # (iterate over a copy because entries are removed while scanning)
        for (where, what) in self.cut_list[:]:
            if self.cut_start <= where <= self.context_position and what in (0,1):
                self.cut_list.remove((where, what))
        # insert the new 'out' at cut start and 'in' at cut end, keeping
        # the list sorted by position
        bisect.insort(self.cut_list, (self.cut_start, 1))
        bisect.insort(self.cut_list, (self.context_position, 0))
        self.uploadCuesheet()
        self.cut_start = None
    elif result == CutListContextMenu.RET_DELETECUT:
        # find the 'out' before and the 'in' after the current position;
        # removing both deletes the cut surrounding the cursor
        out_before = None
        in_after = None
        for (where, what) in self.cut_list:
            if what == 1 and where <= self.context_position: # out
                out_before = (where, what)
            elif what == 0 and where < self.context_position: # in, before out
                out_before = None
            elif what == 0 and where >= self.context_position and in_after is None:
                in_after = (where, what)
        if out_before is not None:
            self.cut_list.remove(out_before)
        if in_after is not None:
            self.cut_list.remove(in_after)
        # inhibit_seek prevents selectionChanged from jumping while the
        # list is refilled from the uploaded cuesheet
        self.inhibit_seek = True
        self.uploadCuesheet()
        self.inhibit_seek = False
    elif result == CutListContextMenu.RET_MARK:
        self.__addMark()
    elif result == CutListContextMenu.RET_DELETEMARK:
        self.cut_list.remove(self.context_nearest_mark)
        self.inhibit_seek = True
        self.uploadCuesheet()
        self.inhibit_seek = False
    elif result == CutListContextMenu.RET_REMOVEBEFORE:
        # remove in/out marks before current position
        for (where, what) in self.cut_list[:]:
            if where <= self.context_position and what in (0,1):
                self.cut_list.remove((where, what))
        # add 'in' point
        bisect.insort(self.cut_list, (self.context_position, 0))
        self.inhibit_seek = True
        self.uploadCuesheet()
        self.inhibit_seek = False
    elif result == CutListContextMenu.RET_REMOVEAFTER:
        # remove in/out marks after current position
        for (where, what) in self.cut_list[:]:
            if where >= self.context_position and what in (0,1):
                self.cut_list.remove((where, what))
        # add 'out' point
        bisect.insort(self.cut_list, (self.context_position, 1))
        self.inhibit_seek = True
        self.uploadCuesheet()
        self.inhibit_seek = False
    elif result == CutListContextMenu.RET_GRABFRAME:
        self.grabFrame()
# we modify the "play" behavior a bit:
# if we press pause while being in slowmotion, we will pause (and not play)
def playpauseService(self):
    # unpause only when neither playing nor in slow motion; a play/pause
    # press during slowmotion therefore pauses instead of resuming
    if self.seekstate != self.SEEK_STATE_PLAY and not self.isStateSlowMotion(self.seekstate):
        self.unPauseService()
    else:
        self.pauseService()
def grabFrame(self):
path = self.session.nav.getCurrentlyPlayingServiceReference().getPath()
from Components.Console import Console
grabConsole = Console()
cmd = 'grab -vblpr%d "%s"' % (180, path.rsplit('.',1)[0] + ".png")
grabConsole.ePopen(cmd)
self.playpauseService()
| kingvuplus/rr | lib/python/Plugins/Extensions/CutListEditor/ui.py | Python | gpl-2.0 | 13,115 |
""" pgp.py
this is where the armorable PGP block objects live
"""
import binascii
import calendar
import collections
import contextlib
import copy
import functools
import itertools
import operator
import os
import re
import warnings
import weakref
import six
from datetime import datetime
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.constant_time import bytes_eq
from .constants import CompressionAlgorithm
from .constants import Features
from .constants import HashAlgorithm
from .constants import ImageEncoding
from .constants import KeyFlags
from .constants import NotationDataFlags
from .constants import PacketTag
from .constants import PubKeyAlgorithm
from .constants import RevocationKeyClass
from .constants import RevocationReason
from .constants import SignatureType
from .constants import SymmetricKeyAlgorithm
from .decorators import KeyAction
from .errors import PGPDecryptionError
from .errors import PGPError
from .packet import Key
from .packet import MDC
from .packet import Packet
from .packet import Primary
from .packet import Private
from .packet import PubKeyV4
from .packet import PubSubKeyV4
from .packet import PrivKeyV4
from .packet import PrivSubKeyV4
from .packet import Public
from .packet import Sub
from .packet import UserID
from .packet import UserAttribute
from .packet.packets import CompressedData
from .packet.packets import IntegrityProtectedSKEData
from .packet.packets import IntegrityProtectedSKEDataV1
from .packet.packets import LiteralData
from .packet.packets import OnePassSignature
from .packet.packets import OnePassSignatureV3
from .packet.packets import PKESessionKey
from .packet.packets import PKESessionKeyV3
from .packet.packets import Signature
from .packet.packets import SignatureV4
from .packet.packets import SKEData
from .packet.packets import Marker
from .packet.packets import SKESessionKey
from .packet.packets import SKESessionKeyV4
from .packet.types import Opaque
from .types import Armorable
from .types import Fingerprint
from .types import ParentRef
from .types import PGPObject
from .types import SignatureVerification
from .types import SorteDeque
# Public API of this module.
__all__ = ['PGPSignature',
           'PGPDetachedSignature',
           'PGPUID',
           'PGPMessage',
           'PGPKey',
           'PGPKeyring']
class PGPSignature(Armorable, ParentRef, PGPObject):
    """An OpenPGP signature, wrapping a single Signature packet."""
    # namedtuples used to expose subpacket contents with readable fields
    _revocation_key = collections.namedtuple('revocation_key', ['keyclass','algorithm', 'fingerprint'])
    _reason_for_revocation = collections.namedtuple('ReasonForRevocation', ['code', 'comment'])
    @property
    def __sig__(self):
        # raw signature bytes of the wrapped packet
        return self._signature.signature.__sig__()
    @property
    def cipherprefs(self):
        """
        A ``list`` of preferred symmetric algorithms specified in this signature, if any. Otherwise, an empty ``list``.
        """
        if 'PreferredSymmetricAlgorithms' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredSymmetricAlgorithms'])).flags
        return []
    @property
    def compprefs(self):
        """
        A ``list`` of preferred compression algorithms specified in this signature, if any. Otherwise, an empty ``list``.
        """
        if 'PreferredCompressionAlgorithms' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredCompressionAlgorithms'])).flags
        return []
    @property
    def created(self):
        """
        A :py:obj:`~datetime.datetime` of when this signature was created.
        """
        return self._signature.subpackets['h_CreationTime'][-1].created
    @property
    def embedded(self):
        # True when this signature lives inside another object (has a parent)
        return self.parent is not None
    @property
    def expires_at(self):
        """
        A :py:obj:`~datetime.datetime` of when this signature expires, if a signature expiration date is specified.
        Otherwise, ``None``
        """
        if 'SignatureExpirationTime' in self._signature.subpackets:
            expd = next(iter(self._signature.subpackets['SignatureExpirationTime'])).expires
            return self.created + expd
        return None
    @property
    def exportable(self):
        """
        ``False`` if this signature is marked as being not exportable. Otherwise, ``True``.
        """
        if 'ExportableCertification' in self._signature.subpackets:
            return bool(next(iter(self._signature.subpackets['ExportableCertification'])))
        return True
    @property
    def features(self):
        """
        A ``set`` of implementation features specified in this signature, if any. Otherwise, an empty ``set``.
        """
        if 'Features' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['Features'])).flags
        return set()
    @property
    def hash2(self):
        # leftmost two bytes of the signed hash, as stored in the packet
        return self._signature.hash2
    @property
    def hashprefs(self):
        """
        A ``list`` of preferred hash algorithms specified in this signature, if any. Otherwise, an empty ``list``.
        """
        if 'PreferredHashAlgorithms' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredHashAlgorithms'])).flags
        return []
    @property
    def hash_algorithm(self):
        """
        The :py:obj:`~constants.HashAlgorithm` used when computing this signature.
        """
        return self._signature.halg
    @property
    def is_expired(self):
        """
        ``True`` if the signature has an expiration date, and is expired. Otherwise, ``False``
        """
        expires_at = self.expires_at
        if expires_at is not None and expires_at != self.created:
            return expires_at < datetime.utcnow()
        return False
    @property
    def key_algorithm(self):
        """
        The :py:obj:`~constants.PubKeyAlgorithm` of the key that generated this signature.
        """
        return self._signature.pubalg
    @property
    def key_expiration(self):
        # key expiration period from the KeyExpirationTime subpacket, or None
        if 'KeyExpirationTime' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['KeyExpirationTime'])).expires
        return None
    @property
    def key_flags(self):
        """
        A ``set`` of :py:obj:`~constants.KeyFlags` specified in this signature, if any. Otherwise, an empty ``set``.
        """
        if 'KeyFlags' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_KeyFlags'])).flags
        return set()
    @property
    def keyserver(self):
        """
        The preferred key server specified in this signature, if any. Otherwise, an empty ``str``.
        """
        if 'PreferredKeyServer' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredKeyServer'])).uri
        return ''
    @property
    def keyserverprefs(self):
        """
        A ``list`` of :py:obj:`~constants.KeyServerPreferences` in this signature, if any. Otherwise, an empty ``list``.
        """
        if 'KeyServerPreferences' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_KeyServerPreferences'])).flags
        return []
    @property
    def magic(self):
        # ASCII armor block type for signatures
        return "SIGNATURE"
    @property
    def notation(self):
        """
        A ``dict`` of notation data in this signature, if any. Otherwise, an empty ``dict``.
        """
        return dict((nd.name, nd.value) for nd in self._signature.subpackets['NotationData'])
    @property
    def policy_uri(self):
        """
        The policy URI specified in this signature, if any. Otherwise, an empty ``str``.
        """
        if 'Policy' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['Policy'])).uri
        return ''
    @property
    def revocable(self):
        """
        ``False`` if this signature is marked as being not revocable. Otherwise, ``True``.
        """
        if 'Revocable' in self._signature.subpackets:
            return bool(next(iter(self._signature.subpackets['Revocable'])))
        return True
    @property
    def revocation_key(self):
        """
        A ``list`` of revocation key subpackets in this signature, if any. Otherwise, an empty ``list``.
        Each is a namedtuple with the following attributes:
        ``revocation_key.keyclass`` - a ``list`` of :py:obj:`~pgpy.constants.RevocationKeyClass` flags.
        ``revocation_key.algorithm`` - the :py:obj:`~pgpy.constants.PubkeyAlgorithm` of the revocation key.
        ``revocation_key.fingerprint`` - the :py:obj:`~pgpy.types.Fingerprint` of the revocation key.
        """
        return list(self._revocation_key(rk.keyclass, rk.algorithm, rk.fingerprint)
                    for rk in self._signature.subpackets['RevocationKey'])
    @property
    def revocation_reason(self):
        # (code, comment) namedtuple from the ReasonForRevocation subpacket, or None
        if 'ReasonForRevocation' in self._signature.subpackets:
            subpacket = next(iter(self._signature.subpackets['ReasonForRevocation']))
            return self._reason_for_revocation(subpacket.code, subpacket.string)
        return None
    @property
    def signer(self):
        """
        The 16-character Key ID of the key that generated this signature.
        """
        return self._signature.signer
    @property
    def signer_fingerprint(self):
        """
        The fingerprint of the key that generated this signature, if it is contained. Otherwise, an empty ``str``.
        """
        if 'IssuerFingerprint' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['IssuerFingerprint'])).issuer_fingerprint
        return ''
    @property
    def target_signature(self):
        # not implemented for this signature type
        return NotImplemented
    @property
    def type(self):
        """
        The :py:obj:`~constants.SignatureType` of this signature.
        """
        return self._signature.sigtype
    @classmethod
    def new(cls, sigtype, pkalg, halg, signer):
        # Alternate constructor: build an empty V4 signature packet with the
        # given type/algorithms and CreationTime/Issuer subpackets prefilled.
        sig = PGPSignature()
        sigpkt = SignatureV4()
        sigpkt.header.tag = 2
        sigpkt.header.version = 4
        sigpkt.subpackets.addnew('CreationTime', hashed=True, created=datetime.utcnow())
        sigpkt.subpackets.addnew('Issuer', _issuer=signer)
        sigpkt.sigtype = sigtype
        sigpkt.pubalg = pkalg
        if halg is not None:
            sigpkt.halg = halg
        sig._signature = sigpkt
        return sig
    def __init__(self):
        """
        PGPSignature objects represent OpenPGP compliant signatures.
        PGPSignature implements the ``__str__`` method, the output of which will be the signature object in
        OpenPGP-compliant ASCII-armored format.
        PGPSignature implements the ``__bytes__`` method, the output of which will be the signature object in
        OpenPGP-compliant binary format.
        """
        super(PGPSignature, self).__init__()
        self._signature = None
    def __bytearray__(self):
        return self._signature.__bytearray__()
    def __repr__(self):
        return "<PGPSignature [{:s}] object at 0x{:02x}>".format(self.type.name, id(self))
    def __lt__(self, other):
        # signatures sort by creation time
        return self.created < other.created
    def __or__(self, other):
        # absorb a parsed Signature packet into this (empty) wrapper
        if isinstance(other, Signature):
            if self._signature is None:
                self._signature = other
                return self
        ##TODO: this is not a great way to do this
        if other.__class__.__name__ == 'EmbeddedSignature':
            self._signature = other
            return self
        raise TypeError
    def __iter__(self):
        yield self._signature
    def __hash__(self):
        return hash(self.__sig__)
    def __eq__(self, other):
        # constant-time comparison of the raw signature bytes
        if isinstance(other, PGPSignature):
            return bytes_eq(self.__sig__, other.__sig__)
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def __copy__(self):
        # because the default shallow copy isn't actually all that useful,
        # and deepcopy does too much work
        sig = super(PGPSignature, self).__copy__()
        # sig = PGPSignature()
        # sig.ascii_headers = self.ascii_headers.copy()
        sig |= copy.copy(self._signature)
        return sig
    def hashdata(self, subject):
        # Build the exact byte sequence that is hashed when creating or
        # verifying this signature, per RFC 4880 section 5.2.4. The bare
        # string literals below quote the relevant parts of the RFC.
        _data = bytearray()
        if isinstance(subject, six.string_types):
            subject = subject.encode('latin-1')
        """
        All signatures are formed by producing a hash over the signature
        data, and then using the resulting hash in the signature algorithm.
        """
        if self.type == SignatureType.BinaryDocument:
            """
            For binary document signatures (type 0x00), the document data is
            hashed directly.
            """
            if isinstance(subject, (SKEData, IntegrityProtectedSKEData)):
                _data += subject.__bytearray__()
            else:
                _data += bytearray(subject)
        if self.type == SignatureType.CanonicalDocument:
            """
            For text document signatures (type 0x01), the
            document is canonicalized by converting line endings to <CR><LF>,
            and the resulting data is hashed.
            """
            _data += re.subn(br'\r?\n', b'\r\n', subject)[0]
        if self.type in {SignatureType.Generic_Cert, SignatureType.Persona_Cert, SignatureType.Casual_Cert,
                         SignatureType.Positive_Cert, SignatureType.CertRevocation, SignatureType.Subkey_Binding,
                         SignatureType.PrimaryKey_Binding}:
            """
            When a signature is made over a key, the hash data starts with the
            octet 0x99, followed by a two-octet length of the key, and then body
            of the key packet. (Note that this is an old-style packet header for
            a key packet with two-octet length.) ...
            Key revocation signatures (types 0x20 and 0x28)
            hash only the key being revoked.
            """
            _s = b''
            if isinstance(subject, PGPUID):
                _s = subject._parent.hashdata
            elif isinstance(subject, PGPKey) and not subject.is_primary:
                _s = subject._parent.hashdata
            elif isinstance(subject, PGPKey) and subject.is_primary:
                _s = subject.hashdata
            if len(_s) > 0:
                _data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
        if self.type in {SignatureType.Subkey_Binding, SignatureType.PrimaryKey_Binding}:
            """
            A subkey binding signature
            (type 0x18) or primary key binding signature (type 0x19) then hashes
            the subkey using the same format as the main key (also using 0x99 as
            the first octet).
            """
            if subject.is_primary:
                _s = subject.subkeys[self.signer].hashdata
            else:
                _s = subject.hashdata
            _data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
        if self.type in {SignatureType.KeyRevocation, SignatureType.SubkeyRevocation, SignatureType.DirectlyOnKey}:
            """
            The signature is calculated directly on the key being revoked.  A
            revoked key is not to be used.  Only revocation signatures by the
            key being revoked, or by an authorized revocation key, should be
            considered valid revocation signatures.
            Subkey revocation signature
            The signature is calculated directly on the subkey being revoked.
            A revoked subkey is not to be used.  Only revocation signatures
            by the top-level signature key that is bound to this subkey, or
            by an authorized revocation key, should be considered valid
            revocation signatures.
            Signature directly on a key
            This signature is calculated directly on a key.  It binds the
            information in the Signature subpackets to the key, and is
            appropriate to be used for subpackets that provide information
            about the key, such as the Revocation Key subpacket.  It is also
            appropriate for statements that non-self certifiers want to make
            about the key itself, rather than the binding between a key and a
            name.
            """
            _s = subject.hashdata
            _data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
        if self.type in {SignatureType.Generic_Cert, SignatureType.Persona_Cert, SignatureType.Casual_Cert,
                         SignatureType.Positive_Cert, SignatureType.CertRevocation}:
            """
            A certification signature (type 0x10 through 0x13) hashes the User
            ID being bound to the key into the hash context after the above
            data. ...  A V4 certification
            hashes the constant 0xB4 for User ID certifications or the constant
            0xD1 for User Attribute certifications, followed by a four-octet
            number giving the length of the User ID or User Attribute data, and
            then the User ID or User Attribute data.
            ...
            The [certificate revocation] signature
            is computed over the same data as the certificate that it
            revokes, and should have a later creation date than that
            certificate.
            """
            _s = subject.hashdata
            if subject.is_uid:
                _data += b'\xb4'
            else:
                _data += b'\xd1'
            _data += self.int_to_bytes(len(_s), 4) + _s
        # if this is a new signature, do update_hlen
        if 0 in list(self._signature.signature):
            self._signature.update_hlen()
        """
        Once the data body is hashed, then a trailer is hashed. (...)
        A V4 signature hashes the packet body
        starting from its first field, the version number, through the end
        of the hashed subpacket data.  Thus, the fields hashed are the
        signature version, the signature type, the public-key algorithm, the
        hash algorithm, the hashed subpacket length, and the hashed
        subpacket body.
        V4 signatures also hash in a final trailer of six octets: the
        version of the Signature packet, i.e., 0x04; 0xFF; and a four-octet,
        big-endian number that is the length of the hashed data from the
        Signature packet (note that this number does not include these final
        six octets).
        """
        hcontext = bytearray()
        hcontext.append(self._signature.header.version if not self.embedded else self._signature._sig.header.version)
        hcontext.append(self.type)
        hcontext.append(self.key_algorithm)
        hcontext.append(self.hash_algorithm)
        hcontext += self._signature.subpackets.__hashbytearray__()
        hlen = len(hcontext)
        _data += hcontext
        _data += b'\x04\xff'
        _data += self.int_to_bytes(hlen, 4)
        return bytes(_data)
    def make_onepass(self):
        # Build the One-Pass Signature packet that precedes signed data in a
        # message, mirroring this signature's type/algorithms/signer.
        onepass = OnePassSignatureV3()
        onepass.sigtype = self.type
        onepass.halg = self.hash_algorithm
        onepass.pubalg = self.key_algorithm
        onepass.signer = self.signer
        onepass.update_hlen()
        return onepass
    def parse(self, packet):
        # Load exactly one Signature packet from ASCII-armored or binary input.
        unarmored = self.ascii_unarmor(packet)
        data = unarmored['body']
        if unarmored['magic'] is not None and unarmored['magic'] != 'SIGNATURE':
            raise ValueError('Expected: SIGNATURE. Got: {}'.format(str(unarmored['magic'])))
        if unarmored['headers'] is not None:
            self.ascii_headers = unarmored['headers']
        # load *one* packet from data
        pkt = Packet(data)
        if pkt.header.tag == PacketTag.Signature and not isinstance(pkt, Opaque):
            self._signature = pkt
        else:
            raise ValueError('Expected: Signature. Got: {:s}'.format(pkt.__class__.__name__))
class PGPDetachedSignature(Armorable, PGPObject):
    """A collection of one or more detached OpenPGP signatures over the same data."""
    @property
    def magic(self):
        # ASCII armor block type
        return 'SIGNATURE'
    @property
    def signatures(self):
        # all contained PGPSignature objects, in sorted (creation-time) order
        return list(self._signatures)
    @property
    def signers(self):
        # key ids of every signature in this collection
        return set(m.signer for m in self._signatures)
    def __init__(self):
        super(PGPDetachedSignature, self).__init__()
        # signatures kept sorted by creation time
        self._signatures = SorteDeque()
    def __iter__(self):
        for sig in self._signatures:
            yield sig
    def __or__(self, other):
        # absorb another detached signature set, a raw Signature packet,
        # or a PGPSignature; OnePassSignature packets are ignored
        if isinstance(other, PGPDetachedSignature):
            for sig in other:
                self._signatures.insort(sig)
            return self
        if isinstance(other, Signature):
            other = PGPSignature() | other
        if isinstance(other, PGPSignature):
            self._signatures.insort(other)
            return self
        if isinstance(other, OnePassSignature):
            return self
        raise NotImplementedError(str(type(other)))
    def __copy__(self):
        detached = PGPDetachedSignature()
        for sig in self._signatures:
            detached |= copy.copy(sig)
        return detached
    def __bytearray__(self):
        _bytes = bytearray()
        for sig in self:
            _bytes += sig.__bytearray__()
        return _bytes
    def parse(self, packet):
        # Load one or more consecutive Signature packets from armored or
        # binary input.
        unarmored = self.ascii_unarmor(packet)
        data = unarmored['body']
        if unarmored['magic'] is not None and unarmored['magic'] != 'SIGNATURE':
            raise ValueError('Expected: SIGNATURE. Got: {}'.format(str(unarmored['magic'])))
        if unarmored['headers'] is not None:
            self.ascii_headers = unarmored['headers']
        pkt = Packet(data)
        while pkt.header.tag == PacketTag.Signature and not isinstance(pkt, Opaque):
            self |= PGPSignature() | pkt
            if len(data) == 0:
                break
            pkt = Packet(data)
        # the while/else fires when a non-Signature packet is encountered
        # before the data is exhausted (including on the very first packet)
        else:
            raise ValueError('Expected: Signature. Got: {:s}'.format(pkt.__class__.__name__))
class PGPUID(ParentRef):
    """A User ID or User Attribute belonging to a key, plus its signatures."""
    @property
    def name(self):
        """If this is a User ID, the stored name. If this is not a User ID, this will be an empty string."""
        return self._uid.name if isinstance(self._uid, UserID) else ""
    @property
    def comment(self):
        """
        If this is a User ID, this will be the stored comment. If this is not a User ID, or there is no stored comment,
        this will be an empty string.
        """
        return self._uid.comment if isinstance(self._uid, UserID) else ""
    @property
    def email(self):
        """
        If this is a User ID, this will be the stored email address. If this is not a User ID, or there is no stored
        email address, this will be an empty string.
        """
        return self._uid.email if isinstance(self._uid, UserID) else ""
    @property
    def image(self):
        """
        If this is a User Attribute, this will be the stored image. If this is not a User Attribute, this will be ``None``.
        """
        return self._uid.image.image if isinstance(self._uid, UserAttribute) else None
    @property
    def is_primary(self):
        """
        If the most recent, valid self-signature specifies this as being primary, this will be True. Otherwise, False.
        """
        return bool(next(iter(self.selfsig._signature.subpackets['h_PrimaryUserID']), False))
    @property
    def is_uid(self):
        """
        ``True`` if this is a User ID, otherwise False.
        """
        return isinstance(self._uid, UserID)
    @property
    def is_ua(self):
        """
        ``True`` if this is a User Attribute, otherwise False.
        """
        return isinstance(self._uid, UserAttribute)
    @property
    def selfsig(self):
        """
        This will be the most recent, self-signature of this User ID or Attribute. If there isn't one, this will be ``None``.
        """
        if self.parent is not None:
            return next((sig for sig in reversed(self._signatures) if sig.signer == self.parent.fingerprint.keyid), None)
    @property
    def signers(self):
        """
        This will be a set of all of the key ids which have signed this User ID or Attribute.
        """
        return set(s.signer for s in self._signatures)
    @property
    def signatures(self):
        """A ``list`` containing all signatures present in this User ID or Attribute."""
        return list(self._signatures)
    @property
    def hashdata(self):
        # raw bytes of this UID/UA as hashed into certification signatures
        if self.is_uid:
            return self._uid.__bytearray__()[len(self._uid.header):]
        if self.is_ua:
            return self._uid.subpackets.__bytearray__()
    @classmethod
    def new(cls, pn, comment="", email=""):
        """
        Create a new User ID or photo.
        :param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo.
                   Otherwise, it will be used as the name field for a User ID.
        :type pn: ``bytearray``, ``str``, ``unicode``
        :param comment: The comment field for a User ID. Ignored if this is a photo.
        :type comment: ``str``, ``unicode``
        :param email: The email address field for a User ID. Ignored if this is a photo.
        :type email: ``str``, ``unicode``
        :returns: :py:obj:`PGPUID`
        """
        uid = PGPUID()
        if isinstance(pn, bytearray):
            uid._uid = UserAttribute()
            uid._uid.image.image = pn
            uid._uid.image.iencoding = ImageEncoding.encodingof(pn)
            uid._uid.update_hlen()
        else:
            uid._uid = UserID()
            uid._uid.name = pn
            uid._uid.comment = comment
            uid._uid.email = email
            uid._uid.update_hlen()
        return uid
    def __init__(self):
        """
        PGPUID objects represent User IDs and User Attributes for keys.
        PGPUID implements the ``__format__`` method for User IDs, returning a string in the format
        'name (comment) <email>', leaving out any comment or email fields that are not present.
        """
        super(PGPUID, self).__init__()
        self._uid = None
        # signatures kept sorted by creation time
        self._signatures = SorteDeque()
    def __repr__(self):
        if self.selfsig is not None:
            return "<PGPUID [{:s}][{}] at 0x{:02X}>".format(self._uid.__class__.__name__, self.selfsig.created, id(self))
        return "<PGPUID [{:s}] at 0x{:02X}>".format(self._uid.__class__.__name__, id(self))
    def __lt__(self, other):  # pragma: no cover
        # ordering: User IDs before User Attributes; primary UIDs first;
        # otherwise newest self-signature first
        if self.is_uid == other.is_uid:
            if self.is_primary == other.is_primary:
                return self.selfsig > other.selfsig
            if self.is_primary:
                return True
            return False
        if self.is_uid and other.is_ua:
            return True
        if self.is_ua and other.is_uid:
            return False
    def __iter__(self):
        yield self._uid
        for signature in self._signatures:
            for sigpacket in signature:
                yield sigpacket
    def __or__(self, other):
        # absorb a signature, or the underlying UserID/UserAttribute packet
        if isinstance(other, PGPSignature):
            self._signatures.insort(other)
            # a new signature may change this UID's sort position in its parent key
            if self.parent is not None and self in self.parent._uids:
                self.parent._uids.resort(self)
            return self
        if isinstance(other, UserID) and self._uid is None:
            self._uid = other
            return self
        if isinstance(other, UserAttribute) and self._uid is None:
            self._uid = other
            return self
        raise TypeError("unsupported operand type(s) for |: '{:s}' and '{:s}'"
                        "".format(self.__class__.__name__, other.__class__.__name__))
    def __hash__(self):
        if self.is_uid:
            return hash((self.name, self.comment, self.email, self.is_primary))
        if self.is_ua:
            return hash(self.image)
        return 0  # should only be reached for clean PGPUID()
    def __eq__(self, other):
        if isinstance(other, PGPUID):
            if self.is_uid and other.is_uid:
                return self.name == other.name and self.comment == other.comment and self.email == other.email and self.is_primary == other.is_primary
            if self.is_ua and other.is_ua:
                return self.image == other.image
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def __copy__(self):
        # because the default shallow copy isn't actually all that useful,
        # and deepcopy does too much work
        uid = PGPUID()
        uid |= copy.copy(self._uid)
        for sig in self._signatures:
            uid |= copy.copy(sig)
        return uid
    def __format__(self, format_spec):
        if self.is_uid:
            comment = six.u("") if self.comment == "" else six.u(" ({:s})").format(self.comment)
            email = six.u("") if self.email == "" else six.u(" <{:s}>").format(self.email)
            return six.u("{:s}{:s}{:s}").format(self.name, comment, email)
        raise NotImplementedError
class PGPMessage(Armorable, PGPObject):
@staticmethod
def dash_unescape(text):
return re.subn(r'^- -', '-', text, flags=re.MULTILINE)[0]
@staticmethod
def dash_escape(text):
return re.subn(r'^-', '- -', text, flags=re.MULTILINE)[0]
@property
def encrypters(self):
"""A ``set`` containing all key ids (if any) to which this message was encrypted."""
return set(m.encrypter for m in self._sessionkeys if isinstance(m, PKESessionKey))
@property
def filename(self):
"""If applicable, returns the original filename of the message. Otherwise, returns an empty string."""
if self.type == 'literal':
return self._message.filename
return ''
@property
def is_compressed(self):
"""``True`` if this message will be compressed when exported"""
return self._compression != CompressionAlgorithm.Uncompressed
@property
def is_encrypted(self):
"""``True`` if this message is encrypted; otherwise, ``False``"""
return isinstance(self._message, (SKEData, IntegrityProtectedSKEData))
@property
def is_sensitive(self):
"""``True`` if this message is marked sensitive; otherwise ``False``"""
return self.type == 'literal' and self._message.filename == '_CONSOLE'
@property
def is_signed(self):
"""
``True`` if this message is signed; otherwise, ``False``.
Should always be ``False`` if the message is encrypted.
"""
return len(self._signatures) > 0
@property
def issuers(self):
"""A ``set`` containing all key ids (if any) which have signed or encrypted this message."""
return self.encrypters | self.signers
@property
def magic(self):
if self.type == 'cleartext':
return "SIGNATURE"
return "MESSAGE"
@property
def message(self):
"""The message contents"""
if self.type == 'cleartext':
return self.bytes_to_text(self._message)
if self.type == 'literal':
return self._message.contents
if self.type == 'encrypted':
return self._message
@property
def signatures(self):
"""A ``list`` containing all signatures present in this message."""
return list(self._signatures)
@property
def signers(self):
"""A ``set`` containing all key ids (if any) which have signed this message."""
return set(m.signer for m in self._signatures)
@property
def detached_signature(self):
_sig = PGPDetachedSignature()
for sig in self._signatures:
_sig |= sig
return _sig
@property
def type(self):
##TODO: it might be better to use an Enum for the output of this
if isinstance(self._message, (six.string_types, six.binary_type, bytearray)):
return 'cleartext'
if isinstance(self._message, LiteralData):
return 'literal'
if isinstance(self._message, (SKEData, IntegrityProtectedSKEData)):
return 'encrypted'
raise NotImplementedError
def __init__(self):
"""
PGPMessage objects represent OpenPGP message compositions.
PGPMessage implements the `__str__` method, the output of which will be the message composition in
OpenPGP-compliant ASCII-armored format.
PGPMessage implements the `__bytes__` method, the output of which will be the message composition in
OpenPGP-compliant binary format.
Any signatures within the PGPMessage that are marked as being non-exportable will not be included in the output
of either of those methods.
"""
super(PGPMessage, self).__init__()
self._compression = CompressionAlgorithm.Uncompressed
self._message = None
self._mdc = None
self._signatures = SorteDeque()
self._sessionkeys = []
    def __bytearray__(self):
        # When compression is enabled, the entire packet sequence is wrapped in
        # a single CompressedData packet; otherwise packets are concatenated raw.
        if self.is_compressed:
            comp = CompressedData()
            comp.calg = self._compression
            comp.packets = [pkt for pkt in self]
            comp.update_hlen()
            return comp.__bytearray__()
        _bytes = bytearray()
        for pkt in self:
            _bytes += pkt.__bytearray__()
        return _bytes
    def __str__(self):
        # Cleartext-signed messages use the dash-escaped cleartext armor layout;
        # everything else defers to the generic Armorable output.
        if self.type == 'cleartext':
            tmpl = u"-----BEGIN PGP SIGNED MESSAGE-----\n" \
                   u"{hhdr:s}\n" \
                   u"{cleartext:s}\n" \
                   u"{signature:s}"
            # only add a Hash: header if we actually have at least one signature
            hashes = set(s.hash_algorithm.name for s in self.signatures)
            hhdr = 'Hash: {hashes:s}\n'.format(hashes=','.join(sorted(hashes))) if hashes else ''
            return tmpl.format(hhdr=hhdr,
                               cleartext=self.dash_escape(self.bytes_to_text(self._message)),
                               signature=super(PGPMessage, self).__str__())
        return super(PGPMessage, self).__str__()
    def __iter__(self):
        # Yields packets in the exact order they must appear when serialized.
        if self.type == 'cleartext':
            # cleartext: only the signature packets are actual OpenPGP packets
            for sig in self._signatures:
                for pkt in sig:
                    yield pkt
        elif self.is_encrypted:
            # encrypted: signatures, then session key packets, then ciphertext
            for sig in self._signatures:
                for pkt in sig:
                    yield pkt
            for pkt in self._sessionkeys:
                yield pkt
            yield self.message
        else:
            ##TODO: is it worth coming up with a way of disabling one-pass signing?
            # literal: one-pass signature packets first, then the payload (and
            # MDC, if any), then the signatures; every one-pass packet except
            # the last is marked nested
            for sig in self._signatures:
                ops = sig.make_onepass()
                if sig is not self._signatures[-1]:
                    ops.nested = True
                yield ops
            yield self._message
            if self._mdc is not None:  # pragma: no cover
                yield self._mdc
            for sig in self._signatures:
                for pkt in sig:
                    yield pkt
    def __or__(self, other):
        # Central packet-absorption dispatch: each branch merges one packet or
        # object type into this message. Branch order matters: Signature must
        # be promoted to PGPSignature before the PGPSignature branch runs.
        if isinstance(other, Marker):
            # Marker packets carry no content and are discarded
            return self
        if isinstance(other, CompressedData):
            # remember the algorithm and absorb the contained packets
            self._compression = other.calg
            for pkt in other.packets:
                self |= pkt
            return self
        if isinstance(other, (six.string_types, six.binary_type, bytearray)):
            # text/bytes payload; only the first payload wins
            if self._message is None:
                self._message = self.text_to_bytes(other)
            return self
        if isinstance(other, (LiteralData, SKEData, IntegrityProtectedSKEData)):
            if self._message is None:
                self._message = other
            return self
        if isinstance(other, MDC):
            if self._mdc is None:
                self._mdc = other
            return self
        if isinstance(other, OnePassSignature):
            # these are "generated" on the fly during composition
            return self
        if isinstance(other, Signature):
            # wrap a raw Signature packet, then fall through to the next branch
            other = PGPSignature() | other
        if isinstance(other, PGPSignature):
            self._signatures.insort(other)
            return self
        if isinstance(other, PGPDetachedSignature):
            for sig in other:
                self._signatures.insort(sig)
            return self
        if isinstance(other, (PKESessionKey, SKESessionKey)):
            self._sessionkeys.append(other)
            return self
        if isinstance(other, PGPMessage):
            # absorb another message wholesale
            self._message = other._message
            self._mdc = other._mdc
            self._compression = other._compression
            self._sessionkeys += other._sessionkeys
            self._signatures += other._signatures
            return self
        raise NotImplementedError(str(type(other)))
def __copy__(self):
msg = super(PGPMessage, self).__copy__()
msg._compression = self._compression
msg._message = copy.copy(self._message)
msg._mdc = copy.copy(self._mdc)
for sig in self._signatures:
msg |= copy.copy(sig)
for sk in self._sessionkeys:
msg |= copy.copy(sk)
return msg
    @classmethod
    def new(cls, message, **kwargs):
        """
        Create a new PGPMessage object.
        :param message: The message to be stored.
        :type message: ``str``, ``unicode``, ``bytes``, ``bytearray``
        :returns: :py:obj:`PGPMessage`
        The following optional keyword arguments can be used with :py:meth:`PGPMessage.new`:
        :keyword file: if True, ``message`` should be a path to a file. The contents of that file will be read and used
                       as the contents of the message.
        :type file: ``bool``
        :keyword cleartext: if True, the message will be cleartext with inline signatures.
        :type cleartext: ``bool``
        :keyword sensitive: if True, the filename will be set to '_CONSOLE' to signal other OpenPGP clients to treat
                            this message as being 'for your eyes only'. Ignored if cleartext is True.
        :type sensitive: ``bool``
        :keyword format: Set the message format identifier. Ignored if cleartext is True.
        :type format: ``str``
        :keyword compression: Set the compression algorithm for the new message.
                              Defaults to :py:obj:`CompressionAlgorithm.ZIP`. Ignored if cleartext is True.
        :keyword encoding: Set the Charset header for the message.
        :type encoding: ``str`` representing a valid codec in codecs
        """
        # TODO: have 'codecs' above (in :type encoding:) link to python documentation page on codecs
        cleartext = kwargs.pop('cleartext', False)
        format = kwargs.pop('format', None)
        sensitive = kwargs.pop('sensitive', False)
        compression = kwargs.pop('compression', CompressionAlgorithm.ZIP)
        file = kwargs.pop('file', False)
        charset = kwargs.pop('encoding', None)
        filename = ''
        mtime = datetime.utcnow()
        msg = PGPMessage()
        if charset:
            msg.charset = charset
        # if format in 'tu' and isinstance(message, (six.binary_type, bytearray)):
        #     # if message format is text or unicode and we got binary data, we'll need to transcode it to UTF-8
        #     message =
        if file and os.path.isfile(message):
            # read the file into a pre-sized buffer and use its mtime
            filename = message
            message = bytearray(os.path.getsize(filename))
            mtime = datetime.utcfromtimestamp(os.path.getmtime(filename))
            with open(filename, 'rb') as mf:
                mf.readinto(message)
        # if format is None, we can try to detect it
        if format is None:
            if isinstance(message, six.text_type):
                # message is definitely UTF-8 already
                format = 'u'
            elif cls.is_ascii(message):
                # message is probably text
                format = 't'
            else:
                # message is probably binary
                format = 'b'
        # if message is a binary type and we're building a textual message, we need to transcode the bytes to UTF-8
        if isinstance(message, (six.binary_type, bytearray)) and (cleartext or format in 'tu'):
            message = message.decode(charset or 'utf-8')
        if cleartext:
            msg |= message
        else:
            # load literal data
            lit = LiteralData()
            lit._contents = bytearray(msg.text_to_bytes(message))
            # '_CONSOLE' is the conventional "for your eyes only" marker
            lit.filename = '_CONSOLE' if sensitive else os.path.basename(filename)
            lit.mtime = mtime
            lit.format = format
            # if cls.is_ascii(message):
            #     lit.format = 't'
            lit.update_hlen()
            msg |= lit
            msg._compression = compression
        return msg
    def encrypt(self, passphrase, sessionkey=None, **prefs):
        """
        Encrypt the contents of this message using a passphrase.
        :param passphrase: The passphrase to use for encrypting this message.
        :type passphrase: ``str``, ``unicode``, ``bytes``
        :optional param sessionkey: Provide a session key to use when encrypting something. Default is ``None``.
                                    If ``None``, a session key of the appropriate length will be generated randomly.
                                    .. warning::
                                        Care should be taken when making use of this option! Session keys *absolutely need*
                                        to be unpredictable! Use the ``gen_key()`` method on the desired
                                        :py:obj:`~constants.SymmetricKeyAlgorithm` to generate the session key!
        :type sessionkey: ``bytes``, ``str``
        :raises: :py:exc:`~errors.PGPEncryptionError`
        :returns: A new :py:obj:`PGPMessage` containing the encrypted contents of this message.
        """
        cipher_algo = prefs.pop('cipher', SymmetricKeyAlgorithm.AES256)
        hash_algo = prefs.pop('hash', HashAlgorithm.SHA256)
        # set up a new SKESessionKeyV4
        skesk = SKESessionKeyV4()
        skesk.s2k.usage = 255
        # specifier 3 selects the Iterated and Salted S2K (RFC 4880 sec. 3.7.1.3)
        skesk.s2k.specifier = 3
        skesk.s2k.halg = hash_algo
        skesk.s2k.encalg = cipher_algo
        skesk.s2k.count = skesk.s2k.halg.tuned_count
        if sessionkey is None:
            sessionkey = cipher_algo.gen_key()
        skesk.encrypt_sk(passphrase, sessionkey)
        # drop the local reference to the passphrase as soon as it is consumed
        del passphrase
        msg = PGPMessage() | skesk
        if not self.is_encrypted:
            # encrypt our serialized form into an integrity-protected container
            skedata = IntegrityProtectedSKEDataV1()
            skedata.encrypt(sessionkey, cipher_algo, self.__bytes__())
            msg |= skedata
        else:
            # already encrypted: just add another session key packet for it
            msg |= self
        return msg
    def decrypt(self, passphrase):
        """
        Attempt to decrypt this message using a passphrase.
        :param passphrase: The passphrase to use to attempt to decrypt this message.
        :type passphrase: ``str``, ``unicode``, ``bytes``
        :raises: :py:exc:`~errors.PGPDecryptionError` if decryption failed for any reason.
        :returns: A new :py:obj:`PGPMessage` containing the decrypted contents of this message
        """
        if not self.is_encrypted:
            raise PGPError("This message is not encrypted!")
        # try each symmetric (passphrase-based) session key packet in turn;
        # the for/else raises only if every candidate fails
        for skesk in iter(sk for sk in self._sessionkeys if isinstance(sk, SKESessionKey)):
            try:
                symalg, key = skesk.decrypt_sk(passphrase)
                decmsg = PGPMessage()
                decmsg.parse(self.message.decrypt(key, symalg))
            except (TypeError, ValueError, NotImplementedError, PGPDecryptionError):
                # wrong passphrase for this packet (or unsupported); try the next
                continue
            else:
                # success: drop the passphrase reference and stop searching
                del passphrase
                break
        else:
            raise PGPDecryptionError("Decryption failed")
        return decmsg
def parse(self, packet):
unarmored = self.ascii_unarmor(packet)
data = unarmored['body']
if unarmored['magic'] is not None and unarmored['magic'] not in ['MESSAGE', 'SIGNATURE']:
raise ValueError('Expected: MESSAGE. Got: {}'.format(str(unarmored['magic'])))
if unarmored['headers'] is not None:
self.ascii_headers = unarmored['headers']
# cleartext signature
if unarmored['magic'] == 'SIGNATURE':
# the composition for this will be the 'cleartext' as a str,
# followed by one or more signatures (each one loaded into a PGPSignature)
self |= self.dash_unescape(unarmored['cleartext'])
while len(data) > 0:
pkt = Packet(data)
if not isinstance(pkt, Signature): # pragma: no cover
warnings.warn("Discarded unexpected packet: {:s}".format(pkt.__class__.__name__), stacklevel=2)
continue
self |= PGPSignature() | pkt
else:
while len(data) > 0:
self |= Packet(data)
class PGPKey(Armorable, ParentRef, PGPObject):
"""
11.1. Transferable Public Keys
OpenPGP users may transfer public keys. The essential elements of a
transferable public key are as follows:
- One Public-Key packet
- Zero or more revocation signatures
- One or more User ID packets
- After each User ID packet, zero or more Signature packets
(certifications)
- Zero or more User Attribute packets
- After each User Attribute packet, zero or more Signature packets
(certifications)
- Zero or more Subkey packets
- After each Subkey packet, one Signature packet, plus optionally a
revocation
The Public-Key packet occurs first. Each of the following User ID
packets provides the identity of the owner of this public key. If
there are multiple User ID packets, this corresponds to multiple
means of identifying the same unique individual user; for example, a
user may have more than one email address, and construct a User ID
for each one.
Immediately following each User ID packet, there are zero or more
Signature packets. Each Signature packet is calculated on the
immediately preceding User ID packet and the initial Public-Key
packet. The signature serves to certify the corresponding public key
and User ID. In effect, the signer is testifying to his or her
belief that this public key belongs to the user identified by this
User ID.
Within the same section as the User ID packets, there are zero or
more User Attribute packets. Like the User ID packets, a User
Attribute packet is followed by zero or more Signature packets
calculated on the immediately preceding User Attribute packet and the
initial Public-Key packet.
User Attribute packets and User ID packets may be freely intermixed
in this section, so long as the signatures that follow them are
maintained on the proper User Attribute or User ID packet.
After the User ID packet or Attribute packet, there may be zero or
more Subkey packets. In general, subkeys are provided in cases where
the top-level public key is a signature-only key. However, any V4
key may have subkeys, and the subkeys may be encryption-only keys,
signature-only keys, or general-purpose keys. V3 keys MUST NOT have
subkeys.
Each Subkey packet MUST be followed by one Signature packet, which
should be a subkey binding signature issued by the top-level key.
For subkeys that can issue signatures, the subkey binding signature
MUST contain an Embedded Signature subpacket with a primary key
binding signature (0x19) issued by the subkey on the top-level key.
Subkey and Key packets may each be followed by a revocation Signature
packet to indicate that the key is revoked. Revocation signatures
are only accepted if they are issued by the key itself, or by a key
that is authorized to issue revocations via a Revocation Key
subpacket in a self-signature by the top-level key.
Transferable public-key packet sequences may be concatenated to allow
transferring multiple public keys in one operation.
11.2. Transferable Secret Keys
OpenPGP users may transfer secret keys. The format of a transferable
secret key is the same as a transferable public key except that
secret-key and secret-subkey packets are used instead of the public
key and public-subkey packets. Implementations SHOULD include self-
signatures on any user IDs and subkeys, as this allows for a complete
public key to be automatically extracted from the transferable secret
key. Implementations MAY choose to omit the self-signatures,
especially if a transferable public key accompanies the transferable
secret key.
"""
__zero_keyid = bytearray(8)
__zero_keyid_str = '0000000000000000'
@property
def created(self):
"""A :py:obj:`~datetime.datetime` object of the creation date and time of the key, in UTC."""
return self._key.created
    @property
    def expires_at(self):
        """A :py:obj:`~datetime.datetime` object of when this key is to be considered expired, if any. Otherwise, ``None``"""
        # The effective expiration is the earliest key_expiration found on any
        # uid self-signature or on any direct key self-signature.
        try:
            expires = min(sig.key_expiration for sig in itertools.chain(iter(uid.selfsig for uid in self.userids), self.self_signatures)
                          if sig.key_expiration is not None)
        except ValueError:
            # min() on an empty sequence raises ValueError: no expiration is set
            return None
        else:
            # key_expiration is an offset from the key creation time
            return (self.created + expires)
@property
def fingerprint(self):
"""The fingerprint of this key, as a :py:obj:`~pgpy.types.Fingerprint` object."""
if self._key:
return self._key.fingerprint
@property
def hashdata(self):
# when signing a key, only the public portion of the keys is hashed
# if this is a private key, the private components of the key material need to be left out
if self.is_public:
return self._key.__bytearray__()[len(self._key.header):]
pub = self._key.pubkey()
return pub.__bytearray__()[len(pub.header):]
@property
def is_expired(self):
"""``True`` if this key is expired, otherwise ``False``"""
expires = self.expires_at
if expires is not None:
return expires <= datetime.utcnow()
return False
@property
def is_primary(self):
"""``True`` if this is a primary key; ``False`` if this is a subkey"""
return isinstance(self._key, Primary) and not isinstance(self._key, Sub)
@property
def is_protected(self):
"""``True`` if this is a private key that is protected with a passphrase, otherwise ``False``"""
if self.is_public:
return False
return self._key.protected
@property
def is_public(self):
"""``True`` if this is a public key, otherwise ``False``"""
return isinstance(self._key, Public) and not isinstance(self._key, Private)
@property
def is_unlocked(self):
"""``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``"""
if self.is_public:
return True
if not self.is_protected:
return True
return self._key.unlocked
@property
def key_material(self):
return self._key.keymaterial
@property
def key_algorithm(self):
"""The :py:obj:`constants.PubKeyAlgorithm` pertaining to this key"""
return self._key.pkalg
    @property
    def key_size(self):
        """*new in 0.4.1*
        The size pertaining to this key. ``int`` for non-EC key algorithms; :py:obj:`constants.EllipticCurveOID` for EC keys.
        """
        # EC keys are "sized" by their curve OID rather than a bit length
        if self.key_algorithm in {PubKeyAlgorithm.ECDSA, PubKeyAlgorithm.ECDH}:
            return self._key.keymaterial.oid
        # otherwise: the bit length of the first key material element
        return next(iter(self._key.keymaterial)).bit_length()
@property
def magic(self):
return '{:s} KEY BLOCK'.format('PUBLIC' if (isinstance(self._key, Public) and not isinstance(self._key, Private)) else
'PRIVATE' if isinstance(self._key, Private) else '')
    @property
    def pubkey(self):
        """If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with
        all the trimmings. Otherwise, returns ``None``
        """
        if not self.is_public:
            # NOTE(review): the public half is rebuilt whenever _sibling is None
            # OR already a weakref - confirm whether rebuilding over a live
            # sibling reference is intended
            if self._sibling is None or isinstance(self._sibling, weakref.ref):
                # create a new key shell
                pub = PGPKey()
                pub.ascii_headers = self.ascii_headers.copy()
                # get the public half of the primary key
                pub._key = self._key.pubkey()
                # get the public half of each subkey
                for skid, subkey in self.subkeys.items():
                    pub |= subkey.pubkey
                # copy user ids and user attributes
                for uid in self._uids:
                    pub |= copy.copy(uid)
                # copy signatures that weren't copied with uids
                for sig in self._signatures:
                    if sig.parent is None:
                        pub |= copy.copy(sig)
                # connect the two halves using weak references so neither side
                # keeps the other alive
                self._sibling = weakref.ref(pub)
                pub._sibling = weakref.ref(self)
            return self._sibling()
        return None
    @pubkey.setter
    def pubkey(self, pubkey):
        # Attach an externally-loaded public half to this private key.
        if self.is_public:
            raise TypeError("cannot add public sibling to pubkey")
        if not pubkey.is_public:
            raise TypeError("sibling must be public")
        if self._sibling is not None and self._sibling() is not None:
            # a live sibling is already attached; refuse to replace it
            raise ValueError("public key reference already set")
        if pubkey.fingerprint != self.fingerprint:
            # the two halves must describe the same key
            raise ValueError("key fingerprint mismatch")
        # TODO: sync packets with sibling
        self._sibling = weakref.ref(pubkey)
        pubkey._sibling = weakref.ref(self)
@property
def self_signatures(self):
keyid, keytype = (self.fingerprint.keyid, SignatureType.DirectlyOnKey) if self.is_primary \
else (self.parent.fingerprint.keyid, SignatureType.Subkey_Binding)
##TODO: filter out revoked signatures as well
for sig in iter(sig for sig in self._signatures
if all([sig.type == keytype, sig.signer == keyid, not sig.is_expired])):
yield sig
@property
def signers(self):
"""A ``set`` of key ids of keys that were used to sign this key"""
return {sig.signer for sig in self._signatures}
@property
def signatures(self):
"""A ``list`` containing all signatures present in this key.."""
return list(self._signatures)
@property
def revocation_signatures(self):
keyid, keytype = (self.fingerprint.keyid, SignatureType.KeyRevocation) if self.is_primary \
else (self.parent.fingerprint.keyid, SignatureType.SubkeyRevocation)
for sig in iter(sig for sig in self._signatures
if all([sig.type == keytype, sig.signer == keyid, not sig.is_expired])):
yield sig
@property
def subkeys(self):
"""An :py:obj:`~collections.OrderedDict` of subkeys bound to this primary key, if applicable,
selected by 16-character keyid."""
return self._children
@property
def userids(self):
"""A ``list`` of :py:obj:`PGPUID` objects containing User ID information about this key"""
return [ u for u in self._uids if u.is_uid ]
@property
def userattributes(self):
"""A ``list`` of :py:obj:`PGPUID` objects containing one or more images associated with this key"""
return [u for u in self._uids if u.is_ua]
    def usage_flags(self, user=None):
        """
        Get the set of usage flags this key has.
        :param user: If specified, it is used to select the uid from which flags are loaded.
                     A text string to match name, comment, or email address against.
        :type user: ``str``, ``unicode``
        :return: a ``set`` of :py:obj:`KeyFlags` of this key.
        """
        if self.is_primary:
            if user is not None:
                user = self.get_uid(user)
            elif len(self._uids) == 0:
                # no uids at all: fall back to the certification-only minimum
                return {KeyFlags.Certify}
            else:
                user = next(iter(self.userids))
            # RFC 4880 says that primary keys *must* be capable of certification
            return {KeyFlags.Certify} | user.selfsig.key_flags
        # NOTE(review): raises StopIteration when a subkey has no valid binding
        # signature - confirm callers guarantee one exists
        return next(self.self_signatures).key_flags
    @classmethod
    def new(cls, key_algorithm, key_size):
        """
        Generate a new PGP key
        :param key_algorithm: Key algorithm to use.
        :type key_algorithm: A :py:obj:`~constants.PubKeyAlgorithm`
        :param key_size: Key size in bits, unless `key_algorithm` is :py:obj:`~constants.PubKeyAlgorithm.ECDSA` or
                         :py:obj:`~constants.PubKeyAlgorithm.ECDH`, in which case it should be the Curve OID to use.
        :type key_size: ``int`` or :py:obj:`~constants.EllipticCurveOID`
        :return: A newly generated :py:obj:`PGPKey`
        """
        # new private key shell first
        key = PGPKey()
        # the sign-only/encrypt-only RSA variants are deprecated; silently
        # upgrade to the general-purpose RSA algorithm with a warning
        if key_algorithm in {PubKeyAlgorithm.RSAEncrypt, PubKeyAlgorithm.RSASign}:  # pragma: no cover
            warnings.warn('{:s} is deprecated - generating key using RSAEncryptOrSign'.format(key_algorithm.name))
            key_algorithm = PubKeyAlgorithm.RSAEncryptOrSign
        # generate some key data to match key_algorithm and key_size
        key._key = PrivKeyV4.new(key_algorithm, key_size)
        return key
    def __init__(self):
        """
        PGPKey objects represent OpenPGP compliant keys along with all of their associated data.
        PGPKey implements the `__str__` method, the output of which will be the key composition in
        OpenPGP-compliant ASCII-armored format.
        PGPKey implements the `__bytes__` method, the output of which will be the key composition in
        OpenPGP-compliant binary format.
        Any signatures within the PGPKey that are marked as being non-exportable will not be included in the output
        of either of those methods.
        """
        super(PGPKey, self).__init__()
        self._key = None  # the underlying (Pub/Priv)Key packet
        self._children = collections.OrderedDict()  # subkeys, keyed by keyid
        self._signatures = SorteDeque()  # signatures on this key, kept sorted
        self._uids = SorteDeque()  # PGPUID objects (user ids and user attributes)
        self._sibling = None  # weakref to the public/private counterpart, if attached
    def __bytearray__(self):
        # Serialize in transferable-key order: key packet, key signatures,
        # uids with their signatures, then subkeys.
        _bytes = bytearray()
        # us
        _bytes += self._key.__bytearray__()
        # our signatures; ignore embedded signatures and non-exportable ones
        for sig in iter(s for s in self._signatures if not s.embedded and s.exportable):
            _bytes += sig.__bytearray__()
        # one or more User IDs, followed by their signatures
        for uid in self._uids:
            _bytes += uid._uid.__bytearray__()
            for s in [s for s in uid._signatures if s.exportable]:
                _bytes += s.__bytearray__()
        # subkeys
        for sk in self._children.values():
            _bytes += sk.__bytearray__()
        return _bytes
def __repr__(self):
if self._key is not None:
return "<PGPKey [{:s}][0x{:s}] at 0x{:02X}>" \
"".format(self._key.__class__.__name__, self.fingerprint.keyid, id(self))
return "<PGPKey [unknown] at 0x{:02X}>" \
"".format(id(self))
def __contains__(self, item):
if isinstance(item, PGPKey): # pragma: no cover
return item.fingerprint.keyid in self.subkeys
if isinstance(item, Fingerprint): # pragma: no cover
return item.keyid in self.subkeys
if isinstance(item, PGPUID):
return item in self._uids
if isinstance(item, PGPSignature):
return item in self._signatures
raise TypeError
    def __iter__(self):
        # Yield raw packets in key-block order: key packet, uid packets,
        # subkey packets, then non-embedded signature packets.
        yield self._key
        for uid in self._uids:
            for uidpacket in uid:
                yield uidpacket
        for subkey in self._children.values():
            for subkeypacket in subkey:
                yield subkeypacket
        for sig in self._signatures:
            # embedded signatures are serialized inside their host signature
            if not sig.embedded:
                for sigpacket in sig:
                    yield sigpacket
    def __or__(self, other, from_sib=False):
        # Absorb a packet or PGP* object into this key; `from_sib` guards
        # against infinite mutual recursion when mirroring to the sibling.
        if isinstance(other, Key) and self._key is None:
            self._key = other
        elif isinstance(other, PGPKey) and not other.is_primary and other.is_public == self.is_public:
            # adopt a subkey of matching visibility (public/private)
            other._parent = self
            self._children[other.fingerprint.keyid] = other
        elif isinstance(other, PGPSignature):
            self._signatures.insort(other)
            # if this is a subkey binding signature that has embedded primary key binding signatures, add them to parent
            if other.type == SignatureType.Subkey_Binding:
                for es in iter(pkb for pkb in other._signature.subpackets['EmbeddedSignature']):
                    esig = PGPSignature() | es
                    esig._parent = other
                    self._signatures.insort(esig)
        elif isinstance(other, PGPUID):
            other._parent = weakref.ref(self)
            self._uids.insort(other)
        else:
            raise TypeError("unsupported operand type(s) for |: '{:s}' and '{:s}'"
                            "".format(self.__class__.__name__, other.__class__.__name__))
        # mirror the addition onto the sibling half, if one is attached
        if isinstance(self._sibling, weakref.ref) and not from_sib:
            sib = self._sibling()
            if sib is None:
                # the sibling has been garbage collected; drop the dead ref
                self._sibling = None
            else:  # pragma: no cover
                sib.__or__(copy.copy(other), True)
        return self
def __copy__(self):
key = super(PGPKey, self).__copy__()
key._key = copy.copy(self._key)
for uid in self._uids:
key |= copy.copy(uid)
for id, subkey in self._children.items():
key |= copy.copy(subkey)
for sig in self._signatures:
if sig.embedded:
# embedded signatures don't need to be explicitly copied
continue
key |= copy.copy(sig)
return key
    def protect(self, passphrase, enc_alg, hash_alg):
        """
        Add a passphrase to a private key. If the key is already passphrase protected, it should be unlocked before
        a new passphrase can be specified.
        Has no effect on public keys.
        :param passphrase: A passphrase to protect the key with
        :type passphrase: ``str``, ``unicode``
        :param enc_alg: Symmetric encryption algorithm to use to protect the key
        :type enc_alg: :py:obj:`~constants.SymmetricKeyAlgorithm`
        :param hash_alg: Hash algorithm to use in the String-to-Key specifier
        :type hash_alg: :py:obj:`~constants.HashAlgorithm`
        """
        ##TODO: specify strong defaults for enc_alg and hash_alg
        if self.is_public:
            # we can't protect public keys because only private key material is ever protected
            warnings.warn("Public keys cannot be passphrase-protected", stacklevel=2)
            return
        if self.is_protected and not self.is_unlocked:
            # we can't protect a key that is already protected unless it is unlocked first
            warnings.warn("This key is already protected with a passphrase - "
                          "please unlock it before attempting to specify a new passphrase", stacklevel=2)
            return
        # protect the primary key and every subkey with the same passphrase
        for sk in itertools.chain([self], self.subkeys.values()):
            sk._key.protect(passphrase, enc_alg, hash_alg)
        # drop the local reference to the passphrase once it has been consumed
        del passphrase
    @contextlib.contextmanager
    def unlock(self, passphrase):
        """
        Context manager method for unlocking passphrase-protected private keys. Has no effect if the key is not both
        private and passphrase-protected.
        When the context managed block is exited, the unprotected private key material is removed.
        Example::
            privkey = PGPKey()
            privkey.parse(keytext)
            assert privkey.is_protected
            assert privkey.is_unlocked is False
            # privkey.sign("some text") <- this would raise an exception
            with privkey.unlock("TheCorrectPassphrase"):
                # privkey is now unlocked
                assert privkey.is_unlocked
                # so you can do things with it
                sig = privkey.sign("some text")
            # privkey is no longer unlocked
            assert privkey.is_unlocked is False
        Emits a :py:obj:`~warnings.UserWarning` if the key is public or not passphrase protected.
        :param str passphrase: The passphrase to be used to unlock this key.
        :raises: :py:exc:`~pgpy.errors.PGPDecryptionError` if the passphrase is incorrect
        """
        if self.is_public:
            # we can't unprotect public keys because only private key material is ever protected
            warnings.warn("Public keys cannot be passphrase-protected", stacklevel=3)
            yield self
            return
        if not self.is_protected:
            # we can't unprotect private keys that are not protected, because there is no ciphertext to decrypt
            warnings.warn("This key is not protected with a passphrase", stacklevel=3)
            yield self
            return
        try:
            # unprotect the primary key and all subkeys with the same passphrase
            for sk in itertools.chain([self], self.subkeys.values()):
                sk._key.unprotect(passphrase)
            del passphrase
            yield self
        finally:
            # clean up here by deleting the previously decrypted secret key material
            for sk in itertools.chain([self], self.subkeys.values()):
                sk._key.keymaterial.clear()
    def add_uid(self, uid, selfsign=True, **prefs):
        """
        Add a User ID to this key.
        :param uid: The user id to add
        :type uid: :py:obj:`~pgpy.PGPUID`
        :param selfsign: Whether or not to self-sign the user id before adding it
        :type selfsign: ``bool``
        Valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`.
        Any such keyword arguments are ignored if selfsign is ``False``
        """
        # parent must be set before certify() so the self-signature can find it
        uid._parent = self
        if selfsign:
            uid |= self.certify(uid, SignatureType.Positive_Cert, **prefs)
        self |= uid
def get_uid(self, search):
"""
Find and return a User ID that matches the search string given.
:param search: A text string to match name, comment, or email address against
:type search: ``str``, ``unicode``
:return: The first matching :py:obj:`~pgpy.PGPUID`, or ``None`` if no matches were found.
"""
if self.is_primary:
return next((u for u in self._uids if search in filter(lambda a: a is not None, (u.name, u.comment, u.email))), None)
return self.parent.get_uid(search)
def del_uid(self, search):
"""
Find and remove a user id that matches the search string given. This method does not modify the corresponding
:py:obj:`~pgpy.PGPUID` object; it only removes it from the list of user ids on the key.
:param search: A text string to match name, comment, or email address against
:type search: ``str``, ``unicode``
"""
u = self.get_uid(search)
if u is None:
raise KeyError("uid '{:s}' not found".format(search))
u._parent = None
self._uids.remove(u)
    def add_subkey(self, key, **prefs):
        """
        Add a key as a subkey to this key.
        :param key: A private :py:obj:`~pgpy.PGPKey` that does not have any subkeys of its own
        :keyword usage: A ``set`` of key usage flags, as :py:obj:`~constants.KeyFlags` for the subkey to be added.
        :type usage: ``set``
        Other valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`
        """
        if self.is_public:
            raise PGPError("Cannot add a subkey to a public key. Add the subkey to the private component first!")
        if key.is_public:
            raise PGPError("Cannot add a public key as a subkey to this key")
        if key.is_primary:
            if len(key._children) > 0:
                raise PGPError("Cannot add a key that already has subkeys as a subkey!")
            # convert key into a subkey: rebuild its packet as a private subkey
            # packet carrying the same algorithm, timestamp, and key material
            npk = PrivSubKeyV4()
            npk.pkalg = key._key.pkalg
            npk.created = key._key.created
            npk.keymaterial = key._key.keymaterial
            key._key = npk
            key._key.update_hlen()
        self._children[key.fingerprint.keyid] = key
        key._parent = self
        ##TODO: skip this step if the key already has a subkey binding signature
        # bind the subkey to this primary key with a binding signature
        bsig = self.bind(key, **prefs)
        key |= bsig
    def _sign(self, subject, sig, **prefs):
        """
        The actual signing magic happens here.
        :param subject: The subject to sign
        :param sig: The :py:obj:`PGPSignature` object the new signature is to be encapsulated within
        :returns: ``sig``, after the signature is added to it.
        """
        user = prefs.pop('user', None)
        uid = None
        if user is not None:
            uid = self.get_uid(user)
        else:
            # no uid specified: use this key's first uid, falling back to the
            # parent's first uid for subkeys
            uid = next(iter(self.userids), None)
            if uid is None and self.parent is not None:
                uid = next(iter(self.parent.userids), None)
        if sig.hash_algorithm is None:
            # NOTE(review): crashes if uid is None here (no uids anywhere and no
            # explicit hash chosen) - confirm callers always supply one of the two
            sig._signature.halg = uid.selfsig.hashprefs[0]
        if uid is not None and sig.hash_algorithm not in uid.selfsig.hashprefs:
            warnings.warn("Selected hash algorithm not in key preferences", stacklevel=4)
        # signature options that can be applied at any level
        expires = prefs.pop('expires', None)
        notation = prefs.pop('notation', None)
        revocable = prefs.pop('revocable', True)
        policy_uri = prefs.pop('policy_uri', None)
        if expires is not None:
            # expires should be a timedelta, so if it's a datetime, turn it into a timedelta
            if isinstance(expires, datetime):
                expires = expires - self.created
            sig._signature.subpackets.addnew('SignatureExpirationTime', hashed=True, expires=expires)
        if revocable is False:
            sig._signature.subpackets.addnew('Revocable', hashed=True, bflag=revocable)
        if notation is not None:
            for name, value in notation.items():
                # mark all notations as human readable unless value is a bytearray
                flags = NotationDataFlags.HumanReadable
                if isinstance(value, bytearray):
                    flags = 0x00
                sig._signature.subpackets.addnew('NotationData', hashed=True, flags=flags, name=name, value=value)
        if policy_uri is not None:
            sig._signature.subpackets.addnew('Policy', hashed=True, uri=policy_uri)
        if user is not None and uid is not None:
            signers_uid = "{:s}".format(uid)
            sig._signature.subpackets.addnew('SignersUserID', hashed=True, userid=signers_uid)
        # handle an edge case for timestamp signatures vs standalone signatures
        if sig.type == SignatureType.Timestamp and len(sig._signature.subpackets._hashed_sp) > 1:
            sig._signature.sigtype = SignatureType.Standalone
        # hash the subject and record the leftmost two digest octets
        sigdata = sig.hashdata(subject)
        h2 = sig.hash_algorithm.hasher
        h2.update(sigdata)
        sig._signature.hash2 = bytearray(h2.digest()[:2])
        # produce the actual asymmetric signature over the hashed data
        _sig = self._key.sign(sigdata, getattr(hashes, sig.hash_algorithm.name)())
        if _sig is NotImplemented:
            raise NotImplementedError(self.key_algorithm)
        sig._signature.signature.from_signer(_sig)
        sig._signature.update_hlen()
        return sig
    @KeyAction(KeyFlags.Sign, is_unlocked=True, is_public=False)
    def sign(self, subject, **prefs):
        """
        Sign text, a message, or a timestamp using this key.
        :param subject: The text to be signed
        :type subject: ``str``, :py:obj:`~pgpy.PGPMessage`, ``None``
        :raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
        :raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
        :returns: :py:obj:`PGPSignature`
        The following optional keyword arguments can be used with :py:meth:`PGPKey.sign`, as well as
        :py:meth:`PGPKey.certify`, :py:meth:`PGPKey.revoke`, and :py:meth:`PGPKey.bind`:
        :keyword expires: Set an expiration date for this signature
        :type expires: :py:obj:`~datetime.datetime`, :py:obj:`~datetime.timedelta`
        :keyword notation: Add arbitrary notation data to this signature.
        :type notation: ``dict``
        :keyword policy_uri: Add a URI to the signature that should describe the policy under which the signature
                             was issued.
        :type policy_uri: ``str``
        :keyword revocable: If ``False``, this signature will be marked non-revocable
        :type revocable: ``bool``
        :keyword user: Specify which User ID to use when creating this signature. Also adds a "Signer's User ID"
                       to the signature.
        :type user: ``str``
        """
        sig_type = SignatureType.BinaryDocument
        hash_algo = prefs.pop('hash', None)
        # a None subject produces a timestamp signature
        if subject is None:
            sig_type = SignatureType.Timestamp
        # cleartext messages are signed over their canonical text form
        if isinstance(subject, PGPMessage):
            if subject.type == 'cleartext':
                sig_type = SignatureType.CanonicalDocument
            subject = subject.message
        sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid)
        return self._sign(subject, sig, **prefs)
@KeyAction(KeyFlags.Certify, is_unlocked=True, is_public=False)
def certify(self, subject, level=SignatureType.Generic_Cert, **prefs):
    """
    Sign a key or a user id within a key.

    :param subject: The user id or key to be certified.
    :type subject: :py:obj:`PGPKey`, :py:obj:`PGPUID`
    :param level: :py:obj:`~constants.SignatureType.Generic_Cert`, :py:obj:`~constants.SignatureType.Persona_Cert`,
                  :py:obj:`~constants.SignatureType.Casual_Cert`, or :py:obj:`~constants.SignatureType.Positive_Cert`.
                  Only used if subject is a :py:obj:`PGPUID`; otherwise, it is ignored.
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
    :returns: :py:obj:`PGPSignature`

    In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
    keyword arguments can be used with :py:meth:`PGPKey.certify`.

    These optional keywords only make sense, and thus only have an effect, when self-signing a key or User ID:

    :keyword usage: A ``set`` of key usage flags, as :py:obj:`~constants.KeyFlags`.
                    This keyword is ignored for non-self-certifications.
    :type usage: ``set``
    :keyword ciphers: A list of preferred symmetric ciphers, as :py:obj:`~constants.SymmetricKeyAlgorithm`.
                      This keyword is ignored for non-self-certifications.
    :type ciphers: ``list``
    :keyword hashes: A list of preferred hash algorithms, as :py:obj:`~constants.HashAlgorithm`.
                     This keyword is ignored for non-self-certifications.
    :type hashes: ``list``
    :keyword compression: A list of preferred compression algorithms, as :py:obj:`~constants.CompressionAlgorithm`.
                          This keyword is ignored for non-self-certifications.
    :type compression: ``list``
    :keyword key_expiration: Specify a key expiration date for when this key should expire, or a
                             :py:obj:`~datetime.timedelta` of how long after the key was created it should expire.
                             This keyword is ignored for non-self-certifications.
    :type key_expiration: :py:obj:`datetime.datetime`, :py:obj:`datetime.timedelta`
    :keyword keyserver: Specify the URI of the preferred key server of the user.
                        This keyword is ignored for non-self-certifications.
    :type keyserver: ``str``, ``unicode``, ``bytes``
    :keyword primary: Whether or not to consider the certified User ID as the primary one.
                      This keyword is ignored for non-self-certifications, and any certifications directly on keys.
    :type primary: ``bool``

    These optional keywords only make sense, and thus only have an effect, when signing another key or User ID:

    :keyword trust: Specify the level and amount of trust to assert when certifying a public key. Should be a tuple
                    of two ``int`` s, specifying the trust level and trust amount. See
                    `RFC 4880 Section 5.2.3.13. Trust Signature <https://tools.ietf.org/html/rfc4880#section-5.2.3.13>`_
                    for more on what these values mean.
    :type trust: ``tuple`` of two ``int`` s
    :keyword regex: Specify a regular expression to constrain the specified trust signature in the resulting signature.
                    Symbolically signifies that the specified trust signature only applies to User IDs which match
                    this regular expression.
                    This is meaningless without also specifying trust level and amount.
    :type regex: ``str``
    """
    hash_algo = prefs.pop('hash', None)

    # `level` only matters for User ID certifications; a certification made
    # directly on a key always becomes a DirectlyOnKey signature.
    sig_type = level
    if isinstance(subject, PGPKey):
        sig_type = SignatureType.DirectlyOnKey

    sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid)

    # signature options that only make sense in certifications
    usage = prefs.pop('usage', None)
    exportable = prefs.pop('exportable', None)

    if usage is not None:
        sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)

    if exportable is not None:
        sig._signature.subpackets.addnew('ExportableCertification', hashed=True, bflag=exportable)

    # Determine the fingerprint of the key being certified so we can tell a
    # self-certification apart from a certification of someone else's key/UID.
    keyfp = self.fingerprint
    if isinstance(subject, PGPKey):
        keyfp = subject.fingerprint
    if isinstance(subject, PGPUID) and subject._parent is not None:
        keyfp = subject._parent.fingerprint

    if keyfp == self.fingerprint:
        # signature options that only make sense in self-certifications
        cipher_prefs = prefs.pop('ciphers', None)
        hash_prefs = prefs.pop('hashes', None)
        compression_prefs = prefs.pop('compression', None)
        key_expires = prefs.pop('key_expiration', None)
        keyserver_flags = prefs.pop('keyserver_flags', None)
        keyserver = prefs.pop('keyserver', None)
        primary_uid = prefs.pop('primary', None)

        if key_expires is not None:
            # key expires should be a timedelta, so if it's a datetime, turn it into a timedelta
            if isinstance(key_expires, datetime):
                key_expires = key_expires - self.created

            sig._signature.subpackets.addnew('KeyExpirationTime', hashed=True, expires=key_expires)

        if cipher_prefs is not None:
            sig._signature.subpackets.addnew('PreferredSymmetricAlgorithms', hashed=True, flags=cipher_prefs)

        if hash_prefs is not None:
            sig._signature.subpackets.addnew('PreferredHashAlgorithms', hashed=True, flags=hash_prefs)
            # no explicit hash algorithm chosen: default to the first preference
            if sig.hash_algorithm is None:
                sig._signature.halg = hash_prefs[0]

        if compression_prefs is not None:
            sig._signature.subpackets.addnew('PreferredCompressionAlgorithms', hashed=True, flags=compression_prefs)

        if keyserver_flags is not None:
            sig._signature.subpackets.addnew('KeyServerPreferences', hashed=True, flags=keyserver_flags)

        if keyserver is not None:
            sig._signature.subpackets.addnew('PreferredKeyServer', hashed=True, uri=keyserver)

        if primary_uid is not None:
            sig._signature.subpackets.addnew('PrimaryUserID', hashed=True, primary=primary_uid)

        # Features is always set on self-signatures
        sig._signature.subpackets.addnew('Features', hashed=True, flags=Features.pgpy_features)

    else:
        # signature options that only make sense in non-self-certifications
        trust = prefs.pop('trust', None)
        regex = prefs.pop('regex', None)

        if trust is not None:
            sig._signature.subpackets.addnew('TrustSignature', hashed=True, level=trust[0], amount=trust[1])

            if regex is not None:
                sig._signature.subpackets.addnew('RegularExpression', hashed=True, regex=regex)

    return self._sign(subject, sig, **prefs)
@KeyAction(KeyFlags.Certify, is_unlocked=True, is_public=False)
def revoke(self, target, **prefs):
    """
    Revoke a key, a subkey, or all current certification signatures of a User ID that were generated by this key so far.

    :param target: The key to revoke
    :type target: :py:obj:`PGPKey`, :py:obj:`PGPUID`
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
    :returns: :py:obj:`PGPSignature`

    In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
    keyword arguments can be used with :py:meth:`PGPKey.revoke`.

    :keyword reason: Defaults to :py:obj:`constants.RevocationReason.NotSpecified`
    :type reason: One of :py:obj:`constants.RevocationReason`.
    :keyword comment: Defaults to an empty string.
    :type comment: ``str``
    """
    def _can_revoke(revoker, target_primary):
        # True when one of target_primary's self-signatures designates
        # `revoker` (matched by algorithm and fingerprint) as an authorized
        # revocation key.
        for self_sig in target_primary.self_signatures:
            for rk in self_sig.revocation_key:
                if rk.algorithm == revoker.key_algorithm and rk.fingerprint == revoker.fingerprint:
                    return True
        return False

    hash_algo = prefs.pop('hash', None)

    # sig_type stays None unless we establish the authority to revoke `target`.
    sig_type = None
    if isinstance(target, PGPUID):
        sig_type = SignatureType.CertRevocation
    elif isinstance(target, PGPKey):
        # Check that we are revoking a key we can revoke.
        if target.is_primary:
            # our own key, or a key that designated us as its revoker
            if target.fingerprint == self.fingerprint or _can_revoke(self, target):
                sig_type = SignatureType.KeyRevocation
        else:
            # one of our own subkeys, or a subkey of a key that designated us
            if target in self or _can_revoke(self, target.parent):
                sig_type = SignatureType.SubkeyRevocation
    else:  # pragma: no cover
        raise TypeError
    if sig_type is None:
        raise PGPError("Can't revoke the given key with this key.")

    sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid)

    # signature options that only make sense when revoking
    reason = prefs.pop('reason', RevocationReason.NotSpecified)
    comment = prefs.pop('comment', "")
    sig._signature.subpackets.addnew('ReasonForRevocation', hashed=True, code=reason, string=comment)

    return self._sign(target, sig, **prefs)
@KeyAction(is_unlocked=True, is_public=False)
def revoker(self, revoker, **prefs):
    """
    Generate a signature that specifies another key as being valid for revoking this key.

    :param revoker: The :py:obj:`PGPKey` to specify as a valid revocation key.
    :type revoker: :py:obj:`PGPKey`
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
    :returns: :py:obj:`PGPSignature`

    In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
    keyword arguments can be used with :py:meth:`PGPKey.revoker`.

    :keyword sensitive: If ``True``, this sets the sensitive flag on the RevocationKey subpacket. Currently,
                        this has no other effect.
    :type sensitive: ``bool``
    """
    sensitive = prefs.pop('sensitive', False)
    halg = prefs.pop('hash', None)

    sig = PGPSignature.new(SignatureType.DirectlyOnKey, self.key_algorithm, halg, self.fingerprint.keyid)

    # Build the revocation key class octet; the sensitive bit is only set on request.
    keyclass = RevocationKeyClass.Normal | (RevocationKeyClass.Sensitive if sensitive else 0x00)
    sig._signature.subpackets.addnew(
        'RevocationKey',
        hashed=True,
        algorithm=revoker.key_algorithm,
        fingerprint=revoker.fingerprint,
        keyclass=keyclass)

    # A designated-revoker signature should itself never be revocable.
    prefs['revocable'] = False
    return self._sign(self, sig, **prefs)
@KeyAction(is_unlocked=True, is_public=False)
def bind(self, key, **prefs):
    """
    Bind a subkey to this key.

    Valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`
    """
    hash_algo = prefs.pop('hash', None)

    # Determine the binding direction: primary signs subkey, or subkey
    # cross-certifies its primary. Anything else is a usage error.
    if self.is_primary and not key.is_primary:
        sig_type = SignatureType.Subkey_Binding
    elif key.is_primary and not self.is_primary:
        sig_type = SignatureType.PrimaryKey_Binding
    else:  # pragma: no cover
        raise PGPError

    sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid)

    if sig_type == SignatureType.Subkey_Binding:
        # signature options that only make sense in subkey binding signatures
        usage = prefs.pop('usage', None)

        if usage is not None:
            sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)

        # if possible, have the subkey create a primary key binding signature
        # (the cross-certification required for signing-capable subkeys)
        if key.key_algorithm.can_sign:
            subkeyid = key.fingerprint.keyid
            esig = None

            # prefer the private half of the subkey for the embedded signature
            if not key.is_public:
                esig = key.bind(self)
            elif subkeyid in self.subkeys:  # pragma: no cover
                esig = self.subkeys[subkeyid].bind(self)

            if esig is not None:
                sig._signature.subpackets.addnew('EmbeddedSignature', hashed=False, _sig=esig._signature)

    return self._sign(key, sig, **prefs)
def verify(self, subject, signature=None):
    """
    Verify a subject with a signature using this key.

    :param subject: The subject to verify
    :type subject: ``str``, ``unicode``, ``None``, :py:obj:`PGPMessage`, :py:obj:`PGPKey`, :py:obj:`PGPUID`
    :param signature: If the signature is detached, it should be specified here.
    :type signature: :py:obj:`PGPSignature`
    :returns: :py:obj:`~pgpy.types.SignatureVerification`
    """
    # (signature, subject) pairs collected for verification below
    sspairs = []
    # some type checking
    if not isinstance(subject, (type(None), PGPMessage, PGPKey, PGPUID, PGPSignature, six.string_types, bytes, bytearray)):
        raise TypeError("Unexpected subject value: {:s}".format(str(type(subject))))
    if not isinstance(signature, (type(None), PGPSignature, PGPDetachedSignature)):
        raise TypeError("Unexpected signature value: {:s}".format(str(type(signature))))

    def _filter_sigs(sigs):
        # keep only signatures made by this key or one of its subkeys
        _ids = {self.fingerprint.keyid} | set(self.subkeys)
        return [ sig for sig in sigs if sig.signer in _ids ]

    # collect signature(s)
    if signature is None:
        # no detached signature given: pull signatures off the subject itself
        if isinstance(subject, PGPMessage):
            sspairs += [ (sig, subject.message) for sig in _filter_sigs(subject.signatures) ]

        if isinstance(subject, (PGPUID, PGPKey)):
            sspairs += [ (sig, subject) for sig in _filter_sigs(subject.signatures) ]

        if isinstance(subject, PGPKey):
            # user ids
            sspairs += [ (sig, uid) for uid in subject.userids for sig in _filter_sigs(uid.signatures) ]
            # user attributes
            sspairs += [ (sig, ua) for ua in subject.userattributes for sig in _filter_sigs(ua.signatures) ]
            # subkey binding signatures
            sspairs += [ (sig, subkey) for subkey in subject.subkeys.values() for sig in _filter_sigs(subkey.signatures) ]

    elif isinstance(signature, PGPSignature) and signature.signer in {self.fingerprint.keyid} | set(self.subkeys):
        sspairs += [(signature, subject)]

    elif isinstance(signature, PGPDetachedSignature):
        for sig in signature:
            if sig.signer in {self.fingerprint.keyid} | set(self.subkeys):
                sspairs += [(sig, subject)]

    if len(sspairs) == 0:
        raise PGPError("No signatures to verify")

    # finally, start verifying signatures
    sigv = SignatureVerification()
    for sig, subj in sspairs:
        # delegate to the subkey that actually made the signature
        if self.fingerprint.keyid != sig.signer and sig.signer in self.subkeys:
            warnings.warn("Signature was signed with this key's subkey: {:s}. "
                          "Verifying with subkey...".format(sig.signer),
                          stacklevel=2)
            sigv &= self.subkeys[sig.signer].verify(subj, sig)

        else:
            verified = self._key.verify(sig.hashdata(subj), sig.__sig__, getattr(hashes, sig.hash_algorithm.name)())
            if verified is NotImplemented:
                raise NotImplementedError(sig.key_algorithm)

            sigv.add_sigsubj(sig, self, subj, verified)

    return sigv
@KeyAction(KeyFlags.EncryptCommunications, KeyFlags.EncryptStorage, is_public=True)
def encrypt(self, message, sessionkey=None, **prefs):
    """
    Encrypt a PGPMessage using this key.

    :param message: The message to encrypt.
    :type message: :py:obj:`PGPMessage`
    :optional param sessionkey: Provide a session key to use when encrypting something. Default is ``None``.
                                If ``None``, a session key of the appropriate length will be generated randomly.

                                .. warning::

                                    Care should be taken when making use of this option! Session keys *absolutely need*
                                    to be unpredictable! Use the ``gen_key()`` method on the desired
                                    :py:obj:`~constants.SymmetricKeyAlgorithm` to generate the session key!
    :type sessionkey: ``bytes``, ``str``
    :raises: :py:exc:`~errors.PGPEncryptionError` if encryption failed for any reason.
    :returns: A new :py:obj:`PGPMessage` with the encrypted contents of ``message``

    The following optional keyword arguments can be used with :py:meth:`PGPKey.encrypt`:

    :keyword cipher: Specifies the symmetric block cipher to use when encrypting the message.
    :type cipher: :py:obj:`~constants.SymmetricKeyAlgorithm`
    :keyword user: Specifies the User ID to use as the recipient for this encryption operation, for the purposes of
                   preference defaults and selection validation.
    :type user: ``str``, ``unicode``
    :keyword throw_keyid: Whether to zero out the keyid. An all zero keyid MAY be used as a wild-card keyid.
    :type throw_keyid: ``bool``
    """
    user = prefs.pop('user', None)
    # Resolve the User ID whose self-signature preferences drive defaults.
    uid = None
    if user is not None:
        uid = self.get_uid(user)
    else:
        uid = next(iter(self.userids), None)
        if uid is None and self.parent is not None:
            uid = next(iter(self.parent.userids), None)

    # BUGFIX: the cipher default must be computed lazily. The old form
    # `prefs.pop('cipher', uid.selfsig.cipherprefs[0])` evaluated the default
    # eagerly, so a key with no resolvable User ID raised AttributeError even
    # when the caller explicitly supplied `cipher=...`.
    cipher_algo = prefs.pop('cipher', None)
    if cipher_algo is None:
        # no explicit cipher: fall back to the recipient's top preference.
        # Still raises AttributeError if no User ID could be resolved.
        cipher_algo = uid.selfsig.cipherprefs[0]

    # preference validation only makes sense when we actually have a UID to check against
    if uid is not None and cipher_algo not in uid.selfsig.cipherprefs:
        warnings.warn("Selected symmetric algorithm not in key preferences", stacklevel=3)

    if uid is not None and message.is_compressed and message._compression not in uid.selfsig.compprefs:
        warnings.warn("Selected compression algorithm not in key preferences", stacklevel=3)

    if sessionkey is None:
        sessionkey = cipher_algo.gen_key()

    throw_keyid = prefs.pop('throw_keyid', False)

    # set up a new PKESessionKeyV3
    pkesk = PKESessionKeyV3()
    if throw_keyid:
        # wildcard keyid: the recipient must try all of their keys
        pkesk.encrypter = PGPKey.__zero_keyid
    else:
        pkesk.encrypter = bytearray(binascii.unhexlify(self.fingerprint.keyid.encode('latin-1')))
    pkesk.pkalg = self.key_algorithm
    pkesk.encrypt_sk(self._key, cipher_algo, sessionkey)

    if message.is_encrypted:  # pragma: no cover
        # already encrypted to someone else: just add another session key packet
        _m = message

    else:
        _m = PGPMessage()
        skedata = IntegrityProtectedSKEDataV1()
        skedata.encrypt(sessionkey, cipher_algo, message.__bytes__())
        _m |= skedata

    _m |= pkesk

    return _m
def _decrypt(self, pkesk, message):
    """Recover the session key from *pkesk* with this key, then decrypt *message*."""
    # Unwrap the symmetric algorithm and session key using our private key material.
    cipher_algo, session_key = pkesk.decrypt_sk(self._key)

    # Decrypt the payload and parse the resulting plaintext into a fresh message.
    plaintext = message.message.decrypt(session_key, cipher_algo)
    result = PGPMessage()
    result.parse(plaintext)
    return result
@KeyAction(is_unlocked=True, is_public=False)
def decrypt(self, message):
    """
    Decrypt a PGPMessage using this key.

    :param message: An encrypted :py:obj:`PGPMessage`
    :raises: :py:exc:`~errors.PGPError` if the key is not private, or protected but not unlocked.
    :raises: :py:exc:`~errors.PGPDecryptionError` if decryption fails for any other reason.
    :returns: A new :py:obj:`PGPMessage` with the decrypted contents of ``message``.
    """
    if not message.is_encrypted:
        warnings.warn("This message is not encrypted", stacklevel=3)
        return message

    if self.fingerprint.keyid in message.encrypters:
        # we have some pkesks encrypted to this key, try decrypting them.
        pkesks = [pk for pk in message._sessionkeys
                  if pk.pkalg == self.key_algorithm and pk.encrypter == self.fingerprint.keyid]
        # decrypt appropriate pkesk
        for pkesk in pkesks:
            try:
                return self._decrypt(pkesk, message)
            except (PGPDecryptionError, ValueError):  # pragma: no cover
                # try the next candidate pkesk before giving up
                pass
        # fallthrough if we haven't succeeded
    else:
        # we don't have any pkesks encrypted to this key, see if we have some for subkeys
        # if not check if we have some zero keyid pkesks and try decrypting them with this key and finally subkeys
        sks = set(self.subkeys)
        mis = set(message.encrypters)
        if sks & mis:
            # a subkey of ours is among the recipients: delegate to it
            skid = list(sks & mis)[0]
            warnings.warn("Message was encrypted with this key's subkey: {:s}. "
                          "Decrypting with that...".format(skid),
                          stacklevel=2)
            return self.subkeys[skid].decrypt(message)
        elif PGPKey.__zero_keyid_str in mis:
            # decrypt zero keyid pkesks, first try with this key, then pass on to subkeys
            # here we assume that if self._decrypt doesnt raise a PGPError that it decrypted successfully
            # however, that assumption may not be correct so we might return some garbage back to the caller
            # but that's really the most we can do with decrypting thrown keyid pkesks.
            zero_keyid_pkesks = [pk for pk in message._sessionkeys
                                 if pk.pkalg == self.key_algorithm and pk.encrypter == PGPKey.__zero_keyid_str]
            for pkesk in zero_keyid_pkesks:
                try:
                    return self._decrypt(pkesk, message)
                except (PGPDecryptionError, ValueError):
                    pass
            for subkey in self.subkeys.values():
                try:
                    return subkey.decrypt(message)
                except PGPError:
                    pass
        # fallthrough if we haven't succeeded
    raise PGPError("Cannot decrypt the provided message with this key")
def parse(self, data):
    """Parse key material (optionally ASCII-armored) into this key and any siblings found alongside it."""
    unarmored = self.ascii_unarmor(data)
    data = unarmored['body']

    if unarmored['magic'] is not None and 'KEY' not in unarmored['magic']:
        raise ValueError('Expected: KEY. Got: {}'.format(str(unarmored['magic'])))

    if unarmored['headers'] is not None:
        self.ascii_headers = unarmored['headers']

    # parse packets
    # keys will hold other keys parsed here
    keys = collections.OrderedDict()
    # orphaned will hold all non-opaque orphaned packets
    orphaned = []
    # last holds the last non-signature thing processed
    ##TODO: see issue #141 and fix this better
    getpkt = lambda d: Packet(d) if len(d) > 0 else None  # flake8: noqa
    # some packets are filtered out
    # (iter(callable, None) keeps pulling packets off `data` until it is exhausted)
    getpkt = filter(lambda p: p.header.tag != PacketTag.Trust, iter(functools.partial(getpkt, data), None))

    def pktgrouper():
        # Stateful grouping key: signatures inherit the group of the last
        # non-signature packet seen, so each key/UID travels with its sigs.
        class PktGrouper(object):
            def __init__(self):
                self.last = None

            def __call__(self, pkt):
                if pkt.header.tag != PacketTag.Signature:
                    self.last = '{:02X}_{:s}'.format(id(pkt), pkt.__class__.__name__)
                return self.last
        return PktGrouper()

    while True:
        for group in iter(group for _, group in itertools.groupby(getpkt, key=pktgrouper()) if not _.endswith('Opaque')):
            pkt = next(group)

            # deal with pkt first
            if isinstance(pkt, Key):
                # the first Key packet fills this (empty) object; later ones start new PGPKeys
                pgpobj = (self if self._key is None else PGPKey()) | pkt

            elif isinstance(pkt, (UserID, UserAttribute)):
                pgpobj = PGPUID() | pkt

            else:  # pragma: no cover
                break

            # add signatures to whatever we got
            [ operator.ior(pgpobj, PGPSignature() | sig) for sig in group if not isinstance(sig, Opaque) ]

            # and file away pgpobj
            if isinstance(pgpobj, PGPKey):
                if pgpobj.is_primary:
                    keys[(pgpobj.fingerprint.keyid, pgpobj.is_public)] = pgpobj

                else:
                    # subkeys attach to the most recently parsed primary key
                    keys[next(reversed(keys))] |= pgpobj

            elif isinstance(pgpobj, PGPUID):
                # parent is likely the most recently parsed primary key
                keys[next(reversed(keys))] |= pgpobj

            else:  # pragma: no cover
                break

        else:
            # finished normally
            break

        # this will only be reached if the inner loop hit a break
        warnings.warn("Warning: Orphaned packet detected! {:s}".format(repr(pkt)), stacklevel=2)  # pragma: no cover
        orphaned.append(pkt)  # pragma: no cover
        for pkt in group:  # pragma: no cover
            orphaned.append(pkt)

    # remove the reference to self from keys
    [ keys.pop((getattr(self, 'fingerprint.keyid', '~'), None), t) for t in (True, False) ]
    # return {'keys': keys, 'orphaned': orphaned}
    return keys
class PGPKeyring(collections.Container, collections.Iterable, collections.Sized):
    """In-memory collection of PGP keys, indexed by fingerprint, key id, short id, and User ID components."""

    def __init__(self, *args):
        """
        PGPKeyring objects represent in-memory keyrings that can contain any combination of supported private and public
        keys. It can not currently be conveniently exported to a format that can be understood by GnuPG.
        """
        super(PGPKeyring, self).__init__()
        # id(PGPKey) -> PGPKey for every loaded key, subkeys included
        self._keys = {}
        self._pubkeys = collections.deque()
        self._privkeys = collections.deque()
        # stack of alias->pkid maps; an alias can appear at several depths
        # when it refers to more than one key (see _add_alias/_sort_alias)
        self._aliases = collections.deque([{}])
        self.load(*args)

    def __contains__(self, alias):
        aliases = set().union(*self._aliases)
        if isinstance(alias, six.string_types):
            # also accept the alias with spaces stripped (e.g. a spaced-out fingerprint)
            return alias in aliases or alias.replace(' ', '') in aliases
        return alias in aliases  # pragma: no cover

    def __len__(self):
        return len(self._keys)

    def __iter__(self):  # pragma: no cover
        # BUGFIX: _pubkeys/_privkeys store id() integers (see _add_key), so the
        # old `yield pgpkey` produced ints, not keys. Resolve through _keys so
        # iteration yields the actual PGPKey objects.
        for pkid in itertools.chain(self._pubkeys, self._privkeys):
            yield self._keys[pkid]

    def _get_key(self, alias):
        # search alias maps left to right, so newer/private keys win (see _sort_alias)
        for m in self._aliases:
            if alias in m:
                return self._keys[m[alias]]

            if alias.replace(' ', '') in m:
                return self._keys[m[alias.replace(' ', '')]]

        raise KeyError(alias)

    def _get_keys(self, alias):
        # every key registered under this alias, across all depths
        return [self._keys[m[alias]] for m in self._aliases if alias in m]

    def _sort_alias(self, alias):
        # remove alias from all levels of _aliases, and sort by created time and key half
        # so the order of _aliases from left to right:
        #  - newer keys come before older ones
        #  - private keys come before public ones
        #
        # this list is sorted in the opposite direction from that, because they will be placed into self._aliases
        # from right to left.
        pkids = sorted(list(set().union(m.pop(alias) for m in self._aliases if alias in m)),
                       key=lambda pkid: (self._keys[pkid].created, self._keys[pkid].is_public))

        # drop the now-sorted aliases into place
        for depth, pkid in enumerate(pkids):
            self._aliases[depth][alias] = pkid

        # finally, remove any empty dicts left over
        while {} in self._aliases:  # pragma: no cover
            self._aliases.remove({})

    def _add_alias(self, alias, pkid):
        # brand new alias never seen before!
        if alias not in self:
            self._aliases[-1][alias] = pkid

        # this is a duplicate alias->key link; ignore it
        elif alias in self and pkid in set(m[alias] for m in self._aliases if alias in m):
            pass  # pragma: no cover

        # this is an alias that already exists, but points to a key that is not already referenced by it
        else:
            adepth = len(self._aliases) - len([None for m in self._aliases if alias in m]) - 1
            # all alias maps have this alias, so increase total depth by 1
            if adepth == -1:
                self._aliases.appendleft({})
                adepth = 0

            self._aliases[adepth][alias] = pkid
            self._sort_alias(alias)

    def _add_key(self, pgpkey):
        pkid = id(pgpkey)
        if pkid not in self._keys:
            self._keys[pkid] = pgpkey

            # add to _{pub,priv}keys if this is either a primary key, or a subkey without one
            if pgpkey.parent is None:
                if pgpkey.is_public:
                    self._pubkeys.append(pkid)

                else:
                    self._privkeys.append(pkid)

            # aliases
            self._add_alias(pgpkey.fingerprint, pkid)
            self._add_alias(pgpkey.fingerprint.keyid, pkid)
            self._add_alias(pgpkey.fingerprint.shortid, pkid)
            for uid in pgpkey.userids:
                self._add_alias(uid.name, pkid)
                if uid.comment:
                    self._add_alias(uid.comment, pkid)

                if uid.email:
                    self._add_alias(uid.email, pkid)

            # subkeys
            for subkey in pgpkey.subkeys.values():
                self._add_key(subkey)

    def load(self, *args):
        """
        Load all keys provided into this keyring object.

        :param \*args: Each arg in ``args`` can be any of the formats supported by :py:meth:`PGPKey.from_path` and
                       :py:meth:`PGPKey.from_blob` or a :py:class:`PGPKey` instance, or a ``list`` or ``tuple`` of these.
        :type \*args: ``list``, ``tuple``, ``str``, ``unicode``, ``bytes``, ``bytearray``
        :returns: a ``list`` containing the unique fingerprints of all of the keys that were loaded during this operation.
        """
        def _preiter(first, iterable):
            yield first
            for item in iterable:
                yield item

        loaded = set()
        # flatten args: lists/tuples of keys and bare keys are both accepted
        for key in iter(item for ilist in iter(ilist if isinstance(ilist, (tuple, list)) else [ilist] for ilist in args)
                        for item in ilist):
            keys = {}
            if isinstance(key, PGPKey):
                _key = key

            elif os.path.isfile(key):
                _key, keys = PGPKey.from_file(key)

            else:
                _key, keys = PGPKey.from_blob(key)

            for ik in _preiter(_key, keys.values()):
                self._add_key(ik)
                loaded |= {ik.fingerprint} | {isk.fingerprint for isk in ik.subkeys.values()}

        return list(loaded)

    @contextlib.contextmanager
    def key(self, identifier):
        """
        A context-manager method. Yields the first :py:obj:`PGPKey` object that matches the provided identifier.

        :param identifier: The identifier to use to select a loaded key.
        :type identifier: :py:exc:`PGPMessage`, :py:exc:`PGPSignature`, ``str``
        :raises: :py:exc:`KeyError` if there is no loaded key that satisfies the identifier.
        """
        if isinstance(identifier, PGPMessage):
            # pick the first issuer of the message that we actually have loaded
            for issuer in identifier.issuers:
                if issuer in self:
                    identifier = issuer
                    break

        if isinstance(identifier, PGPSignature):
            identifier = identifier.signer

        yield self._get_key(identifier)

    def fingerprints(self, keyhalf='any', keytype='any'):
        """
        List loaded fingerprints with some optional filtering.

        :param str keyhalf: Can be 'any', 'public', or 'private'. If 'public', or 'private', the fingerprints of keys of the
                            the other type will not be included in the results.
        :param str keytype: Can be 'any', 'primary', or 'sub'. If 'primary' or 'sub', the fingerprints of keys of the
                            the other type will not be included in the results.
        :returns: a ``set`` of fingerprints of keys matching the filters specified.
        """
        return {pk.fingerprint for pk in self._keys.values()
                if pk.is_primary in [True if keytype in ['primary', 'any'] else None,
                                     False if keytype in ['sub', 'any'] else None]
                if pk.is_public in [True if keyhalf in ['public', 'any'] else None,
                                    False if keyhalf in ['private', 'any'] else None]}

    def unload(self, key):
        """
        Unload a loaded key and its subkeys.

        The easiest way to do this is to select a key using :py:meth:`PGPKeyring.key` first::

            with keyring.key("DSA von TestKey") as key:
                keyring.unload(key)

        :param key: The key to unload.
        :type key: :py:obj:`PGPKey`
        """
        assert isinstance(key, PGPKey)
        pkid = id(key)
        if pkid in self._keys:
            # remove references
            [ kd.remove(pkid) for kd in [self._pubkeys, self._privkeys] if pkid in kd ]
            # remove the key
            self._keys.pop(pkid)
            # remove aliases
            for m, a in [ (m, a) for m in self._aliases for a, p in m.items() if p == pkid ]:
                m.pop(a)
                # do a re-sort of this alias if it was not unique
                if a in self:
                    self._sort_alias(a)
            # if key is a primary key, unload its subkeys as well
            if key.is_primary:
                [ self.unload(sk) for sk in key.subkeys.values() ]
from ckan.lib.base import BaseController, render
from ckanext.statreports.statistics.user import UserStats
from ckanext.statreports.statistics.package import PackageStats
from ckan.model.meta import engine
class StatisticsController(BaseController):
    '''
    Controller for the Etsin statistics overview page: it gathers user and
    package counts and hands them to the stats template for rendering.
    '''

    def render_stats(self):
        '''
        Collect the statistics and render the stats page with them.
        '''
        user_total = UserStats.total_users()
        visitor_total = UserStats.total_visitors(engine)  # unique visitors
        package_total = PackageStats.total_packages()
        by_license = PackageStats.license_type_package_count()

        # Everything passed to the template travels through extra_vars.
        extra_vars = {
            "total_users": user_total,
            "total_visitors": visitor_total,
            "total_packages": package_total,
            "packages_free": by_license["free"],
            "packages_conditional": by_license["conditional"],
            "packages_other": by_license["other"],
        }
        return render('statreports/stats.html', extra_vars=extra_vars)
import win32api
import os
import sys
import subprocess
import logging
from itertools import izip_longest
#itertools recipe
def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    # n references to one shared iterator advance in lockstep, carving the
    # input into n-sized chunks; the last chunk is padded with fillvalue.
    chunks = [iter(iterable)] * n
    return izip_longest(*chunks, fillvalue=fillvalue)
def harddrive_enumerator():
    """
    Generator yielding all fixed (drivetype=3) drive letters on this computer.

    Yields tuples of (DriveName, VolumeName) - eg. ("D:", "Samsung Station").
    Only implemented on Windows; other platforms raise NotImplementedError.
    """
    logger = logging.getLogger("keepitup")
    drivesDetailedList = []
    if sys.platform == "win32":
        logger.debug("Enumerating win32 hard drives")
        # wmic /format:list prints "Name=..." and "VolumeName=..." line pairs
        # for each fixed disk, separated by blank lines.
        getDrivesProc = subprocess.Popen('wmic logicaldisk where drivetype=3 get name, VolumeName /format:list',
                                         shell=True,
                                         stdout=subprocess.PIPE)
        output, err = getDrivesProc.communicate()
        logger.debug("Enumerated hard drives output: %s", output)
        drivesDetailedList = output.split(os.linesep)
    elif sys.platform in ["linux2", "darwin"]:
        logger.debug("Enumerating linux/osx hard drives")
        raise NotImplementedError()
    else:
        logger.error("Cannot enumeratre hard drives - unrecognized OS: %s", sys.platform)
        raise NotImplementedError()
    # Walk the output two lines at a time; only well-formed Name/VolumeName
    # pairs are yielded, which skips blank separator lines and the padding
    # that grouper() may add at the end.
    for name, volumeName in grouper(2, drivesDetailedList):
        if "Name=" in name and "VolumeName" in volumeName:
            name = name[len("Name="):].strip()
            volumeName = volumeName[len("VolumeName="):].strip()
            yield name, volumeName
| yoavfrancis/KeepItUp | KeepItUp/harddrive_enumerator.py | Python | mit | 1,589 |
import os
import django
def rel(path):
    """Return *path* resolved relative to the directory holding this settings file."""
    settings_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(settings_dir, path)
# Development defaults -- DEBUG must be disabled in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# PostGIS backend is required by django.contrib.gis / the cities app.
# NAME/USER/PASSWORD are intentionally blank placeholders for local setup.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'HOST': 'localhost',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'OPTIONS': {
            'autocommit': True,
        }
    }
}
TEMPLATE_DIRS = (rel("templates"),)
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Placeholder only -- replace with a real secret before deploying.
SECRET_KEY = 'YOUR_SECRET_KEY'
ROOT_URLCONF = 'urls'
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.gis',
    'cities',
)
# South handled migrations before Django grew native migrations in 1.7.
if django.VERSION < (1, 7):
    INSTALLED_APPS += (
        'south',
    )
# 'ALL' imports every postal code / locale shipped by geonames.
CITIES_POSTAL_CODES = ['ALL']
CITIES_LOCALES = ['ALL']
CITIES_PLUGINS = [
    'cities.plugin.postal_code_ca.Plugin', # Canada postal codes need region codes remapped to match geonames
]
# Send the cities logger's INFO output straight to stdout.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'log_to_stdout': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
    },
    'loggers': {
        'cities': {
            'handlers': ['log_to_stdout'],
            'level': 'INFO',
            'propagate': True,
        }
    }
}
| coderholic/django-cities | example/settings.py | Python | mit | 1,545 |
from django.apps import AppConfig
class ThemeConfig(AppConfig):
    """Django application configuration for the hav.apps.theme app."""
    default_auto_field = "django.db.models.BigAutoField"
    name = "hav.apps.theme"
| whav/hav | src/hav/apps/theme/apps.py | Python | gpl-3.0 | 151 |
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the lazy signature generator
"""
import unittest
from rdkit import Chem
from rdkit.Chem.Pharm2D import SigFactory
try:
from rdkit.Chem.Pharm2D import LazyGenerator
except NotImplementedError:
LazyGenerator = None
class TestCase(unittest.TestCase):  # pragma: nocover
  """Unit tests for the lazy Pharm2D signature generator.

  LazyGenerator currently raises NotImplementedError on import (see the
  guarded import above), so the functional test is skipped until it is
  implemented again.
  """

  def getFactory(self):
    """Build a small SigFactory (2 SMARTS patterns, 3 distance bins)."""
    factory = SigFactory.SigFactory()
    factory.SetPatternsFromSmarts(['O', 'N'])
    factory.SetBins([(0, 2), (2, 5), (5, 8)])
    factory.SetMinCount(2)
    factory.SetMaxCount(3)
    return factory

  def test_NotImplemented(self):
    # Documents the current state: LazyGenerator could not be imported.
    # Remove this test once the implementation is restored.
    self.assertIsNone(LazyGenerator, 'Review LazyGenerator unit tests')

  @unittest.skipIf(LazyGenerator is None, 'LazyGenerator implementation incomplete')
  def test1_simple(self):
    """Check bit membership with and without bond-order information."""
    mol = Chem.MolFromSmiles('OCC(=O)CCCN')
    factory = self.getFactory()

    # Without bond order.
    sig = factory.GetSignature()
    assert sig.GetSize() == 105, f'bad signature size: {sig.GetSize()}'
    sig.SetIncludeBondOrder(0)
    gen = LazyGenerator.Generator(sig, mol)
    assert len(gen) == sig.GetSize(), f'length mismatch {len(gen)}!={sig.GetSize()}'
    tgt = (1, 5, 48)
    for bit in tgt:
      assert gen[bit], f'bit {bit} not properly set'
      assert gen.GetBit(bit), f'bit {bit} not properly set'
      assert not gen[bit + 50], f'bit {bit + 100} improperly set'

    # With bond order.
    sig = factory.GetSignature()
    assert sig.GetSize() == 105, f'bad signature size: {sig.GetSize()}'
    sig.SetIncludeBondOrder(1)
    gen = LazyGenerator.Generator(sig, mol)
    assert len(gen) == sig.GetSize(), f'length mismatch {len(gen)}!={sig.GetSize()}'
    tgt = (1, 4, 5, 45)
    for bit in tgt:
      assert gen[bit], f'bit {bit} not properly set'
      assert gen.GetBit(bit), f'bit {bit} not properly set'
      assert not gen[bit + 50], f'bit {bit + 100} improperly set'

    # Out-of-range access must raise IndexError; assertRaises replaces the
    # original try/except/else ok-flag pattern.
    with self.assertRaises(IndexError):
      gen[sig.GetSize() + 1]
    with self.assertRaises(IndexError):
      gen[-1]
# Allow running this test module directly.
if __name__ == '__main__':  # pragma: nocover
  unittest.main()
| ptosco/rdkit | rdkit/Chem/Pharm2D/UnitTestLazyGenerator.py | Python | bsd-3-clause | 2,437 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add standard attribute table
Revision ID: 32e5974ada25
Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'
from alembic import op
import sqlalchemy as sa
# Neutron resource tables that will be linked to standardattributes rows.
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
          'floatingips', 'routers', 'securitygrouprules')
def upgrade():
    """Create the standardattributes table and give each resource table a
    nullable standard_attr_id foreign-key-style column pointing at it.

    The column is added nullable so existing rows remain valid; population
    and constraints are handled by later migrations.
    """
    op.create_table(
        'standardattributes',
        sa.Column('id', sa.BigInteger(), autoincrement=True),
        sa.Column('resource_type', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    for table in TABLES:
        op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
                                       nullable=True))
| dims/neutron | neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py | Python | apache-2.0 | 1,390 |
from tpg.program import Program
import numpy as np
import random
from tpg.utils import flip
from tpg.action_object import ActionObject
"""
Action Object has a program to produce a value for the action, program doesn't
run if just a discrete action code.
"""
class ConfActionObject:
    """Collection of alternative ActionObject method implementations.

    The ``*_def`` variants handle discrete action codes only; the ``*_real``
    variants additionally carry a Program whose registers produce real-valued
    actions.  Presumably one variant of each method is selected by
    configuration and bound onto ActionObject -- TODO confirm against the
    configuration machinery.
    """
    def init_def(self, initParams=None, action = None):
        '''
        Defer importing the Team class to avoid circular dependency.
        This may require refactoring to fix properly
        '''
        from tpg.team import Team
        # The action is a team
        if isinstance(action, Team):
            self.teamAction = action
            self.actionCode = None
            #print("chose team action")
            return
        # The action is another action object
        if isinstance(action, ActionObject):
            self.actionCode = action.actionCode
            self.teamAction = action.teamAction
            return
        # An int means the action is an index into the action codes in initParams
        if isinstance(action, int):
            if "actionCodes" not in initParams:
                raise Exception('action codes not found in init params', initParams)
            try:
                self.actionCode = initParams["actionCodes"][action]
                self.teamAction = None
            except IndexError as err:
                '''
                TODO log index error
                '''
                print("Index error")
            return
    def init_real(self, initParams=None, action=None):
        '''
        Defer importing the Team class to avoid circular dependency.
        This may require refactoring to fix properly
        '''
        from tpg.team import Team
        if isinstance(action, Team):
            # The action is a team
            self.actionCode = None
            self.actionLength = None
            self.teamAction = action
            self.program = Program(initParams=initParams,
                                   maxProgramLength=initParams["initMaxActProgSize"],
                                   nOperations=initParams["nOperations"],
                                   nDestinations=initParams["nDestinations"],
                                   inputSize=initParams["inputSize"])
        elif isinstance(action, ActionObject):
            # The action is another action object
            self.actionCode = action.actionCode
            self.actionLength = action.actionLength
            self.teamAction = action.teamAction
            self.program = Program(instructions=action.program.instructions,
                                   initParams=initParams)
        elif isinstance(action, int):
            # An int means the action is an index into the action codes in initParams
            if "actionCodes" not in initParams:
                raise Exception('action codes not found in init params', initParams)
            try:
                self.actionCode = initParams["actionCodes"][action]
                self.actionLength = initParams["actionLengths"][action]
                self.teamAction = None
                self.program = Program(initParams=initParams,
                                       maxProgramLength=initParams["initMaxActProgSize"],
                                       nOperations=initParams["nOperations"],
                                       nDestinations=initParams["nDestinations"],
                                       inputSize=initParams["inputSize"])
            except IndexError as err:
                '''
                TODO log index error
                '''
                print("Index error")
        # Registers must be large enough for both the action outputs and the
        # program's destination registers.
        self.registers = np.zeros(max(initParams["nActRegisters"], initParams["nDestinations"]))
    """
    Returns the action code, and if applicable corresponding real action.
    """
    def getAction_def(self, state, visited, actVars=None, path_trace=None):
        if self.teamAction is not None:
            # action from team
            return self.teamAction.act(state, visited, actVars=actVars, path_trace=path_trace)
        else:
            # atomic action
            return self.actionCode
    """
    Returns the action code, and if applicable corresponding real action(s).
    """
    def getAction_real(self, state, visited, actVars=None, path_trace=None):
        if self.teamAction is not None:
            # action from team
            return self.teamAction.act(state, visited, actVars=actVars, path_trace=path_trace)
        else:
            # atomic action
            if self.actionLength == 0:
                return self.actionCode, None
            else:
                return self.actionCode, self.getRealAction(state, actVars=actVars)
    """
    Gets the real action from a register.
    """
    def getRealAction_real(self, state, actVars=None):
        # Run the register machine; results land in self.registers.
        Program.execute(state, self.registers,
                        self.program.instructions[:,0], self.program.instructions[:,1],
                        self.program.instructions[:,2], self.program.instructions[:,3])
        return self.registers[:self.actionLength]
    """
    Gets the real action from a register. With memory.
    """
    def getRealAction_real_mem(self, state, actVars=None):
        # Same as getRealAction_real, but the program may also read/write the
        # shared memory matrix supplied in actVars.
        Program.execute(state, self.registers,
                        self.program.instructions[:,0], self.program.instructions[:,1],
                        self.program.instructions[:,2], self.program.instructions[:,3],
                        actVars["memMatrix"], actVars["memMatrix"].shape[0], actVars["memMatrix"].shape[1],
                        Program.memWriteProbFunc)
        return self.registers[:self.actionLength]
    """
    Returns true if the action is atomic, otherwise the action is a team.
    """
    def isAtomic_def(self):
        return self.teamAction is None
    """
    Change action to team or atomic action.
    """
    def mutate_def(self, mutateParams, parentTeam, teams, pActAtom, learner_id):
        # mutate action
        if flip(pActAtom):
            # atomic
            '''
            If we already have an action code make sure not to pick the same one.
            TODO handle case where there is only 1 action code.
            '''
            if self.actionCode is not None:
                options = list(filter(lambda code: code != self.actionCode,mutateParams["actionCodes"]))
            else:
                options = mutateParams["actionCodes"]
            # let our current team know we won't be pointing to them anymore
            if not self.isAtomic():
                #print("Learner {} switching from Team {} to atomic action".format(learner_id, self.teamAction.id))
                self.teamAction.inLearners.remove(str(learner_id))
            self.actionCode = random.choice(options)
            self.teamAction = None
        else:
            # team action
            selection_pool = [t for t in teams
                    if t is not self.teamAction and t is not parentTeam]
            # If we have a valid set of options choose from them
            if len(selection_pool) > 0:
                # let our current team know we won't be pointing to them anymore
                oldTeam = None
                if not self.isAtomic():
                    oldTeam = self.teamAction
                    self.teamAction.inLearners.remove(str(learner_id))
                self.teamAction = random.choice(selection_pool)
                # Let the new team know we're pointing to them
                self.teamAction.inLearners.append(str(learner_id))
                #if oldTeam != None:
                #    print("Learner {} switched from Team {} to Team {}".format(learner_id, oldTeam.id, self.teamAction.id))
        return self
    """
    Change action to team or atomic action.
    """
    def mutate_real(self, mutateParams, parentTeam, teams, pActAtom, learner_id):
        # first maybe mutate just program
        if self.actionLength > 0 and flip(0.5):
            self.program.mutate(mutateParams)
        # mutate action
        if flip(pActAtom):
            # atomic
            '''
            If we already have an action code make sure not to pick the same one.
            TODO handle case where there is only 1 action code.
            '''
            if self.actionCode is not None:
                options = list(filter(lambda code: code != self.actionCode, mutateParams["actionCodes"]))
            else:
                options = mutateParams["actionCodes"]
            # let our current team know we won't be pointing to them anymore
            if not self.isAtomic():
                #print("Learner {} switching from Team {} to atomic action".format(learner_id, self.teamAction.id))
                self.teamAction.inLearners.remove(str(learner_id))
            self.actionCode = random.choice(options)
            self.actionLength = mutateParams["actionLengths"][self.actionCode]
            self.teamAction = None
        else:
            # team action
            selection_pool = [t for t in teams
                    if t is not self.teamAction and t is not parentTeam]
            # If we have a valid set of options choose from them
            if len(selection_pool) > 0:
                # let our current team know we won't be pointing to them anymore
                oldTeam = None
                if not self.isAtomic():
                    oldTeam = self.teamAction
                    self.teamAction.inLearners.remove(str(learner_id))
                self.teamAction = random.choice(selection_pool)
                # Let the new team know we're pointing to them
                self.teamAction.inLearners.append(str(learner_id))
                #if oldTeam != None:
                #    print("Learner {} switched from Team {} to Team {}".format(learner_id, oldTeam.id, self.teamAction.id))
        return self
| Ryan-Amaral/PyTPG | tpg/configuration/conf_action_object.py | Python | mit | 9,785 |
#!/usr/bin/env python3
import sys
import os
import math
import numpy
import matplotlib.pyplot as plt
sys.path.append('../climatemaps')
import climatemaps
from climatemaps.logger import logger
DATA_OUT_DIR = 'website/data'
# One entry per climate variable: the raw CRU-style data file, the factor that
# converts stored integers to physical units, and the contour-plot settings.
TYPES = {
    'precipitation': {
        'filepath': 'data/precipitation/cpre6190.dat',
        'conversion_factor': 0.1,  # (millimetres/day) *10
        'config': climatemaps.contour.ContourPlotConfig(0.1, 16, colormap=plt.cm.jet_r, title='Precipitation', unit='mm/day', logscale=True)
    },
    'cloud': {
        'filepath': 'data/cloud/ccld6190.dat',
        'conversion_factor': 1,
        'config': climatemaps.contour.ContourPlotConfig(0, 100, colormap=plt.cm.jet_r, title='Cloud coverage', unit='%')
    },
    'mintemp': {
        'filepath': 'data/mintemp/ctmn6190.dat',
        'conversion_factor': 0.1,
        'config': climatemaps.contour.ContourPlotConfig(-40, 28, colormap=plt.cm.jet, title='Min. temperature', unit='C')
    },
    'meantemp': {
        'filepath': 'data/meantemp/ctmp6190.dat',
        'conversion_factor': 0.1,
        'config': climatemaps.contour.ContourPlotConfig(-30, 35, colormap=plt.cm.jet, title='Mean temperature', unit='C')
    },
    'maxtemp': {
        'filepath': 'data/maxtemp/ctmx6190.dat',
        'conversion_factor': 0.1,
        'config': climatemaps.contour.ContourPlotConfig(-20, 45, colormap=plt.cm.jet, title='Max. temperature', unit='C')
    },
    'diurnaltemprange': {
        'filepath': 'data/diurnaltemprange/cdtr6190.dat',
        'conversion_factor': 0.1,
        'config': climatemaps.contour.ContourPlotConfig(5, 20, colormap=plt.cm.jet, title='Diurnal temperature range', unit='C')
    },
    'wetdays': {
        'filepath': 'data/wetdays/cwet6190.dat',
        'conversion_factor': 0.1,
        'config': climatemaps.contour.ContourPlotConfig(0, 30, colormap=plt.cm.jet_r, title='Wet days', unit='days')
    },
    'wind': {
        'filepath': 'data/wind/cwnd6190.dat',
        'conversion_factor': 0.1,
        'config': climatemaps.contour.ContourPlotConfig(0, 9, colormap=plt.cm.jet, title='Wind speed', unit='m/s')
    },
    'radiation': {
        'filepath': 'data/radiation/crad6190.dat',
        'conversion_factor': 1.0,
        'config': climatemaps.contour.ContourPlotConfig(0, 300, colormap=plt.cm.jet, title='Radiation', unit='W/m^2')
    },
    'vapourpressure': {
        'filepath': 'data/vapourpressure/cvap6190.dat',
        'conversion_factor': 0.1,
        'config': climatemaps.contour.ContourPlotConfig(1, 34, colormap=plt.cm.jet, title='Vapour pressure', unit='hPa')
    },
}
def main():
    """Generate contour data/tiles for every climate type and every month."""
    month_upper = 12
    n_data_sets = len(TYPES) * month_upper
    counter = 0
    for data_type, settings in TYPES.items():
        for month in range(1, month_upper+1):
            logger.info('create image and tiles for "' + data_type + '" and month ' + str(month))
            # Progress is reported before processing the current data set.
            progress = counter/n_data_sets*100.0
            logger.info("progress: " + str(int(progress)) + '%')
            latrange, lonrange, Z = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
            contourmap = climatemaps.contour.Contour(settings['config'], lonrange, latrange, Z)
            contourmap.create_contour_data(
                DATA_OUT_DIR,
                data_type,
                month,
                figure_dpi=1200
            )
            counter += 1
    # for month in range(1, 13):
    #     create_optimal_map(month)
def create_optimal_map(month):
    """Combine precipitation, cloud cover and max temperature into a single
    0-9 'optimal climate' score map for *month* and write its contour data.

    Each component is mapped to a sub-score (higher = better) and the three
    are averaged, scaled by 10, and clipped at 0.
    """
    settings = TYPES['precipitation']
    latrange, lonrange, Zpre = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
    settings = TYPES['cloud']
    latrange, lonrange, Zcloud = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
    settings = TYPES['maxtemp']
    latrange, lonrange, Ztmax = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
    # Precipitation score: 1 at 0 mm/day, falling linearly to 0 at 16 mm/day.
    for x in numpy.nditer(Zpre, op_flags=['readwrite']):
        if x/16.0 > 1.0:
            x[...] = 0.0
        else:
            x[...] = 1.0 - x/16.0
    # Temperature score: quadratic penalty away from an ideal max of 22 C
    # (can go negative far from the ideal; clipped after averaging below).
    for x in numpy.nditer(Ztmax, op_flags=['readwrite']):
        temp_ideal = 22
        x[...] = 1.0 - math.pow((x-temp_ideal)/10.0, 2)
    # Cloud score: fraction of clear sky.
    Zscore_cloud = (100 - Zcloud)/100
    Z = (Zpre + Zscore_cloud + Ztmax) / 3.0 * 10.0
    for x in numpy.nditer(Z, op_flags=['readwrite']):
        x[...] = max(x, 0.0)
    config = climatemaps.contour.ContourPlotConfig(0.0, 9.0, colormap=plt.cm.RdYlGn, unit='')
    contourmap = climatemaps.contour.Contour(config, lonrange, latrange, Z)
    contourmap.create_contour_data(
        DATA_OUT_DIR,
        'optimal',
        month,
        figure_dpi=1000
    )
    print('month done: ' + str(month))
# Script entry point.
if __name__ == "__main__":
    main()
| bartromgens/climatemaps | bin/create_contour.py | Python | mit | 4,856 |
#! /usr/bin/env python
"""
Create a multiple sample table of Pathway Abundances using individual sample
pathabundance files from HUMAnN2
Copyright:
table_from_humann2 Create a multiple sample table of Pathway Abundances using individual sample path abundance files from HUMAnN2
Copyright (C) 2016 William Brazelton
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import argparse
import sys
import os
def print_path(pathway, database, size):
    """Print one tab-delimited table row for *pathway*.

    Pads the abundance list in *database* (in place) with '0' entries until it
    has *size* values, then writes "pathway<TAB>a1<TAB>...<TAB>aN" to stdout.
    """
    abundances = database[pathway]
    missing = size - len(abundances)
    if missing > 0:
        abundances.extend(['0'] * missing)
    print('\t'.join([pathway] + abundances))
def main(argv=None):
    """Merge per-sample HUMAnN2 pathabundance files into one table on stdout.

    Args:
        argv (list of str, optional): command-line arguments; None (the
            default) makes argparse read sys.argv, so CLI behaviour is
            unchanged.

    The output has one row per pathway; samples lacking a pathway get '0'.
    UNMAPPED and UNINTEGRATED rows, when present, are printed first, then the
    remaining pathways in sorted order.  Unreadable input files are skipped
    with a warning on stderr; a malformed line aborts with exit code 1.
    """
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('infiles', metavar='pathabundance',
        nargs='+',
        help="space-separated list of HUMAnN2 pathabundance files")
    parser.add_argument('-l', '--labels', metavar='"label, label, ..."',
        type=str,
        help="sample names to use in the report, given in a comma-separated "
        "list [default: use file names]. The order given must match the "
        "order in which the files are provided. If spaces are used "
        "in-between labels, the full argument must be wrapped in quotes.")
    args = parser.parse_args(argv)

    # BUG FIX: --labels is optional, but the original code called
    # args.labels.split(',') unconditionally and crashed with AttributeError
    # whenever the flag was omitted.
    if args.labels:
        labels = [label.lstrip() for label in args.labels.split(',')]
    else:
        labels = None

    # Verify each input file can be opened, remembering the original position
    # of every readable file so labels stay aligned with their files.
    # BUG FIX: the original removed labels by current index from an
    # already-shortened list, which dropped the wrong label once more than one
    # file had been skipped.
    valid_files = []
    kept_indices = []
    for index, infile in enumerate(args.infiles):
        try:
            open(infile).close()
        except IOError:
            basename = os.path.basename(infile)
            print("Unable to open {} ... skipping".format(basename), file=sys.stderr)
        else:
            valid_files.append(infile)
            kept_indices.append(index)
    if labels:
        labels = [labels[i] for i in kept_indices if i < len(labels)]

    # path_db maps pathway -> list of abundances, one entry per valid file.
    path_db = {}
    for index, infile in enumerate(valid_files):
        with open(infile) as in_h:
            for line in in_h:
                if line.startswith('#'):
                    continue
                try:
                    pathway, abundance = line.strip().split('\t')
                except ValueError:
                    basename = os.path.basename(infile)
                    print("file '{}' does not have a recognizable format".format(basename), file=sys.stderr)
                    sys.exit(1)
                if pathway in path_db:
                    # Pad for any earlier samples that lacked this pathway.
                    diff = index - len(path_db[pathway])
                    if diff > 0:
                        path_db[pathway].extend(['0'] * diff)
                    path_db[pathway].append(abundance)
                else:
                    path_db[pathway] = ['0'] * index + [abundance]

    num_valid = len(valid_files)
    if labels:
        header = "# Pathway\t{}".format('\t'.join(labels))
    else:
        header = "# Pathway\t{}".format('\t'.join(valid_files))
    print(header)
    # Conventionally, UNMAPPED and UNINTEGRATED lead the table.
    if 'UNMAPPED' in path_db:
        print_path('UNMAPPED', path_db, num_valid)
    if 'UNINTEGRATED' in path_db:
        print_path('UNINTEGRATED', path_db, num_valid)
    for pathway in sorted(path_db):
        if pathway == 'UNMAPPED' or pathway == 'UNINTEGRATED':
            continue
        print_path(pathway, path_db, num_valid)
main()
sys.exit(0)
| Brazelton-Lab/lab_scripts | table_from_humann2.py | Python | gpl-2.0 | 4,037 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2008-2009,2011, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
import time
import socket
from . import ircutils, registry, utils
from .version import version
###
# *** The following variables are affected by command-line options. They are
# not registry variables for a specific reason. Do *not* change these to
# registry variables without first consulting people smarter than yourself.
###
###
# daemonized: This determines whether or not the bot has been daemonized
# (i.e., set to run in the background). Obviously, this defaults
# to False. A command-line option for obvious reasons.
###
daemonized = False
###
# allowDefaultOwner: True if supybot.capabilities is allowed not to include
# '-owner' -- that is, if all users should be automatically
# recognized as owners. That would suck, hence we require a
# command-line option to allow this stupidity.
###
allowDefaultOwner = False
###
# Here we replace values in other modules as appropriate.
###
utils.web.defaultHeaders['User-agent'] = \
'Mozilla/5.0 (Compatible; Supybot %s)' % version
###
# The standard registry.
###
supybot = registry.Group()
supybot.setName('supybot')
def registerGroup(Group, name, group=None, **kwargs):
    """Register a subgroup called *name* under *Group*.

    Any extra keyword arguments cause a fresh registry.Group built from them
    to be registered instead of the *group* that was passed in.
    """
    target = registry.Group(**kwargs) if kwargs else group
    return Group.register(name, target)
def registerGlobalValue(group, name, value):
    """Register *value* under *group* as a global (non-channel) setting."""
    value.channelValue = False
    return group.register(name, value)
def registerChannelValue(group, name, value):
    """Register *value* under *group* as a channel-overridable setting.

    Also walks the already-loaded registry cache and instantiates any
    channel-specific overrides found for this variable, so that values read
    from the config file persist.
    """
    value._supplyDefault = True
    value.channelValue = True
    g = group.register(name, value)
    gname = g._name.lower()
    # Python 2 dict iteration (iterkeys); the cache maps full dotted names.
    for name in registry._cache.iterkeys():
        if name.lower().startswith(gname) and len(gname) < len(name):
            name = name[len(gname)+1:] # +1 for .
            parts = registry.split(name)
            if len(parts) == 1 and parts[0] and ircutils.isChannel(parts[0]):
                # This gets the channel values so they always persist.
                g.get(parts[0])()
def registerPlugin(name, currentValue=None, public=True):
    """Register plugin *name* in the registry and return its config group.

    Creates supybot.plugins.<name> (load-on-start flag), its 'public'
    sub-value, and a matching per-user group under users.plugins.
    """
    group = registerGlobalValue(supybot.plugins, name,
        registry.Boolean(False, """Determines whether this plugin is loaded by
        default.""", showDefault=False))
    supybot.plugins().add(name)
    registerGlobalValue(group, 'public',
        registry.Boolean(public, """Determines whether this plugin is
        publicly visible."""))
    if currentValue is not None:
        supybot.plugins.get(name).setValue(currentValue)
    registerGroup(users.plugins, name)
    return group
def get(group, channel=None):
    """Return the value of *group*, preferring the channel-specific value
    when *group* supports per-channel values and *channel* names a channel."""
    useChannel = (group.channelValue and
                  channel is not None and ircutils.isChannel(channel))
    if useChannel:
        return group.get(channel)()
    return group()
###
# The user info registry.
###
# Separate registry root for per-user data; plugins get a subgroup each.
users = registry.Group()
users.setName('users')
registerGroup(users, 'plugins', orderAlphabetically=True)
def registerUserValue(group, name, value):
    """Register *value* as a per-user setting; *group* must live under users."""
    assert group._name.startswith('users')
    value._supplyDefault = True
    group.register(name, value)
class ValidNick(registry.String):
    """Value must be a valid IRC nick."""
    def setValue(self, v):
        # Accept only syntactically valid nicks; reject everything else.
        if ircutils.isNick(v):
            registry.String.setValue(self, v)
        else:
            self.error()
class ValidNicks(registry.SpaceSeparatedListOf):
    # Space-separated list whose elements are each validated as IRC nicks.
    Value = ValidNick
class ValidNickAllowingPercentS(ValidNick):
    """Value must be a valid IRC nick, with the possible exception of a %s
    in it."""
    def setValue(self, v):
        # If this works, it's a valid nick, aside from the %s.
        try:
            ValidNick.setValue(self, v.replace('%s', ''))
            # It's valid aside from the %s, we'll let it through.
            registry.String.setValue(self, v)
        except registry.InvalidRegistryValue:
            self.error()
class ValidNicksAllowingPercentS(ValidNicks):
    # List variant of ValidNickAllowingPercentS (%s placeholders permitted).
    Value = ValidNickAllowingPercentS
class ValidChannel(registry.String):
    """Value must be a valid IRC channel name.

    Accepts an optional "channel,key" form, but only with exactly one comma;
    error() attaches the offending channel to the raised exception.
    """
    def setValue(self, v):
        # Remember the raw value so error() can report which channel failed.
        self.channel = v
        if ',' in v:
            # To prevent stupid users from: a) trying to add a channel key
            # with a comma in it, b) trying to add channels separated by
            # commas instead of spaces
            try:
                (channel, _) = v.split(',')
            except ValueError:
                self.error()
        else:
            channel = v
        if not ircutils.isChannel(channel):
            self.error()
        else:
            registry.String.setValue(self, v)
    def error(self):
        # Re-raise the parent's error with the channel attached.
        # (Python 2 "except ..., e" syntax -- this module predates Python 3.)
        try:
            super(ValidChannel, self).error()
        except registry.InvalidRegistryValue, e:
            e.channel = self.channel
            raise e
class ValidHostmask(registry.String):
    """Value must be a valid user hostmask."""
    def setValue(self, v):
        # Validate before delegating to the String implementation.
        if not ircutils.isUserHostmask(v):
            self.error()
        super(ValidHostmask, self).setValue(v)
registerGlobalValue(supybot, 'nick',
ValidNick('supybot', """Determines the bot's default nick."""))
registerGlobalValue(supybot.nick, 'alternates',
ValidNicksAllowingPercentS(['%s`', '%s_'], """Determines what alternative
nicks will be used if the primary nick (supybot.nick) isn't available. A
%s in this nick is replaced by the value of supybot.nick when used. If no
alternates are given, or if all are used, the supybot.nick will be perturbed
appropriately until an unused nick is found."""))
registerGlobalValue(supybot, 'ident',
ValidNick('supybot', """Determines the bot's ident string, if the server
doesn't provide one by default."""))
class VersionIfEmpty(registry.String):
    """String value that falls back to 'Supybot <version>' when empty."""
    def __call__(self):
        # An empty (or otherwise falsy) stored value yields the version banner.
        return registry.String.__call__(self) or 'Supybot %s' % version
registerGlobalValue(supybot, 'user',
VersionIfEmpty('', """Determines the user the bot sends to the server.
A standard user using the current version of the bot will be generated if
this is left empty."""))
class Networks(registry.SpaceSeparatedSetOfStrings):
    # Store network names in an IrcSet (IRC-style case-insensitive set).
    List = ircutils.IrcSet
# The set of networks the bot connects to; filled in by registerNetwork below.
registerGlobalValue(supybot, 'networks',
    Networks([], """Determines what networks the bot will connect to.""",
    orderAlphabetically=True))
class Servers(registry.SpaceSeparatedListOfStrings):
    """List of "host:port" server strings; port defaults to 6667."""
    def normalize(self, s):
        # Append the default IRC port when none was given.
        if ':' not in s:
            s += ':6667'
        return s
    def convert(self, s):
        # "host:port" -> (host, int(port)); rsplit tolerates ':' in the host.
        s = self.normalize(s)
        (server, port) = s.rsplit(':', 1)
        port = int(port)
        return (server, port)
    def __call__(self):
        # Returns (host, port) tuples (Python 2 map returns a list).
        L = registry.SpaceSeparatedListOfStrings.__call__(self)
        return map(self.convert, L)
    def __str__(self):
        return ' '.join(registry.SpaceSeparatedListOfStrings.__call__(self))
    def append(self, s):
        # NOTE(review): appends to the list returned by __call__; whether that
        # mutates the stored value depends on the registry implementation.
        L = registry.SpaceSeparatedListOfStrings.__call__(self)
        L.append(s)
class SpaceSeparatedSetOfChannels(registry.SpaceSeparatedListOf):
    """Sorted set of channel names, each validated by ValidChannel."""
    sorted = True
    List = ircutils.IrcSet
    Value = ValidChannel
    def join(self, channel):
        """Build the JOIN message for *channel*, including its configured key
        (the per-channel 'key' value registered by registerNetwork)."""
        from . import ircmsgs # Don't put this globally!  It's recursive.
        key = self.key.get(channel)()
        if key:
            return ircmsgs.join(channel, key)
        else:
            return ircmsgs.join(channel)
def registerNetwork(name, password='', ssl=False):
    """Register the configuration subtree for IRC network *name*.

    Creates supybot.networks.<name> with password, servers, channels (plus a
    per-channel key), and ssl values, and returns the network group.
    """
    network = registerGroup(supybot.networks, name)
    registerGlobalValue(network, 'password', registry.String(password,
        """Determines what password will be used on %s. Yes, we know that
        technically passwords are server-specific and not network-specific,
        but this is the best we can do right now.""" % name, private=True))
    registryServers = registerGlobalValue(network, 'servers', Servers([],
        """Space-separated list of servers the bot will connect to for %s.
        Each will be tried in order, wrapping back to the first when the cycle
        is completed.""" % name))
    registerGlobalValue(network, 'channels', SpaceSeparatedSetOfChannels([],
        """Space-separated list of channels the bot will join only on %s."""
        % name, private=True))
    registerGlobalValue(network, 'ssl', registry.Boolean(ssl,
        """Determines whether the bot will attempt to connect with SSL sockets
        to %s.""" % name))
    registerChannelValue(network.channels, 'key', registry.String('',
        """Determines what key (if any) will be used to join the channel.""", private=True))
    return network
# Let's fill our networks: re-register every network already present in the
# loaded configuration cache (Python 2 iteritems).
for (name, s) in registry._cache.iteritems():
    if name.startswith('supybot.networks.'):
        parts = name.split('.')
        name = parts[2]
        if name != 'default':
            registerNetwork(name)
###
# Reply/error tweaking.
###
registerGroup(supybot, 'reply')
registerGroup(supybot.reply, 'format')
registerChannelValue(supybot.reply.format, 'time',
registry.String('%I:%M %p, %B %d, %Y', """Determines how timestamps printed
for human reading should be formatted. Refer to the Python documentation
for the time module to see valid formatting characters for time
formats."""))
def timestamp(t):
    """Format epoch time *t* (None means "now") per supybot.reply.format.time."""
    if t is None:
        t = time.time()
    t = time.localtime(t)
    # NOTE(review): 'dynamic' is not imported here -- presumably a module
    # injected at runtime providing the current channel; confirm.
    format = get(supybot.reply.format.time, dynamic.channel)
    return time.strftime(format, t)
utils.str.timestamp = timestamp
registerGroup(supybot.reply.format.time, 'elapsed')
registerChannelValue(supybot.reply.format.time.elapsed, 'short',
registry.Boolean(False, """Determines whether elapsed times will be given
as "1 day, 2 hours, 3 minutes, and 15 seconds" or as "1d 2h 3m 15s"."""))
originalTimeElapsed = utils.timeElapsed
def timeElapsed(*args, **kwargs):
    # Wrapper around the original utils.timeElapsed that injects the
    # configured 'short' formatting preference.
    kwargs['short'] = supybot.reply.format.time.elapsed.short()
    return originalTimeElapsed(*args, **kwargs)
utils.timeElapsed = timeElapsed
registerGlobalValue(supybot.reply, 'maximumLength',
registry.Integer(512*256, """Determines the absolute maximum length of the
bot's reply -- no reply will be passed through the bot with a length
greater than this."""))
registerChannelValue(supybot.reply, 'mores',
registry.Boolean(True, """Determines whether the bot will break up long
messages into chunks and allow users to use the 'more' command to get the
remaining chunks."""))
registerChannelValue(supybot.reply.mores, 'maximum',
registry.PositiveInteger(50, """Determines what the maximum number of
chunks (for use with the 'more' command) will be."""))
registerChannelValue(supybot.reply.mores, 'length',
registry.NonNegativeInteger(0, """Determines how long individual chunks
will be. If set to 0, uses our super-tweaked,
get-the-most-out-of-an-individual-message default."""))
registerChannelValue(supybot.reply.mores, 'instant',
registry.PositiveInteger(1, """Determines how many mores will be sent
instantly (i.e., without the use of the more command, immediately when
they are formed). Defaults to 1, which means that a more command will be
required for all but the first chunk."""))
registerGlobalValue(supybot.reply, 'oneToOne',
registry.Boolean(True, """Determines whether the bot will send
multi-message replies in a single message or in multiple messages. For
safety purposes (so the bot is less likely to flood) it will normally send
everything in a single message, using mores if necessary."""))
registerChannelValue(supybot.reply, 'whenNotCommand',
registry.Boolean(True, """Determines whether the bot will reply with an
error message when it is addressed but not given a valid command. If this
value is False, the bot will remain silent, as long as no other plugins
override the normal behavior."""))
registerGroup(supybot.reply, 'error')
registerGlobalValue(supybot.reply.error, 'detailed',
registry.Boolean(False, """Determines whether error messages that result
from bugs in the bot will show a detailed error message (the uncaught
exception) or a generic error message."""))
registerChannelValue(supybot.reply.error, 'inPrivate',
registry.Boolean(False, """Determines whether the bot will send error
messages to users in private. You might want to do this in order to keep
channel traffic to minimum. This can be used in combination with
supybot.reply.error.withNotice."""))
registerChannelValue(supybot.reply.error, 'withNotice',
registry.Boolean(False, """Determines whether the bot will send error
messages to users via NOTICE instead of PRIVMSG. You might want to do this
so users can ignore NOTICEs from the bot and not have to see error
messages; or you might want to use it in combination with
supybot.reply.errorInPrivate so private errors don't open a query window
in most IRC clients."""))
registerChannelValue(supybot.reply.error, 'noCapability',
registry.Boolean(False, """Determines whether the bot will send an error
message to users who attempt to call a command for which they do not have
the necessary capability. You may wish to make this True if you don't want
users to understand the underlying security system preventing them from
running certain commands."""))
registerChannelValue(supybot.reply, 'inPrivate',
registry.Boolean(False, """Determines whether the bot will reply privately
when replying in a channel, rather than replying to the whole channel."""))
registerChannelValue(supybot.reply, 'withNotice',
registry.Boolean(False, """Determines whether the bot will reply with a
notice when replying in a channel, rather than replying with a privmsg as
normal."""))
# XXX: User value.
registerGlobalValue(supybot.reply, 'withNoticeWhenPrivate',
registry.Boolean(False, """Determines whether the bot will reply with a
notice when it is sending a private message, in order not to open a /query
window in clients. This can be overridden by individual users via the user
configuration variable reply.withNoticeWhenPrivate."""))
registerChannelValue(supybot.reply, 'withNickPrefix',
registry.Boolean(True, """Determines whether the bot will always prefix the
user's nick to its reply to that user's command."""))
registerChannelValue(supybot.reply, 'whenNotAddressed',
registry.Boolean(False, """Determines whether the bot should attempt to
reply to all messages even if they don't address it (either via its nick
or a prefix character). If you set this to True, you almost certainly want
to set supybot.reply.whenNotCommand to False."""))
registerChannelValue(supybot.reply, 'requireChannelCommandsToBeSentInChannel',
registry.Boolean(False, """Determines whether the bot will allow you to
send channel-related commands outside of that channel. Sometimes people
find it confusing if a channel-related command (like Filter.outfilter)
changes the behavior of the channel but was sent outside the channel
itself."""))
registerGlobalValue(supybot, 'followIdentificationThroughNickChanges',
registry.Boolean(False, """Determines whether the bot will unidentify
someone when that person changes his or her nick. Setting this to True
will cause the bot to track such changes. It defaults to False for a
little greater security."""))
registerGlobalValue(supybot, 'alwaysJoinOnInvite',
registry.Boolean(False, """Determines whether the bot will always join a
channel when it's invited. If this value is False, the bot will only join
a channel if the user inviting it has the 'admin' capability (or if it's
explicitly told to join the channel using the Admin.join command)"""))
registerChannelValue(supybot.reply, 'showSimpleSyntax',
registry.Boolean(False, """Supybot normally replies with the full help
whenever a user misuses a command. If this value is set to True, the bot
will only reply with the syntax of the command (the first line of the
help) rather than the full help."""))
class ValidPrefixChars(registry.String):
    """Value must contain only ~!@#$%^&*()_-+=[{}]\\|'\";:,<.>/?"""
    def setValue(self, v):
        # Python 2 str.translate(table, deletechars): delete every allowed
        # character from v; anything left over is an invalid prefix char.
        # NOTE(review): the code also accepts the backtick (`) although the
        # docstring above (which registry uses as the error message) omits
        # it -- confirm which is intended.
        if v.translate(utils.str.chars, '`~!@#$%^&*()_-+=[{}]\\|\'";:,<.>/?'):
            self.error()
        registry.String.setValue(self, v)
registerGroup(supybot.reply, 'whenAddressedBy')
registerChannelValue(supybot.reply.whenAddressedBy, 'chars',
ValidPrefixChars('', """Determines what prefix characters the bot will
reply to. A prefix character is a single character that the bot will use
to determine what messages are addressed to it; when there are no prefix
characters set, it just uses its nick. Each character in this string is
interpreted individually; you can have multiple prefix chars
simultaneously, and if any one of them is used as a prefix the bot will
assume it is being addressed."""))
registerChannelValue(supybot.reply.whenAddressedBy, 'strings',
registry.SpaceSeparatedSetOfStrings([], """Determines what strings the bot
will reply to when they are at the beginning of the message. Whereas
prefix.chars can only be one character (although there can be many of
them), this variable is a space-separated list of strings, so you can
set something like '@@ ??' and the bot will reply when a message is
prefixed by either @@ or ??."""))
registerChannelValue(supybot.reply.whenAddressedBy, 'nick',
registry.Boolean(True, """Determines whether the bot will reply when people
address it by its nick, rather than with a prefix character."""))
registerChannelValue(supybot.reply.whenAddressedBy.nick, 'atEnd',
registry.Boolean(False, """Determines whether the bot will reply when
people address it by its nick at the end of the message, rather than at
the beginning."""))
registerChannelValue(supybot.reply.whenAddressedBy, 'nicks',
registry.SpaceSeparatedSetOfStrings([], """Determines what extra nicks the
bot will always respond to when addressed by, even if its current nick is
something else."""))
###
# Replies
###
registerGroup(supybot, 'replies')
registerChannelValue(supybot.replies, 'success',
registry.NormalizedString("""The operation succeeded.""", """Determines
what message the bot replies with when a command succeeded. If this
configuration variable is empty, no success message will be sent."""))
registerChannelValue(supybot.replies, 'error',
registry.NormalizedString("""An error has occurred and has been logged.
Please contact this bot's administrator for more information.""", """
Determines what error message the bot gives when it wants to be
ambiguous."""))
registerChannelValue(supybot.replies, 'incorrectAuthentication',
registry.NormalizedString("""Your hostmask doesn't match or your password
is wrong.""", """Determines what message the bot replies with when someone
tries to use a command that requires being identified or having a password
and neither credential is correct."""))
# XXX: This should eventually check that there's one and only one %s here.
# Registers the reply used when a command references an unknown user.
# (Fixes help-text grammar: "tries to accessing" -> "tries to access".)
registerChannelValue(supybot.replies, 'noUser',
    registry.NormalizedString("""I can't find %s in my user
    database. If you didn't give a user name, then I might not know what your
    user is, and you'll need to identify before this command might work.""",
    """Determines what error message the bot replies with when someone tries
    to access some information on a user the bot doesn't know about."""))
registerChannelValue(supybot.replies, 'notRegistered',
registry.NormalizedString("""You must be registered to use this command.
If you are already registered, you must either identify (using the identify
command) or add a hostmask matching your current hostmask (using the
"hostmask add" command).""", """Determines what error message the bot
replies with when someone tries to do something that requires them to be
registered but they're not currently recognized."""))
registerChannelValue(supybot.replies, 'noCapability',
registry.NormalizedString("""You don't have the %s capability. If you
think that you should have this capability, be sure that you are identified
before trying again. The 'whoami' command can tell you if you're
identified.""", """Determines what error message is given when the bot is
telling someone they aren't cool enough to use the command they tried to
use."""))
registerChannelValue(supybot.replies, 'genericNoCapability',
registry.NormalizedString("""You're missing some capability you need.
This could be because you actually possess the anti-capability for the
capability that's required of you, or because the channel provides that
anti-capability by default, or because the global capabilities include
that anti-capability. Or, it could be because the channel or
supybot.capabilities.default is set to False, meaning that no commands are
allowed unless explicitly in your capabilities. Either way, you can't do
what you want to do.""",
"""Determines what generic error message is given when the bot is telling
someone that they aren't cool enough to use the command they tried to use,
and the author of the code calling errorNoCapability didn't provide an
explicit capability for whatever reason."""))
registerChannelValue(supybot.replies, 'requiresPrivacy',
registry.NormalizedString("""That operation cannot be done in a
channel.""", """Determines what error messages the bot sends to people who
try to do things in a channel that really should be done in private."""))
registerChannelValue(supybot.replies, 'possibleBug',
registry.NormalizedString("""This may be a bug. If you think it is, please
file a bug report at
<http://sourceforge.net/tracker/?func=add&group_id=58965&atid=489447>.""",
"""Determines what message the bot sends when it thinks you've encountered
a bug that the developers don't know about."""))
###
# End supybot.replies.
###
registerGlobalValue(supybot, 'snarfThrottle',
registry.Float(10.0, """A floating point number of seconds to throttle
snarfed URLs, in order to prevent loops between two bots snarfing the same
URLs and having the snarfed URL in the output of the snarf message."""))
registerGlobalValue(supybot, 'upkeepInterval',
registry.PositiveInteger(3600, """Determines the number of seconds between
running the upkeep function that flushes (commits) open databases, collects
garbage, and records some useful statistics at the debugging level."""))
registerGlobalValue(supybot, 'flush',
registry.Boolean(True, """Determines whether the bot will periodically
flush data and configuration files to disk. Generally, the only time
you'll want to set this to False is when you want to modify those
configuration files by hand and don't want the bot to flush its current
version over your modifications. Do note that if you change this to False
inside the bot, your changes won't be flushed. To make this change
permanent, you must edit the registry yourself."""))
###
# supybot.commands. For stuff relating to commands.
###
registerGroup(supybot, 'commands')
class ValidQuotes(registry.Value):
    """Value must consist solely of \", ', and ` characters."""
    def setValue(self, v):
        # Reject the value outright if any character falls outside the
        # permitted quote set.
        if any(ch not in '"`\'' for ch in v):
            self.error()
        super(ValidQuotes, self).setValue(v)
    def __str__(self):
        # Render the underlying value directly.
        return str(self.value)
registerChannelValue(supybot.commands, 'quotes',
ValidQuotes('"', """Determines what characters are valid for quoting
arguments to commands in order to prevent them from being tokenized.
"""))
# This is a GlobalValue because bot owners should be able to say, "There will
# be no nesting at all on this bot." Individual channels can just set their
# brackets to the empty string.
registerGlobalValue(supybot.commands, 'nested',
registry.Boolean(True, """Determines whether the bot will allow nested
commands, which rule. You definitely should keep this on."""))
registerGlobalValue(supybot.commands.nested, 'maximum',
registry.PositiveInteger(10, """Determines what the maximum number of
nested commands will be; users will receive an error if they attempt
commands more nested than this."""))
class ValidBrackets(registry.OnlySomeStrings):
    # Allowed bracket pairs for nested commands; the empty string means
    # nesting is disabled in the channel.
    validStrings = ('', '[]', '<>', '{}', '()')
registerChannelValue(supybot.commands.nested, 'brackets',
ValidBrackets('[]', """Supybot allows you to specify what brackets are used
for your nested commands. Valid sets of brackets include [], <>, and {}
(). [] has strong historical motivation, as well as being the brackets
that don't require shift. <> or () might be slightly superior because they
cannot occur in a nick. If this string is empty, nested commands will
not be allowed in this channel."""))
registerChannelValue(supybot.commands.nested, 'pipeSyntax',
registry.Boolean(False, """Supybot allows nested commands. Enabling this
option will allow nested commands with a syntax similar to UNIX pipes, for
example: 'bot: foo | bar'."""))
registerGroup(supybot.commands, 'defaultPlugins',
orderAlphabetically=True, help="""Determines what commands have default
plugins set, and which plugins are set to be the default for each of those
commands.""")
registerGlobalValue(supybot.commands.defaultPlugins, 'importantPlugins',
registry.SpaceSeparatedSetOfStrings(
['Admin', 'Channel', 'Config', 'Misc', 'Owner', 'Plugin', 'User'],
"""Determines what plugins automatically get precedence over all other
plugins when selecting a default plugin for a command. By default,
this includes the standard loaded plugins. You probably shouldn't
change this if you don't know what you're doing; if you do know what
you're doing, then also know that this set is case-sensitive."""))
# supybot.commands.disabled moved to callbacks for canonicalName.
###
# supybot.abuse. For stuff relating to abuse of the bot.
###
registerGroup(supybot, 'abuse')
registerGroup(supybot.abuse, 'flood')
registerGlobalValue(supybot.abuse.flood, 'command',
registry.Boolean(True, """Determines whether the bot will defend itself
against command-flooding."""))
registerGlobalValue(supybot.abuse.flood.command, 'maximum',
registry.PositiveInteger(12, """Determines how many commands users are
allowed per minute. If a user sends more than this many commands in any
60 second period, he or she will be ignored for
supybot.abuse.flood.command.punishment seconds."""))
registerGlobalValue(supybot.abuse.flood.command, 'punishment',
registry.PositiveInteger(300, """Determines how many seconds the bot
will ignore users who flood it with commands."""))
registerGlobalValue(supybot.abuse.flood.command, 'invalid',
registry.Boolean(True, """Determines whether the bot will defend itself
against invalid command-flooding."""))
registerGlobalValue(supybot.abuse.flood.command.invalid, 'maximum',
registry.PositiveInteger(5, """Determines how many invalid commands users
are allowed per minute. If a user sends more than this many invalid
commands in any 60 second period, he or she will be ignored for
supybot.abuse.flood.command.invalid.punishment seconds. Typically, this
value is lower than supybot.abuse.flood.command.maximum, since it's far
less likely (and far more annoying) for users to flood with invalid
commands than for them to flood with valid commands."""))
# Registers the ignore duration for invalid-command flooders.
# (Fixes help-text typo: "witih" -> "with".)
registerGlobalValue(supybot.abuse.flood.command.invalid, 'punishment',
    registry.PositiveInteger(600, """Determines how many seconds the bot
    will ignore users who flood it with invalid commands. Typically, this
    value is higher than supybot.abuse.flood.command.punishment, since it's far
    less likely (and far more annoying) for users to flood with invalid
    commands than for them to flood with valid commands."""))
registerGlobalValue(supybot.abuse.flood.command.invalid, 'notify',
registry.Boolean(True, """Determines whether the bot will notify people that they're
being ignored for invalid command flooding."""))
###
# supybot.drivers. For stuff relating to Supybot's drivers (duh!)
###
registerGroup(supybot, 'drivers')
registerGlobalValue(supybot.drivers, 'poll',
registry.PositiveFloat(1.0, """Determines the default length of time a
driver should block waiting for input."""))
class ValidDriverModule(registry.OnlySomeStrings):
    # The driver implementations the bot knows how to load.
    validStrings = ('default', 'Socket', 'Twisted')
registerGlobalValue(supybot.drivers, 'module',
ValidDriverModule('default', """Determines what driver module the bot will
use. Socket, a simple driver based on timeout sockets, is used by default
because it's simple and stable. Twisted is very stable and simple, and if
you've got Twisted installed, is probably your best bet."""))
registerGlobalValue(supybot.drivers, 'maxReconnectWait',
registry.PositiveFloat(300.0, """Determines the maximum time the bot will
wait before attempting to reconnect to an IRC server. The bot may, of
course, reconnect earlier if possible."""))
###
# supybot.directories, for stuff relating to directories.
###
# XXX This shouldn't make directories willy-nilly. As it is now, if it's
# configured, it'll still make the default directories, I think.
# A String configuration value naming a directory that is created on
# demand when the value is read.
class Directory(registry.String):
    def __call__(self):
        # ??? Should we perhaps always return an absolute path here?
        v = super(Directory, self).__call__()
        if not os.path.exists(v):
            # os.makedirs (rather than os.mkdir) also creates any missing
            # intermediate directories.  Tolerate the race where another
            # process creates the directory between our existence check and
            # this call; re-raise for any other failure.
            try:
                os.makedirs(v)
            except OSError:
                if not os.path.isdir(v):
                    raise
        return v
    def dirize(self, filename):
        """Return filename placed under this directory, first stripping
        any path prefix filename shares with it."""
        myself = self()
        if os.path.isabs(filename):
            # Reduce an absolute filename to its part beyond our own
            # absolute path.
            filename = os.path.abspath(filename)
            selfAbs = os.path.abspath(myself)
            commonPrefix = os.path.commonprefix([selfAbs, filename])
            filename = filename[len(commonPrefix):]
        elif not os.path.isabs(myself):
            if filename.startswith(myself):
                filename = filename[len(myself):]
        filename = filename.lstrip(os.path.sep) # Stupid os.path.join!
        return os.path.join(myself, filename)
class DataFilename(registry.String):
    def __call__(self):
        # Coerce the configured filename into the data directory: any value
        # outside it is reduced to its basename and rehomed there, and the
        # corrected value is persisted via setValue.
        v = super(DataFilename, self).__call__()
        dataDir = supybot.directories.data()
        if not v.startswith(dataDir):
            v = os.path.basename(v)
            v = os.path.join(dataDir, v)
            self.setValue(v)
        return v
class DataFilenameDirectory(DataFilename, Directory):
    def __call__(self):
        # The first call rehomes the value into the data directory (its
        # side effect is what matters; the result is discarded), then the
        # second ensures the directory itself exists and returns it.
        v = DataFilename.__call__(self)
        v = Directory.__call__(self)
        return v
registerGroup(supybot, 'directories')
registerGlobalValue(supybot.directories, 'conf',
Directory('conf', """Determines what directory configuration data is
put into."""))
registerGlobalValue(supybot.directories, 'data',
Directory('data', """Determines what directory data is put into."""))
registerGlobalValue(supybot.directories, 'backup',
Directory('backup', """Determines what directory backup data is put
into."""))
registerGlobalValue(supybot.directories.data, 'tmp',
DataFilenameDirectory('tmp', """Determines what directory temporary files
are put into."""))
# Route AtomicFile's temporary and backup files into our configured
# data/tmp and backup directories.
utils.file.AtomicFile.default.tmpDir = supybot.directories.data.tmp
utils.file.AtomicFile.default.backupDir = supybot.directories.backup
registerGlobalValue(supybot.directories, 'plugins',
registry.CommaSeparatedListOfStrings([], """Determines what directories the
bot will look for plugins in. Accepts a comma-separated list of strings.
This means that to add another directory, you can nest the former value and
add a new one. E.g. you can say: bot: 'config supybot.directories.plugins
[config supybot.directories.plugins], newPluginDirectory'."""))
registerGlobalValue(supybot, 'plugins',
registry.SpaceSeparatedSetOfStrings([], """Determines what plugins will
be loaded.""", orderAlphabetically=True))
registerGlobalValue(supybot.plugins, 'alwaysLoadImportant',
registry.Boolean(True, """Determines whether the bot will always load
important plugins (Admin, Channel, Config, Misc, Owner, and User)
regardless of what their configured state is. Generally, if these plugins
are configured not to load, you didn't do it on purpose, and you still
want them to load. Users who don't want to load these plugins are smart
enough to change the value of this variable appropriately :)"""))
###
# supybot.databases. For stuff relating to Supybot's databases (duh!)
###
class Databases(registry.SpaceSeparatedListOfStrings):
    def __call__(self):
        # An explicitly configured list wins; otherwise fall back to sane
        # defaults, preferring sqlite when its module has been loaded.
        configured = super(Databases, self).__call__()
        if configured:
            return configured
        fallback = ['anydbm', 'cdb', 'flat', 'pickle']
        if 'sqlite' in sys.modules:
            fallback.insert(0, 'sqlite')
        return fallback
    def serialize(self):
        # Serialize as a plain space-separated string.
        return ' '.join(self.value)
registerGlobalValue(supybot, 'databases',
Databases([], """Determines what databases are available for use. If this
value is not configured (that is, if its value is empty) then sane defaults
will be provided."""))
registerGroup(supybot.databases, 'users')
registerGlobalValue(supybot.databases.users, 'filename',
registry.String('users.conf', """Determines what filename will be used for
the users database. This file will go into the directory specified by the
supybot.directories.conf variable."""))
registerGlobalValue(supybot.databases.users, 'timeoutIdentification',
registry.Integer(0, """Determines how long it takes identification to time
out. If the value is less than or equal to zero, identification never
times out."""))
registerGlobalValue(supybot.databases.users, 'allowUnregistration',
registry.Boolean(False, """Determines whether the bot will allow users to
unregister their users. This can wreak havoc with already-existing
databases, so by default we don't allow it. Enable this at your own risk.
(Do also note that this does not prevent the owner of the bot from using
the unregister command.)
"""))
registerGroup(supybot.databases, 'ignores')
registerGlobalValue(supybot.databases.ignores, 'filename',
registry.String('ignores.conf', """Determines what filename will be used
for the ignores database. This file will go into the directory specified
by the supybot.directories.conf variable."""))
registerGroup(supybot.databases, 'channels')
registerGlobalValue(supybot.databases.channels, 'filename',
registry.String('channels.conf', """Determines what filename will be used
for the channels database. This file will go into the directory specified
by the supybot.directories.conf variable."""))
# TODO This will need to do more in the future (such as making sure link.allow
# will let the link occur), but for now let's just leave it as this.
class ChannelSpecific(registry.Boolean):
    def getChannelLink(self, channel):
        """Return the channel whose databases `channel` should use,
        following channelSpecific.link settings while guarding against
        cyclic link chains."""
        channelSpecific = supybot.databases.plugins.channelSpecific
        channels = [channel]
        def hasLinkChannel(channel):
            # A channel follows its link only when it is not
            # channel-specific, the link target allows linking, and the
            # link doesn't point straight back at the channel itself.
            if not get(channelSpecific, channel):
                lchannel = get(channelSpecific.link, channel)
                if not get(channelSpecific.link.allow, lchannel):
                    return False
                return channel != lchannel
            return False
        lchannel = channel
        while hasLinkChannel(lchannel):
            lchannel = get(channelSpecific.link, lchannel)
            if lchannel not in channels:
                channels.append(lchannel)
            else:
                # Found a cyclic link. We'll just use the current channel
                lchannel = channel
                break
        return lchannel
registerGroup(supybot.databases, 'plugins')
registerChannelValue(supybot.databases.plugins, 'channelSpecific',
ChannelSpecific(True, """Determines whether database-based plugins that
can be channel-specific will be so. This can be overridden by individual
channels. Do note that the bot needs to be restarted immediately after
changing this variable or your db plugins may not work for your channel;
also note that you may wish to set
supybot.databases.plugins.channelSpecific.link appropriately if you wish
to share a certain channel's databases globally."""))
registerChannelValue(supybot.databases.plugins.channelSpecific, 'link',
ValidChannel('#', """Determines what channel global (non-channel-specific)
databases will be considered a part of. This is helpful if you've been
running channel-specific for awhile and want to turn the databases for
your primary channel into global databases. If
supybot.databases.plugins.channelSpecific.link.allow prevents linking, the
current channel will be used. Do note that the bot needs to be restarted
immediately after changing this variable or your db plugins may not work
for your channel."""))
registerChannelValue(supybot.databases.plugins.channelSpecific.link, 'allow',
registry.Boolean(True, """Determines whether another channel's global
(non-channel-specific) databases will be allowed to link to this channel's
databases. Do note that the bot needs to be restarted immediately after
changing this variable or your db plugins may not work for your channel.
"""))
class CDB(registry.Boolean):
    def connect(self, filename):
        # Open (creating on demand, mode 'c') a CDB database at filename,
        # journaling into the tmp data directory and flushing according to
        # the configured maximumModifications fraction.
        from . import cdb
        basename = os.path.basename(filename)
        journalName = supybot.directories.data.tmp.dirize(basename+'.journal')
        return cdb.open(filename, 'c',
                        journalName=journalName,
                        maxmods=self.maximumModifications())
registerGroup(supybot.databases, 'types')
registerGlobalValue(supybot.databases.types, 'cdb', CDB(True, """Determines
whether CDB databases will be allowed as a database implementation."""))
registerGlobalValue(supybot.databases.types.cdb, 'maximumModifications',
registry.Probability(0.5, """Determines how often CDB databases will have
their modifications flushed to disk. When the number of modified records
is greater than this fraction of the total number of records, the database
will be entirely flushed to disk."""))
# XXX Configuration variables for dbi, sqlite, flat, mysql, etc.
###
# Protocol information.
###
originalIsNick = ircutils.isNick
def isNick(s, strictRfc=None, **kw):
    """Wrapper around ircutils.isNick that defaults strictRfc to the
    supybot.protocols.irc.strictRfc configuration value when the caller
    doesn't pass one explicitly."""
    if strictRfc is None:
        strictRfc = supybot.protocols.irc.strictRfc()
    return originalIsNick(s, strictRfc=strictRfc, **kw)
# Monkey-patch the module so every caller picks up the configured default.
ircutils.isNick = isNick
###
# supybot.protocols
###
registerGroup(supybot, 'protocols')
###
# supybot.protocols.irc
###
registerGroup(supybot.protocols, 'irc')
class Banmask(registry.SpaceSeparatedSetOfStrings):
    # The hostmask components a generated banmask may match on.
    validStrings = ('exact', 'nick', 'user', 'host')
    def __init__(self, *args, **kwargs):
        assert self.validStrings, 'There must be some valid strings. ' \
                                  'This is a bug.'
        self.__parent = super(Banmask, self)
        self.__parent.__init__(*args, **kwargs)
        # Advertise the valid values in this value's generated docs.
        self.__doc__ = format('Valid values include %L.',
                              map(repr, self.validStrings))
    def help(self):
        # Append the list of valid strings to the configured help text.
        strings = [s for s in self.validStrings if s]
        return format('%s Valid strings: %L.', self._help, strings)
    def normalize(self, s):
        # Case-insensitively map s onto its canonical spelling in
        # validStrings; unknown values pass through unchanged so setValue
        # can reject them.
        lowered = s.lower()
        L = list(map(str.lower, self.validStrings))
        try:
            i = L.index(lowered)
        except ValueError:
            return s # This is handled in setValue.
        return self.validStrings[i]
    def setValue(self, v):
        # NOTE(review): under Python 3 map() returns a one-shot iterator,
        # so the loop below would exhaust it before self.List(v); this is
        # fine under Python 2, which this file targets.
        v = map(self.normalize, v)
        for s in v:
            if s not in self.validStrings:
                self.error()
        self.__parent.setValue(self.List(v))
    def makeBanmask(self, hostmask, options=None):
        """Create a banmask from the given hostmask. If a style of banmask
        isn't specified via options, the value of
        conf.supybot.protocols.irc.banmask is used.

        options - A list specifying which parts of the hostmask should
        explicitly be matched: nick, user, host. If 'exact' is given, then
        only the exact hostmask will be used."""
        channel = dynamic.channel
        assert channel is None or ircutils.isChannel(channel)
        (nick, user, host) = ircutils.splitHostmask(hostmask)
        # Start fully wildcarded; each requested option narrows one part.
        bnick = '*'
        buser = '*'
        bhost = '*'
        if not options:
            options = get(supybot.protocols.irc.banmask, channel)
        for option in options:
            if option == 'nick':
                bnick = nick
            elif option == 'user':
                buser = user
            elif option == 'host':
                bhost = host
            elif option == 'exact':
                return hostmask
        # If nothing narrowed the mask, fall back to the exact hostmask
        # rather than returning the match-everything *!*@*.
        if (bnick, buser, bhost) == ('*', '*', '*') and \
                ircutils.isUserHostmask(hostmask):
            return hostmask
        return ircutils.joinHostmask(bnick, buser, bhost)
registerChannelValue(supybot.protocols.irc, 'banmask',
Banmask(['user', 'host'], """Determines what will be used as the
default banmask style."""))
# Registers the strict-RFC nick parsing toggle.
# (Fixes help-text typo: duplicated "you you".)
registerGlobalValue(supybot.protocols.irc, 'strictRfc',
    registry.Boolean(True, """Determines whether the bot will strictly follow
    the RFC; currently this only affects what strings are considered to be
    nicks. If you're using a server or a network that requires you to message
    a nick such as services@this.network.server then you should set this to
    False."""))
registerGlobalValue(supybot.protocols.irc, 'umodes',
registry.String('', """Determines what user modes the bot will request from
the server when it first connects. Many people might choose +i; some
networks allow +x, which indicates to the auth services on those networks
that you should be given a fake host."""))
registerGlobalValue(supybot.protocols.irc, 'vhost',
registry.String('', """Determines what vhost the bot will bind to before
connecting to the IRC server."""))
registerGlobalValue(supybot.protocols.irc, 'maxHistoryLength',
registry.Integer(1000, """Determines how many old messages the bot will
keep around in its history. Changing this variable will not take effect
until the bot is restarted."""))
registerGlobalValue(supybot.protocols.irc, 'throttleTime',
registry.Float(1.0, """A floating point number of seconds to throttle
queued messages -- that is, messages will not be sent faster than once per
throttleTime seconds."""))
registerGlobalValue(supybot.protocols.irc, 'ping',
registry.Boolean(True, """Determines whether the bot will send PINGs to the
server it's connected to in order to keep the connection alive and discover
earlier when it breaks. Really, this option only exists for debugging
purposes: you always should make it True unless you're testing some strange
server issues."""))
registerGlobalValue(supybot.protocols.irc.ping, 'interval',
registry.Integer(120, """Determines the number of seconds between sending
pings to the server, if pings are being sent to the server."""))
registerGroup(supybot.protocols.irc, 'queuing')
registerGlobalValue(supybot.protocols.irc.queuing, 'duplicates',
registry.Boolean(False, """Determines whether the bot will refuse duplicate
messages to be queued for delivery to the server. This is a safety
mechanism put in place to prevent plugins from sending the same message
multiple times; most of the time it doesn't matter, unless you're doing
certain kinds of plugin hacking."""))
registerGroup(supybot.protocols.irc.queuing, 'rateLimit')
registerGlobalValue(supybot.protocols.irc.queuing.rateLimit, 'join',
registry.Float(0, """Determines how many seconds must elapse between JOINs
sent to the server."""))
###
# supybot.protocols.http
###
registerGroup(supybot.protocols, 'http')
registerGlobalValue(supybot.protocols.http, 'peekSize',
registry.PositiveInteger(4096, """Determines how many bytes the bot will
'peek' at when looking through a URL for a doctype or title or something
similar. It'll give up after it reads this many bytes, even if it hasn't
found what it was looking for."""))
registerGlobalValue(supybot.protocols.http, 'proxy',
registry.String('', """Determines what proxy all HTTP requests should go
through. The value should be of the form 'host:port'."""))
utils.web.proxy = supybot.protocols.http.proxy
###
# Especially boring stuff.
###
registerGlobalValue(supybot, 'defaultIgnore',
registry.Boolean(False, """Determines whether the bot will ignore
unregistered users by default. Of course, that'll make it particularly
hard for those users to register or identify with the bot, but that's your
problem to solve."""))
class IP(registry.String):
    """Value must be a valid IP."""
    def setValue(self, v):
        # Accept either the empty string (meaning "unset") or anything
        # utils.net recognizes as an IP address; reject everything else.
        if not v or utils.net.isIP(v):
            registry.String.setValue(self, v)
        else:
            self.error()
registerGlobalValue(supybot, 'externalIP',
IP('', """A string that is the external IP of the bot. If this is the empty
string, the bot will attempt to find out its IP dynamically (though
sometimes that doesn't work, hence this variable)."""))
class SocketTimeout(registry.PositiveInteger):
    """Value must be an integer greater than supybot.drivers.poll and must be
    greater than or equal to 1."""
    def setValue(self, v):
        # NOTE(review): the docstring above (which registry surfaces as the
        # error message) says strictly "greater than" the poll interval,
        # but this check also accepts v == poll -- confirm which is meant.
        if v < supybot.drivers.poll() or v < 1:
            self.error()
        registry.PositiveInteger.setValue(self, v)
        # Applies process-wide: every socket created afterwards inherits
        # this default timeout.
        socket.setdefaulttimeout(self.value)
registerGlobalValue(supybot, 'defaultSocketTimeout',
SocketTimeout(10, """Determines what the default timeout for socket objects
will be. This means that *all* sockets will timeout when this many seconds
has gone by (unless otherwise modified by the author of the code that uses
the sockets)."""))
registerGlobalValue(supybot, 'pidFile',
registry.String('', """Determines what file the bot should write its PID
(Process ID) to, so you can kill it more easily. If it's left unset (as is
the default) then no PID file will be written. A restart is required for
changes to this variable to take effect."""))
###
# Debugging options.
###
registerGroup(supybot, 'debug')
registerGlobalValue(supybot.debug, 'threadAllCommands',
registry.Boolean(False, """Determines whether the bot will automatically
thread all commands."""))
registerGlobalValue(supybot.debug, 'flushVeryOften',
registry.Boolean(False, """Determines whether the bot will automatically
flush all flushers *very* often. Useful for debugging when you don't know
what's breaking or when, but think that it might be logged."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| jeffmahoney/supybot | src/conf.py | Python | bsd-3-clause | 48,640 |
#!BPY
"""
Name: 'Inter-Quake Model'
Blender: 249
Group: 'Export'
Tip: 'Export Inter-Quake Model files'
"""
import struct, math
import Blender
import BPyArmature
# IQM vertex array types: which vertex attribute an array stores.
IQM_POSITION = 0
IQM_TEXCOORD = 1
IQM_NORMAL = 2
IQM_TANGENT = 3
IQM_BLENDINDEXES = 4
IQM_BLENDWEIGHTS = 5
IQM_COLOR = 6
IQM_CUSTOM = 0x10
# IQM vertex array component formats.
IQM_BYTE = 0
IQM_UBYTE = 1
IQM_SHORT = 2
IQM_USHORT = 3
IQM_INT = 4
IQM_UINT = 5
IQM_HALF = 6
IQM_FLOAT = 7
IQM_DOUBLE = 8
# Animation flag: loop playback.
IQM_LOOP = 1
# Little-endian binary record layouts for the IQM file sections.
IQM_HEADER = struct.Struct('<16s27I')
IQM_MESH = struct.Struct('<6I')
IQM_TRIANGLE = struct.Struct('<3I')
IQM_JOINT = struct.Struct('<Ii10f')
IQM_POSE = struct.Struct('<iI20f')
IQM_ANIMATION = struct.Struct('<3IfI')
IQM_VERTEXARRAY = struct.Struct('<5I')
IQM_BOUNDS = struct.Struct('<8f')
# Size of the simulated post-transform vertex cache used by Mesh.optimize.
MAXVCACHE = 32
class Vertex:
    """A single output vertex: position, normal, UV and bone blend weights.

    ``weights`` is a list of ``(weight, bone_index)`` pairs.
    """

    def __init__(self, index, coord, normal, uv, weights):
        self.index = index
        self.coord = coord
        self.normal = normal
        self.uv = uv
        self.weights = weights

    def normalizeWeights(self):
        """Quantize blend weights so they sum to exactly 255.

        Keeps at most the four strongest influences and pads with zero-weight
        entries so the result always has exactly four.
        """
        if not self.weights:
            self.weights = [(0, 0)] * 4
            return
        # Keep only the four strongest influences (stable sort, descending).
        self.weights.sort(key=lambda entry: entry[0], reverse=True)
        del self.weights[4:]
        totalweight = sum(weight for (weight, bone) in self.weights)
        if totalweight > 0:
            quantized = []
            for weight, bone in self.weights:
                quantized.append((int(round(weight * 255.0 / totalweight)), bone))
            self.weights = quantized
            # Drop trailing entries whose quantized weight rounded to zero.
            while len(self.weights) > 1 and self.weights[-1][0] <= 0:
                self.weights.pop()
        else:
            # All-zero weights: distribute evenly over the present bones.
            share = int(round(255.0 / len(self.weights)))
            self.weights = [(share, bone) for (weight, bone) in self.weights]
        # Spread any rounding error one unit at a time until the sum is 255.
        totalweight = sum(weight for (weight, bone) in self.weights)
        while totalweight != 255:
            for i, (weight, bone) in enumerate(self.weights):
                if totalweight > 255 and weight > 0:
                    self.weights[i] = (weight - 1, bone)
                    totalweight -= 1
                elif totalweight < 255 and weight < 255:
                    self.weights[i] = (weight + 1, bone)
                    totalweight += 1
        # Pad to exactly four entries, reusing the last bone index.
        while len(self.weights) < 4:
            self.weights.append((0, self.weights[-1][1]))

    def calcScore(self):
        """Forsyth vertex-cache score: rarely-used, recently-cached verts win."""
        if not self.uses:
            self.score = -1.0
            return
        self.score = 2.0 * pow(len(self.uses), -0.5)
        if self.cacherank >= 3:
            # Inside the simulated cache but past the three most recent slots.
            self.score += pow(1.0 - float(self.cacherank - 3) / MAXVCACHE, 1.5)
        elif self.cacherank >= 0:
            # One of the three most recently used slots.
            self.score += 0.75

    def neighborKey(self, other):
        """Symmetric edge key: identical for (self, other) and (other, self)."""
        if self.coord < other.coord:
            a, b = self, other
        else:
            a, b = other, self
        return (a.coord.x, a.coord.y, a.coord.z,
                b.coord.x, b.coord.y, b.coord.z,
                tuple(a.weights), tuple(b.weights))

    def __hash__(self):
        return self.index

    def __eq__(self, v):
        return (self.coord == v.coord and self.normal == v.normal
                and self.uv == v.uv and self.weights == v.weights)
class Mesh:
    """One output mesh: vertices and triangles sharing one material."""
    def __init__(self, name, material, verts):
        self.name = name
        self.material = material
        # One placeholder slot per source vertex; filled lazily while faces
        # are collected (see collectMeshes).
        self.verts = [ None for v in verts ]
        self.vertmap = {}
        self.tris = []
    def calcTangents(self):
        # See "Tangent Space Calculation" at http://www.terathon.com/code/tangent.html
        for v in self.verts:
            v.tangent = Blender.Mathutils.Vector(0.0, 0.0, 0.0)
            v.bitangent = Blender.Mathutils.Vector(0.0, 0.0, 0.0)
        # Accumulate per-triangle tangent/bitangent into each corner vertex.
        for (v0, v1, v2) in self.tris:
            dco1 = v1.coord - v0.coord
            dco2 = v2.coord - v0.coord
            duv1 = v1.uv - v0.uv
            duv2 = v2.uv - v0.uv
            tangent = dco2*duv1.y - dco1*duv2.y
            bitangent = dco2*duv1.x - dco1*duv2.x
            if dco2.cross(dco1).dot(bitangent.cross(tangent)) < 0:
                tangent.negate()
                bitangent.negate()
            v0.tangent += tangent
            v1.tangent += tangent
            v2.tangent += tangent
            v0.bitangent += bitangent
            v1.bitangent += bitangent
            v2.bitangent += bitangent
        for v in self.verts:
            # Gram-Schmidt orthogonalize the tangent against the normal.
            v.tangent = (v.tangent - v.normal*v.tangent.dot(v.normal)).normalize()
            # Collapse the bitangent to a +/-1.0 handedness sign.
            if v.normal.cross(v.tangent).dot(v.bitangent) < 0:
                v.bitangent = -1.0
            else:
                v.bitangent = 1.0
    def optimize(self):
        # Linear-speed vertex cache optimization algorithm by Tom Forsyth
        for v in self.verts:
            if v:
                v.index = -1
                v.uses = []
                v.cacherank = -1
        for i, (v0, v1, v2) in enumerate(self.tris):
            v0.uses.append(i)
            v1.uses.append(i)
            v2.uses.append(i)
        for v in self.verts:
            if v:
                v.calcScore()
        # Seed with the highest-scoring triangle.
        besttri = -1
        bestscore = -42.0
        scores = []
        for i, (v0, v1, v2) in enumerate(self.tris):
            scores.append(v0.score + v1.score + v2.score)
            if scores[i] > bestscore:
                besttri = i
                bestscore = scores[i]
        vertloads = 0 # debug info
        vertschedule = []
        trischedule = []
        vcache = []
        while besttri >= 0:
            tri = self.tris[besttri]
            scores[besttri] = -666.0
            trischedule.append(tri)
            for v in tri:
                if v.cacherank < 0: # debug info
                    vertloads += 1 # debug info
                if v.index < 0:
                    # First time scheduled: assign its final output index.
                    v.index = len(vertschedule)
                    vertschedule.append(v)
                v.uses.remove(besttri)
                v.cacherank = -1
                v.score = -1.0
            # The triangle's verts move to the cache front; stale entries drop out.
            vcache = [ v for v in tri if v.uses ] + [ v for v in vcache if v.cacherank >= 0 ]
            for i, v in enumerate(vcache):
                v.cacherank = i
                v.calcScore()
            # Prefer a triangle that touches the cached verts.
            besttri = -1
            bestscore = -42.0
            for v in vcache:
                for i in v.uses:
                    v0, v1, v2 = self.tris[i]
                    scores[i] = v0.score + v1.score + v2.score
                    if scores[i] > bestscore:
                        besttri = i
                        bestscore = scores[i]
            while len(vcache) > MAXVCACHE:
                vcache.pop().cacherank = -1
            # Cache exhausted: fall back to a global scan for the next seed.
            if besttri < 0:
                for i, score in enumerate(scores):
                    if score > bestscore:
                        besttri = i
                        bestscore = score
        print '%s: %d verts optimized to %d/%d loads for %d entry LRU cache' % (self.name, len(self.verts), vertloads, len(vertschedule), MAXVCACHE)
        #print '%s: %d verts scheduled to %d' % (self.name, len(self.verts), len(vertschedule))
        self.verts = vertschedule
        # print '%s: %d tris scheduled to %d' % (self.name, len(self.tris), len(trischedule))
        self.tris = trischedule
    def meshData(self, iqm):
        # IQM_MESH record: name, material, then vert/tri ranges assigned by
        # IQMFile.addMeshes (firstvert/firsttri are set there).
        return [ iqm.addText(self.name), iqm.addText(self.material), self.firstvert, len(self.verts), self.firsttri, len(self.tris) ]
class Bone:
    """A skeleton joint with its armature-space and parent-relative matrices,
    plus per-channel quantization state filled in during animation export."""
    def __init__(self, name, index, parent, matrix):
        self.name = name
        self.index = index
        self.parent = parent          # Bone instance or None for roots
        self.matrix = matrix          # armature-space transform
        self.localmatrix = matrix
        if self.parent:
            # Parent-relative transform.
            self.localmatrix *= parent.matrix.copy().invert()
        self.numchannels = 0
        self.channelmask = 0
        # Per-channel minima (offsets) and maxima-then-scales for the 10
        # channels: 3 location, 4 quaternion, 3 scale. Initialized to
        # sentinels so min()/max() in Animation.calcFrameLimits work.
        self.channeloffsets = [ 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10 ]
        self.channelscales = [ -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10 ]
    def jointData(self, iqm):
        """Return the IQM_JOINT record fields for this bone."""
        if self.parent:
            parent = self.parent.index
        else:
            parent = -1
        pos = self.localmatrix.translationPart()
        orient = self.localmatrix.toQuat().normalize()
        # Canonicalize the quaternion sign (q and -q encode the same rotation).
        if orient.w > 0:
            orient.negate()
        scale = self.localmatrix.scalePart()
        # Snap scales to 1/65536 steps so near-identity scales compare cleanly.
        scale.x = round(scale.x*0x10000)/0x10000
        scale.y = round(scale.y*0x10000)/0x10000
        scale.z = round(scale.z*0x10000)/0x10000
        return [ iqm.addText(self.name), parent, pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z ]
    def poseData(self, iqm):
        """Return the IQM_POSE record fields (mask plus offset/scale pairs)."""
        if self.parent:
            parent = self.parent.index
        else:
            parent = -1
        return [ parent, self.channelmask ] + self.channeloffsets + self.channelscales
    def calcChannelMask(self):
        """Convert accumulated min/max into offset/scale and build the mask.

        A channel is animated (bit set) only if its observed range exceeds a
        tiny epsilon; its scale maps the full range onto uint16 frame values.
        Returns the number of animated channels for this bone.
        """
        for i in xrange(0, 10):
            self.channelscales[i] -= self.channeloffsets[i]
            if self.channelscales[i] >= 1.0e-10:
                self.numchannels += 1
                self.channelmask |= 1 << i
                self.channelscales[i] /= 0xFFFF
            else:
                self.channelscales[i] = 0.0
        return self.numchannels
class Animation:
    """A named sequence of pose frames plus per-animation metadata.

    Each frame is a list of (loc, quat, scale, matrix) tuples, one per bone,
    in the order produced by collectAnim.
    """
    def __init__(self, name, frames, fps = 0.0, flags = 0):
        self.name = name
        self.frames = frames
        self.fps = fps
        self.flags = flags
    def calcFrameLimits(self, bones):
        """Accumulate per-bone channel minima/maxima over all frames.

        channeloffsets collects each channel's minimum; channelscales
        temporarily holds the maximum until Bone.calcChannelMask converts
        it into an actual scale.
        """
        for frame in self.frames:
            for i, bone in enumerate(bones):
                loc, quat, scale, mat = frame[i]
                bone.channeloffsets[0] = min(bone.channeloffsets[0], loc.x)
                bone.channeloffsets[1] = min(bone.channeloffsets[1], loc.y)
                bone.channeloffsets[2] = min(bone.channeloffsets[2], loc.z)
                bone.channeloffsets[3] = min(bone.channeloffsets[3], quat.x)
                bone.channeloffsets[4] = min(bone.channeloffsets[4], quat.y)
                bone.channeloffsets[5] = min(bone.channeloffsets[5], quat.z)
                bone.channeloffsets[6] = min(bone.channeloffsets[6], quat.w)
                bone.channeloffsets[7] = min(bone.channeloffsets[7], scale.x)
                bone.channeloffsets[8] = min(bone.channeloffsets[8], scale.y)
                bone.channeloffsets[9] = min(bone.channeloffsets[9], scale.z)
                bone.channelscales[0] = max(bone.channelscales[0], loc.x)
                bone.channelscales[1] = max(bone.channelscales[1], loc.y)
                bone.channelscales[2] = max(bone.channelscales[2], loc.z)
                bone.channelscales[3] = max(bone.channelscales[3], quat.x)
                bone.channelscales[4] = max(bone.channelscales[4], quat.y)
                bone.channelscales[5] = max(bone.channelscales[5], quat.z)
                bone.channelscales[6] = max(bone.channelscales[6], quat.w)
                bone.channelscales[7] = max(bone.channelscales[7], scale.x)
                bone.channelscales[8] = max(bone.channelscales[8], scale.y)
                bone.channelscales[9] = max(bone.channelscales[9], scale.z)
    def animData(self, iqm):
        # IQM_ANIMATION record: name, first frame, frame count, framerate, flags.
        return [ iqm.addText(self.name), self.firstframe, len(self.frames), self.fps, self.flags ]
    def frameData(self, bones):
        """Quantize every frame into the packed little-endian uint16 stream,
        emitting only the channels flagged in each bone's channelmask."""
        data = ''
        for frame in self.frames:
            for i, bone in enumerate(bones):
                loc, quat, scale, mat = frame[i]
                # Fast path: all seven location+rotation channels animated.
                if (bone.channelmask&0x7F) == 0x7F:
                    lx = int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0]))
                    ly = int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1]))
                    lz = int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2]))
                    qx = int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3]))
                    qy = int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4]))
                    qz = int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5]))
                    qw = int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6]))
                    data += struct.pack('<7H', lx, ly, lz, qx, qy, qz, qw)
                else:
                    # Slow path: emit each loc/quat channel individually.
                    if bone.channelmask & 1:
                        data += struct.pack('<H', int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0])))
                    if bone.channelmask & 2:
                        data += struct.pack('<H', int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1])))
                    if bone.channelmask & 4:
                        data += struct.pack('<H', int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2])))
                    if bone.channelmask & 8:
                        data += struct.pack('<H', int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3])))
                    if bone.channelmask & 16:
                        data += struct.pack('<H', int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4])))
                    if bone.channelmask & 32:
                        data += struct.pack('<H', int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5])))
                    if bone.channelmask & 64:
                        data += struct.pack('<H', int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6])))
                # Scale channels are outside the fast path (mask 0x7F covers
                # only loc+quat), so they are handled here in both cases.
                if bone.channelmask & 128:
                    data += struct.pack('<H', int(round((scale.x - bone.channeloffsets[7]) / bone.channelscales[7])))
                if bone.channelmask & 256:
                    data += struct.pack('<H', int(round((scale.y - bone.channeloffsets[8]) / bone.channelscales[8])))
                if bone.channelmask & 512:
                    data += struct.pack('<H', int(round((scale.z - bone.channeloffsets[9]) / bone.channelscales[9])))
        return data
    def frameBoundsData(self, bones, meshes, frame, invbase):
        """Compute one frame's bounding box and radii by skinning every vertex."""
        bbmin = bbmax = None
        xyradius = 0.0
        radius = 0.0
        # Build per-bone world transforms by composing with parents; bones are
        # assumed ordered parent-before-child (see collectBones worklist).
        transforms = []
        for i, bone in enumerate(bones):
            loc, quat, scale, mat = frame[i]
            if bone.parent:
                mat *= transforms[bone.parent.index]
            transforms.append(mat)
        # Turn each transform into a delta from the bind pose.
        for i, mat in enumerate(transforms):
            transforms[i] = invbase[i] * mat
        for mesh in meshes:
            for v in mesh.verts:
                # Skin the vertex with its (0..255) blend weights.
                pos = Blender.Mathutils.Vector(0.0, 0.0, 0.0)
                for (weight, bone) in v.weights:
                    if weight > 0:
                        pos += (v.coord * transforms[bone]) * (weight / 255.0)
                if bbmin:
                    bbmin.x = min(bbmin.x, pos.x)
                    bbmin.y = min(bbmin.y, pos.y)
                    bbmin.z = min(bbmin.z, pos.z)
                    bbmax.x = max(bbmax.x, pos.x)
                    bbmax.y = max(bbmax.y, pos.y)
                    bbmax.z = max(bbmax.z, pos.z)
                else:
                    bbmin = pos.copy()
                    bbmax = pos.copy()
                # Track squared radii; the square roots are taken once below.
                pradius = pos.x*pos.x + pos.y*pos.y
                if pradius > xyradius:
                    xyradius = pradius
                pradius += pos.z*pos.z
                if pradius > radius:
                    radius = pradius
        if bbmin:
            xyradius = math.sqrt(xyradius)
            radius = math.sqrt(radius)
        else:
            # No vertices at all: emit an all-zero bounds record.
            bbmin = bbmax = Blender.Mathutils.Vector(0.0, 0.0, 0.0)
        return IQM_BOUNDS.pack(bbmin.x, bbmin.y, bbmin.z, bbmax.x, bbmax.y, bbmax.z, xyradius, radius)
    def boundsData(self, bones, meshes):
        """Concatenate frameBoundsData for every frame, precomputing the
        inverse bind-pose matrices once."""
        invbase = []
        for bone in bones:
            invbase.append(bone.matrix.copy().invert())
        data = ''
        for i, frame in enumerate(self.frames):
            print "Calculating bounding box for %s:%d" % (self.name, i)
            data += self.frameBoundsData(bones, meshes, frame, invbase)
        return data
class IQMFile:
    """Accumulates meshes, joints and animations, then serializes them in the
    binary IQM v2 layout: a fixed header followed by sections whose offsets
    are computed up front in export()."""
    def __init__(self):
        self.textoffsets = {}   # string -> byte offset into the text section
        self.textdata = ''      # NUL-separated string pool
        self.meshes = []
        self.meshdata = []      # IQM_MESH records (lists of ints)
        self.numverts = 0
        self.numtris = 0
        self.joints = []
        self.jointdata = []     # IQM_JOINT records
        self.numframes = 0
        self.framesize = 0      # uint16 channels per frame (set by calcFrameSize)
        self.anims = []
        self.posedata = []      # IQM_POSE records
        self.animdata = []      # IQM_ANIMATION records
        self.framedata = []
        self.vertdata = []
    def addText(self, str):
        """Intern a string into the text pool and return its byte offset."""
        if not self.textdata:
            # First use: reserve offset 0 for the empty string.
            self.textdata += '\x00'
            self.textoffsets[''] = 0
        try:
            return self.textoffsets[str]
        except:
            offset = len(self.textdata)
            self.textoffsets[str] = offset
            self.textdata += str + '\x00'
            return offset
    def addJoints(self, bones):
        for bone in bones:
            self.joints.append(bone)
            # Joint records are only written when geometry is exported too.
            if self.meshes:
                self.jointdata.append(bone.jointData(self))
    def addMeshes(self, meshes):
        """Append meshes, assigning each its global vert/tri base offsets."""
        self.meshes += meshes
        for mesh in meshes:
            mesh.firstvert = self.numverts
            mesh.firsttri = self.numtris
            self.meshdata.append(mesh.meshData(self))
            self.numverts += len(mesh.verts)
            self.numtris += len(mesh.tris)
    def addAnims(self, anims):
        """Append animations, assigning each its global first-frame index."""
        self.anims += anims
        for anim in anims:
            anim.firstframe = self.numframes
            self.animdata.append(anim.animData(self))
            self.numframes += len(anim.frames)
    def calcFrameSize(self):
        """Derive per-joint channel masks/scales and the total frame size."""
        for anim in self.anims:
            anim.calcFrameLimits(self.joints)
        self.framesize = 0
        for joint in self.joints:
            self.framesize += joint.calcChannelMask()
        for joint in self.joints:
            if self.anims:
                self.posedata.append(joint.poseData(self))
        print 'Exporting %d frames of size %d' % (self.numframes, self.framesize)
    def writeVerts(self, file, offset):
        """Write the vertex array headers, then each attribute array in turn.

        offset is the absolute file offset where the first attribute array's
        data will start; each header records where its data lives.
        """
        if self.numverts <= 0:
            return
        file.write(IQM_VERTEXARRAY.pack(IQM_POSITION, 0, IQM_FLOAT, 3, offset))
        offset += self.numverts * struct.calcsize('<3f')
        file.write(IQM_VERTEXARRAY.pack(IQM_TEXCOORD, 0, IQM_FLOAT, 2, offset))
        offset += self.numverts * struct.calcsize('<2f')
        file.write(IQM_VERTEXARRAY.pack(IQM_NORMAL, 0, IQM_FLOAT, 3, offset))
        offset += self.numverts * struct.calcsize('<3f')
        file.write(IQM_VERTEXARRAY.pack(IQM_TANGENT, 0, IQM_FLOAT, 4, offset))
        offset += self.numverts * struct.calcsize('<4f')
        if self.joints:
            file.write(IQM_VERTEXARRAY.pack(IQM_BLENDINDEXES, 0, IQM_UBYTE, 4, offset))
            offset += self.numverts * struct.calcsize('<4B')
            file.write(IQM_VERTEXARRAY.pack(IQM_BLENDWEIGHTS, 0, IQM_UBYTE, 4, offset))
            offset += self.numverts * struct.calcsize('<4B')
        # Data arrays follow, one attribute at a time across all meshes.
        for mesh in self.meshes:
            for v in mesh.verts:
                file.write(struct.pack('<3f', *v.coord))
        for mesh in self.meshes:
            for v in mesh.verts:
                file.write(struct.pack('<2f', *v.uv))
        for mesh in self.meshes:
            for v in mesh.verts:
                file.write(struct.pack('<3f', *v.normal))
        for mesh in self.meshes:
            for v in mesh.verts:
                # 4th component is the bitangent handedness (+/-1.0),
                # computed in Mesh.calcTangents.
                file.write(struct.pack('<4f', v.tangent.x, v.tangent.y, v.tangent.z, v.bitangent))
        if self.joints:
            for mesh in self.meshes:
                for v in mesh.verts:
                    file.write(struct.pack('<4B', v.weights[0][1], v.weights[1][1], v.weights[2][1], v.weights[3][1]))
            for mesh in self.meshes:
                for v in mesh.verts:
                    file.write(struct.pack('<4B', v.weights[0][0], v.weights[1][0], v.weights[2][0], v.weights[3][0]))
    def calcNeighbors(self):
        """For each triangle, find the triangle sharing each of its edges.

        Edges are keyed symmetrically (Vertex.neighborKey); an edge shared by
        exactly two triangles pairs them up, anything else yields -1.
        """
        edges = {}
        for mesh in self.meshes:
            for i, (v0, v1, v2) in enumerate(mesh.tris):
                e0 = v0.neighborKey(v1)
                e1 = v1.neighborKey(v2)
                e2 = v2.neighborKey(v0)
                tri = mesh.firsttri + i
                try: edges[e0].append(tri)
                except: edges[e0] = [tri]
                try: edges[e1].append(tri)
                except: edges[e1] = [tri]
                try: edges[e2].append(tri)
                except: edges[e2] = [tri]
        neighbors = []
        for mesh in self.meshes:
            for i, (v0, v1, v2) in enumerate(mesh.tris):
                e0 = edges[v0.neighborKey(v1)]
                e1 = edges[v1.neighborKey(v2)]
                e2 = edges[v2.neighborKey(v0)]
                tri = mesh.firsttri + i
                match0 = match1 = match2 = -1
                # index(tri)^1 picks "the other" element of a 2-entry list.
                if len(e0) == 2: match0 = e0[e0.index(tri)^1]
                if len(e1) == 2: match1 = e1[e1.index(tri)^1]
                if len(e2) == 2: match2 = e2[e2.index(tri)^1]
                neighbors.append((match0, match1, match2))
        self.neighbors = neighbors
    def writeTris(self, file):
        """Write triangle indices (rebased to global vertex indices), then the
        neighbor triangle table (0xFFFFFFFF marks "no neighbor")."""
        for mesh in self.meshes:
            for (v0, v1, v2) in mesh.tris:
                file.write(struct.pack('<3I', v0.index + mesh.firstvert, v1.index + mesh.firstvert, v2.index + mesh.firstvert))
        for (n0, n1, n2) in self.neighbors:
            if n0 < 0: n0 = 0xFFFFFFFF
            if n1 < 0: n1 = 0xFFFFFFFF
            if n2 < 0: n2 = 0xFFFFFFFF
            file.write(struct.pack('<3I', n0, n1, n2))
    def export(self, file, usebbox = True):
        """Compute every section offset, then write the header and sections
        in the same order the offsets were assigned."""
        self.filesize = IQM_HEADER.size
        if self.textdata:
            # Pad the text section to a 4-byte boundary.
            while len(self.textdata) % 4:
                self.textdata += '\x00'
            ofs_text = self.filesize
            self.filesize += len(self.textdata)
        else:
            ofs_text = 0
        if self.meshdata:
            ofs_meshes = self.filesize
            self.filesize += len(self.meshdata) * IQM_MESH.size
        else:
            ofs_meshes = 0
        if self.numverts > 0:
            ofs_vertexarrays = self.filesize
            # Position, texcoord, normal, tangent; blend arrays only with joints.
            num_vertexarrays = 4
            if self.joints:
                num_vertexarrays += 2
            self.filesize += num_vertexarrays * IQM_VERTEXARRAY.size
            ofs_vdata = self.filesize
            self.filesize += self.numverts * struct.calcsize('<3f2f3f4f')
            if self.joints:
                self.filesize += self.numverts * struct.calcsize('<4B4B')
        else:
            ofs_vertexarrays = 0
            num_vertexarrays = 0
            ofs_vdata = 0
        if self.numtris > 0:
            ofs_triangles = self.filesize
            self.filesize += self.numtris * IQM_TRIANGLE.size
            ofs_neighbors = self.filesize
            self.filesize += self.numtris * IQM_TRIANGLE.size
        else:
            ofs_triangles = 0
            ofs_neighbors = 0
        if self.jointdata:
            ofs_joints = self.filesize
            self.filesize += len(self.jointdata) * IQM_JOINT.size
        else:
            ofs_joints = 0
        if self.posedata:
            ofs_poses = self.filesize
            self.filesize += len(self.posedata) * IQM_POSE.size
        else:
            ofs_poses = 0
        if self.animdata:
            ofs_anims = self.filesize
            self.filesize += len(self.animdata) * IQM_ANIMATION.size
        else:
            ofs_anims = 0
        falign = 0
        if self.framesize * self.numframes > 0:
            ofs_frames = self.filesize
            self.filesize += self.framesize * self.numframes * struct.calcsize('<H')
            # Pad the uint16 frame data so the bounds section is 4-aligned.
            falign = (4 - (self.filesize % 4)) % 4
            self.filesize += falign
        else:
            ofs_frames = 0
        if usebbox and self.numverts > 0 and self.numframes > 0:
            ofs_bounds = self.filesize
            self.filesize += self.numframes * IQM_BOUNDS.size
        else:
            ofs_bounds = 0
        # Header: magic, version 2, total size, flags, then each section's
        # count/offset pair in specification order.
        file.write(IQM_HEADER.pack('INTERQUAKEMODEL', 2, self.filesize, 0, len(self.textdata), ofs_text, len(self.meshdata), ofs_meshes, num_vertexarrays, self.numverts, ofs_vertexarrays, self.numtris, ofs_triangles, ofs_neighbors, len(self.jointdata), ofs_joints, len(self.posedata), ofs_poses, len(self.animdata), ofs_anims, self.numframes, self.framesize, ofs_frames, ofs_bounds, 0, 0, 0, 0))
        file.write(self.textdata)
        for mesh in self.meshdata:
            file.write(IQM_MESH.pack(*mesh))
        self.writeVerts(file, ofs_vdata)
        self.writeTris(file)
        for joint in self.jointdata:
            file.write(IQM_JOINT.pack(*joint))
        for pose in self.posedata:
            file.write(IQM_POSE.pack(*pose))
        for anim in self.animdata:
            file.write(IQM_ANIMATION.pack(*anim))
        for anim in self.anims:
            file.write(anim.frameData(self.joints))
        file.write('\x00' * falign)
        if usebbox and self.numverts > 0 and self.numframes > 0:
            for anim in self.anims:
                file.write(anim.boundsData(self.joints, self.meshes))
def findArmature():
    """Scan the current selection and return an armature object, or None.

    When several armatures are selected, the last one encountered wins,
    matching the original behavior.
    """
    armature = None
    for obj in Blender.Object.GetSelected():
        if type(obj.getData()) is Blender.Types.ArmatureType:
            armature = obj
    return armature
def collectBones(armature, scale):
    """Build a name -> Bone mapping for all bones of the armature.

    The worklist starts with root bones and has children appended while it is
    being iterated, so indices are assigned parent-before-child -- an ordering
    other parts of the exporter rely on (e.g. frameBoundsData).
    """
    data = armature.getData()
    bones = {}
    matrix = armature.getMatrix('worldspace')
    worklist = [ bone for bone in data.bones.values() if not bone.parent ]
    # Intentionally appending to worklist while iterating it: breadth-first walk.
    for index, bone in enumerate(worklist):
        bmatrix = bone.matrix['ARMATURESPACE'] * matrix
        if scale != 1.0:
            # Scale only the translation row; rotation/scale parts are untouched.
            bmatrix[3][0] *= scale
            bmatrix[3][1] *= scale
            bmatrix[3][2] *= scale
        bones[bone.name] = Bone(bone.name, index, bone.parent and bones.get(bone.parent.name), bmatrix)
        for child in bone.children:
            if child not in worklist:
                worklist.append(child)
    print 'Collected %d bones' % len(worklist)
    return bones
def collectAnim(armature, scale, bones, action, startframe = None, endframe = None):
    """Sample one action frame by frame and return per-frame pose data.

    Returns a list of frames; each frame is a list of
    (loc, quat, scale, posematrix) tuples, one entry per bone in `bones`
    iteration order. Missing start/end frames default to the action's range.
    """
    if startframe is None or endframe is None:
        frames = action.getFrameNumbers()
        if startframe is None:
            startframe = min(frames)
        if endframe is None:
            endframe = max(frames)
    print 'Exporting action "%s" frames %d-%d' % (action.getName(), startframe, endframe)
    scene = Blender.Scene.GetCurrent()
    context = scene.getRenderingContext()
    worldmatrix = armature.getMatrix('worldspace')
    action.setActive(armature)
    outdata = []
    for time in xrange(startframe, endframe+1):
        # Advance the scene to this frame so the pose is re-evaluated.
        context.currentFrame(int(time))
        scene.makeCurrent()
        Blender.Set('curframe', time)
        Blender.Window.Redraw()
        pose = armature.getPose()
        outframe = []
        for bone in bones:
            posematrix = pose.bones[bone.name].poseMatrix
            if bone.parent:
                # Make the pose relative to the parent's pose.
                posematrix *= pose.bones[bone.parent.name].poseMatrix.copy().invert()
            else:
                posematrix *= worldmatrix
            if scale != 1.0:
                # Scale only the translation row.
                posematrix[3][0] *= scale
                posematrix[3][1] *= scale
                posematrix[3][2] *= scale
            loc = posematrix.translationPart()
            quat = posematrix.toQuat().normalize()
            # Canonicalize the quaternion sign (matches Bone.jointData).
            if quat.w > 0:
                quat.negate()
            pscale = posematrix.scalePart()
            # Snap scales to 1/65536 steps, as in Bone.jointData.
            pscale.x = round(pscale.x*0x10000)/0x10000
            pscale.y = round(pscale.y*0x10000)/0x10000
            pscale.z = round(pscale.z*0x10000)/0x10000
            outframe.append((loc, quat, pscale, posematrix))
        outdata.append(outframe)
    return outdata
def collectAnims(armature, scale, bones, animspecs):
actions = Blender.Armature.NLA.GetActions()
animspecs = map(lambda spec: spec.strip(), animspecs.split(','))
anims = []
for animspec in animspecs:
animspec = map(lambda arg: arg.strip(), animspec.split(':'))
animname = animspec[0]
if animname not in actions:
print 'Action "%s" not found in current armature' % animname
continue
try:
startframe = int(animspec[1])
except:
startframe = None
try:
endframe = int(animspec[2])
except:
endframe = None
try:
fps = float(animspec[3])
except:
fps = 0.0
try:
flags = int(animspec[4])
except:
flags = 0
framedata = collectAnim(armature, scale, bones, actions[animname], startframe, endframe)
anims.append(Animation(animname, framedata, fps, flags))
return anims
def collectMeshes(bones, scale, useskel = True, filetype = 'IQM'):
    """Gather selected Blender meshes into exporter Mesh objects.

    Faces are grouped into one Mesh per (object, material index, texture)
    combination. Smooth-shaded vertices are shared/deduplicated; flat-shaded
    vertices are always emitted per-face. Triangulates polygons as fans.
    """
    vertwarn = []
    meshes = []
    for obj in Blender.Object.GetSelected():
        data = obj.getData()
        if (type(data) is Blender.Types.NMeshType) and data.faces:
            coordmatrix = obj.getMatrix('worldspace')
            # Normals transform by the inverse-transpose of the rotation part.
            normalmatrix = coordmatrix.rotationPart().invert().transpose()
            if scale != 1.0:
                coordmatrix *= Blender.Mathutils.ScaleMatrix(scale, 4)
            materials = {}
            for face in data.faces:
                # Skip degenerate faces (fewer than 3 verts or repeated corners).
                if len(face.v) < 3 or face.v[0].co == face.v[1].co or face.v[1].co == face.v[2].co or face.v[2].co == face.v[0].co:
                    continue
                material = Blender.sys.basename(face.image.filename) if face.image else ''
                matindex = face.materialIndex
                try:
                    mesh = materials[obj.name, matindex, material]
                except:
                    # First face with this material combination: start a new Mesh.
                    matprefix = (data.materials and data.materials[matindex].name) or ''
                    mesh = Mesh(obj.name, matprefix + Blender.sys.splitext(material)[0], data.verts)
                    meshes.append(mesh)
                    materials[obj.name, matindex, material] = mesh
                verts = mesh.verts
                vertmap = mesh.vertmap
                faceverts = []
                for i, v in enumerate(face.v):
                    vertco = v.co * coordmatrix
                    if not face.smooth:
                        vertno = Blender.Mathutils.Vector(face.no)
                    else:
                        vertno = v.no
                    vertno = (vertno * normalmatrix).normalize()
                    # flip V axis of texture space
                    if data.hasFaceUV():
                        vertuv = Blender.Mathutils.Vector(face.uv[i][0], 1.0 - face.uv[i][1])
                    else:
                        vertuv = Blender.Mathutils.Vector(0.0, 0.0)
                    vertweights = []
                    if useskel:
                        influences = data.getVertexInfluences(v.index)
                        for name, weight in influences:
                            try:
                                vertweights.append((weight, bones[name].index))
                            except:
                                # Warn once per (bone, mesh) pair.
                                if (name, mesh.name) not in vertwarn:
                                    vertwarn.append((name, mesh.name))
                                    print 'Vertex depends on non-existent bone: ' + name + ' in mesh: ' + mesh.name
                    if not face.smooth:
                        # Flat shading: each face gets its own copy of the vertex.
                        vertindex = len(verts)
                        vertkey = Vertex(vertindex, vertco, vertno, vertuv, vertweights)
                        if filetype == 'IQM':
                            vertkey.normalizeWeights()
                        verts.append(vertkey)
                        faceverts.append(vertkey)
                        continue
                    # Smooth shading: reuse the shared slot when attributes match,
                    # otherwise dedupe via vertmap.
                    vertkey = Vertex(v.index, vertco, vertno, vertuv, vertweights)
                    if filetype == 'IQM':
                        vertkey.normalizeWeights()
                    if not verts[v.index]:
                        verts[v.index] = vertkey
                        faceverts.append(vertkey)
                    elif verts[v.index] == vertkey:
                        faceverts.append(verts[v.index])
                    else:
                        try:
                            vertindex = vertmap[vertkey]
                            faceverts.append(verts[vertindex])
                        except:
                            vertindex = len(verts)
                            vertmap[vertkey] = vertindex
                            verts.append(vertkey)
                            faceverts.append(vertkey)
                # Quake winding is reversed
                for i in xrange(2, len(faceverts)):
                    mesh.tris.append((faceverts[0], faceverts[i], faceverts[i-1]))
    for mesh in meshes:
        mesh.optimize()
        if filetype == 'IQM':
            mesh.calcTangents()
    return meshes
def exportIQE(file, meshes, bones, anims):
    """Write the text-based IQE (Inter-Quake Export) equivalent of the model:
    joints, then per-mesh vertex/triangle data, then animation frames."""
    file.write('# Inter-Quake Export\n\n')
    for bone in bones:
        if bone.parent:
            parent = bone.parent.index
        else:
            parent = -1
        file.write('joint "%s" %d\n' % (bone.name, parent))
        # Bind pose is only meaningful when geometry is exported too.
        if meshes:
            pos = bone.localmatrix.translationPart()
            orient = bone.localmatrix.toQuat().normalize()
            # Canonicalize quaternion sign, as in Bone.jointData.
            if orient.w > 0:
                orient.negate()
            scale = bone.localmatrix.scalePart()
            scale.x = round(scale.x*0x10000)/0x10000
            scale.y = round(scale.y*0x10000)/0x10000
            scale.z = round(scale.z*0x10000)/0x10000
            # Omit the scale triple when it is exactly identity.
            if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
                file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
            else:
                file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
    for mesh in meshes:
        file.write('\nmesh "%s"\n\tmaterial "%s"\n\n' % (mesh.name, mesh.material))
        for v in mesh.verts:
            file.write('vp %.8f %.8f %.8f\n\tvt %.8f %.8f\n\tvn %.8f %.8f %.8f\n' % (v.coord.x, v.coord.y, v.coord.z, v.uv.x, v.uv.y, v.normal.x, v.normal.y, v.normal.z))
            if bones:
                # vb lines list (bone_index, weight) pairs.
                weights = '\tvb'
                for weight in v.weights:
                    weights += ' %d %.8f' % (weight[1], weight[0])
                file.write(weights + '\n')
        file.write('\n')
        for (v0, v1, v2) in mesh.tris:
            file.write('fm %d %d %d\n' % (v0.index, v1.index, v2.index))
    for anim in anims:
        file.write('\nanimation "%s"\n\tframerate %.8f\n' % (anim.name, anim.fps))
        if anim.flags&IQM_LOOP:
            file.write('\tloop\n')
        for frame in anim.frames:
            file.write('\nframe\n')
            for (pos, orient, scale, mat) in frame:
                if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
                    file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
                else:
                    file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
    file.write('\n')
def exportIQM(filename, usemesh = True, useskel = True, usebbox = True, scale = 1.0, animspecs = None):
armature = findArmature()
if useskel and not armature:
print 'No armature selected'
return
if filename.lower().endswith('.iqm'):
filetype = 'IQM'
elif filename.lower().endswith('.iqe'):
filetype = 'IQE'
else:
print 'Unknown file type: %s' % filename
return
if useskel:
bones = collectBones(armature, scale)
else:
bones = {}
bonelist = sorted(bones.values(), key = lambda bone: bone.index)
if usemesh:
meshes = collectMeshes(bones, scale, useskel, filetype)
else:
meshes = []
if useskel and animspecs:
anims = collectAnims(armature, scale, bonelist, animspecs)
else:
anims = []
if filetype == 'IQM':
iqm = IQMFile()
iqm.addMeshes(meshes)
iqm.addJoints(bonelist)
iqm.addAnims(anims)
iqm.calcFrameSize()
iqm.calcNeighbors()
if filename:
try:
if filetype == 'IQM':
file = open(filename, 'wb')
else:
file = open(filename, 'w')
except IOError, (errno, strerror):
errmsg = 'IOError #%s: %s' % (errno, strerror)
if filetype == 'IQM':
iqm.export(file, usebbox)
elif filetype == 'IQE':
exportIQE(file, meshes, bonelist, anims)
file.close()
print 'Saved %s file to %s' % (filetype, filename)
else:
print 'No %s file was generated' % (filetype)
# GUI state: non-empty while an export is running (show_gui displays a notice).
exporting_iqm = ''
# Button/event identifiers for the Blender Draw GUI.
EVENT_NONE = 1
EVENT_EXPORT = 2
EVENT_QUIT = 3
EVENT_FILENAME = 4
# Persistent widget values (Draw.Create wraps them; access via .val).
iqm_filename = Blender.Draw.Create('')
iqm_animspec = Blender.Draw.Create('')
iqm_usemesh = Blender.Draw.Create(1)
iqm_useskel = Blender.Draw.Create(1)
iqm_usebbox = Blender.Draw.Create(1)
iqm_scale = Blender.Draw.Create(1.0)
def iqm_filename_callback(filename):
    """File-selector callback: store the chosen path in the filename widget."""
    global iqm_filename
    iqm_filename.val = filename
def handle_event(evt, val):
    """Keyboard handler for the export GUI: ESC closes the script window."""
    if evt != Blender.Draw.ESCKEY:
        return
    Blender.Draw.Exit()
def handle_button_event(evt):
    """Button handler: run the export, quit, or open the file selector."""
    global exporting_iqm, iqm_filename, iqm_animspec, iqm_usemesh, iqm_useskel, iqm_usebbox, iqm_scale
    if evt == EVENT_EXPORT:
        # Bug fix: previously tested `not iqm_filename`, i.e. the truthiness of
        # the Draw.Create widget object itself rather than the entered string,
        # so the empty-filename guard could never trigger. Test the value.
        if not iqm_filename.val:
            return
        exporting_iqm = iqm_filename.val
        Blender.Draw.Draw()
        exportIQM(iqm_filename.val, iqm_usemesh.val != 0, iqm_useskel.val != 0, iqm_usebbox.val != 0, iqm_scale.val, iqm_animspec.val)
        exporting_iqm = ''
        Blender.Draw.Redraw(1)
        return
    if evt == EVENT_QUIT:
        Blender.Draw.Exit()
    if evt == EVENT_FILENAME:
        Blender.Window.FileSelector(iqm_filename_callback, 'Select IQM file...')
        Blender.Draw.Redraw(1)
def show_gui():
    """Draw the export GUI (or a busy notice while an export is in progress).

    Widget calls both draw the control and return its updated Draw.Create
    value, hence the reassignments to the module-level widget globals.
    """
    global exporting_iqm, iqm_filename, iqm_animspec, iqm_usemesh, iqm_useskel, iqm_usebbox, iqm_scale
    if exporting_iqm:
        Blender.Draw.Text('Please wait while exporting...')
        return
    button_width = 240
    browsebutton_width = 60
    button_height = 25
    Blender.Draw.Button('Export', EVENT_EXPORT, 20, 2*button_height, button_width, button_height, 'Start the IQM export')
    Blender.Draw.Button('Quit', EVENT_QUIT, 20, button_height, button_width, button_height, 'Quit this script')
    Blender.Draw.Button('Browse...', EVENT_FILENAME, 21+button_width-browsebutton_width, 3*button_height, browsebutton_width, button_height, 'Specify IQM file')
    iqm_filename = Blender.Draw.String('IQM file: ', EVENT_NONE, 20, 3*button_height, button_width-browsebutton_width, button_height, iqm_filename.val, 255, 'IQM file to generate')
    iqm_animspec = Blender.Draw.String('Animations: ', EVENT_NONE, 20, 4*button_height, button_width, button_height, iqm_animspec.val, 255, 'Specify the name of the actions to be exported')
    iqm_scale = Blender.Draw.Number('Scale: ', EVENT_NONE, 20, 5*button_height, button_width, button_height, iqm_scale.val, 0.0, 1000.0, 'Scale of the exported model')
    iqm_usebbox = Blender.Draw.Toggle('Bounding boxes', EVENT_NONE, 20, 6*button_height, button_width, button_height, iqm_usebbox.val, 'Generate bounding boxes')
    iqm_usemesh = Blender.Draw.Toggle('Meshes', EVENT_NONE, 20, 7*button_height, button_width, button_height, iqm_usemesh.val, 'Generate meshes')
    iqm_useskel = Blender.Draw.Toggle('Skeleton', EVENT_NONE, 20, 8*button_height, button_width, button_height, iqm_useskel.val, 'Generate skeleton')
Blender.Draw.Register (show_gui, handle_event, handle_button_event)
| lsalzman/iqm | blender-2.49/iqm_export.py | Python | mit | 40,875 |
import os
import sys
import threading
import time

# Bug fix: the usage message used a %-style placeholder ("%s") with
# str.format(), so the program name was never substituted and the literal
# "%s" was printed. Use a {} placeholder instead.
if len(sys.argv) < 3:
    sys.exit("Usage: {} [num threads] [path]".format(sys.argv[0]))
class ThreadClass (threading.Thread):
    """Worker thread: walks the tree at sys.argv[2] and prints the totals."""
    def run(self):
        total_files = 0
        total_dirs = 0
        for dirname, dirnames, filenames in os.walk (sys.argv[2]):
            # Count whole lists at once instead of incrementing per element.
            total_dirs += len (dirnames)
            total_files += len (filenames)
        print ("dirs: {}, files: {}".format (total_dirs, total_files))
class Main ():
    """Starts sys.argv[1] ThreadClass workers and waits for all of them."""
    def runit (self):
        t_list = []
        for i in range (int (sys.argv[1])):
            t = ThreadClass ()
            t.start ()
            t_list.append (t)
        for item in t_list:
            # Bug fix: previously called t.join() in this loop, which joined
            # only the last-started thread (len(t_list) times) and never
            # waited on the others. Join each thread in the list.
            item.join ()
# Script entry point: time the full multi-threaded directory walk.
m = Main ()
start = time.time ()
m.runit ()
print time.time() - start, "seconds"
| SaqlainAbbas/s3ffs | tests/test_dir_list.py | Python | gpl-3.0 | 867 |
import yapi
from django.test import TestCase
from django.utils import timezone
from django.conf import settings
from songs import fetch_songs
class FetchSongsTestCase(TestCase):
def test_column_enum_values_are_correct(self):
    """
    The Spreadsheet Column Enum should have columns in the following order:
    Song Number, Release Date, Song Title, Video URL, Download URL, Tags, Description
    """
    # Constructing Column(n) maps 1-based spreadsheet column numbers to members.
    self.assertEqual(fetch_songs.Column(1), fetch_songs.Column.SONG_NUMBER)
    self.assertEqual(fetch_songs.Column(
        2), fetch_songs.Column.RELEASE_DATE)
    self.assertEqual(fetch_songs.Column(3), fetch_songs.Column.TITLE)
    self.assertEqual(fetch_songs.Column(4), fetch_songs.Column.URL)
    self.assertEqual(fetch_songs.Column(
        5), fetch_songs.Column.DOWNLOAD_URL)
    self.assertEqual(fetch_songs.Column(6), fetch_songs.Column.TAGS)
    self.assertEqual(fetch_songs.Column(7), fetch_songs.Column.DESCRIPTION)
def test_column_enum_rejects_invalid_column(self):
    """
    The Spreadsheet Column Enum should raise an error if presented with a column
    that is greater than 7 or less than 1
    """
    # Enum lookup with an undefined value raises ValueError on both boundaries.
    with self.assertRaises(ValueError):
        fetch_songs.Column(0)
    with self.assertRaises(ValueError):
        fetch_songs.Column(8)
def test_jsonp_to_dict_returns_valid_dict_from_jsonp_string(self):
"""
The JSONP to Dict method should strip the padding off and give us a valid python
dict with all the appropriate values
"""
jsonp_str = 'jsonCallback({"dogs":[{"name":"Bo","breed":"Black Lab"}]});'
expected_result = {"dogs": [{"name": "Bo", "breed": "Black Lab"}]}
result = fetch_songs.jsonp_to_dict(jsonp_str)
self.assertEqual(result, expected_result)
def test_youtube_id_from_url_string_returns_correct_id(self):
url = 'https://www.youtube.com/watch?v=hQVTIJBZook'
expected_result = 'hQVTIJBZook'
result = fetch_songs.youtube_id_from_url_string(url)
self.assertEqual(result, expected_result)
def test_youtube_id_from_url_string_raises_on_non_youtube_url(self):
url = "https://vimeo.com/v=123"
with self.assertRaises(Exception):
fetch_songs.youtube_id_from_url_string(url)
def test_songs_from_spreadsheet_returns_complete_song_dicts(self):
spreadsheet_dict = {'entry': [
{'gs$cell': {'$t': 'ColumnHeader', 'row': '1', 'col': '1'}},
{'gs$cell': {'$t': '12345', 'row': '2', 'col': '1'}},
{'gs$cell': {'$t': '12/30/2016', 'row': '2', 'col': '2'}},
{'gs$cell': {'$t': 'Test Song', 'row': '2', 'col': '3'}},
{'gs$cell': {'$t': 'https://youtu.be/hQVTIJBZook', 'row': '2', 'col': '4'}},
{'gs$cell': {'$t': 'http://downlo.ad/12345', 'row': '2', 'col': '5'}},
{'gs$cell': {'$t': 'fun, folk, happy', 'row': '2', 'col': '6'}},
{'gs$cell': {'$t': 'This is test', 'row': '2', 'col': '7'}},
{'gs$cell': {'$t': '12346', 'row': '3', 'col': '1'}}
]}
expected_result = [{
'song_number': 12345,
'release_date': '12/30/2016',
'title': 'Test Song',
'url': 'https://youtu.be/hQVTIJBZook',
'download_url': 'http://downlo.ad/12345',
'tags': ['fun', 'folk', 'happy'],
'description': 'This is test',
'youtube_id': 'hQVTIJBZook'
}]
result = fetch_songs.songs_from_spreadsheet(spreadsheet_dict)
self.assertEqual(result, expected_result)
def test_update_song_with_metadata_dict_updates_required_values(self):
song = {
'song_number': 12345,
'release_date': '12/30/2016',
'title': 'Test Song',
'url': 'https://youtu.be/hQVTIJBZook',
'download_url': 'http://downlo.ad/12345',
'tags': ['fun', 'folk', 'happy'],
'description': 'This is test',
'youtube_id': 'hQVTIJBZook'
}
metadata_dict = {
'description': 'This is a test song.',
'view_count': 100,
'like_count': 10,
'dislike_count': 1,
'thumbnail_url': 'http://example.com',
'tags': ['something', 'test']
}
expected_result = {
'song_number': 12345,
'release_date': '12/30/2016',
'title': 'Test Song',
'url': 'https://youtu.be/hQVTIJBZook',
'download_url': 'http://downlo.ad/12345',
'youtube_id': 'hQVTIJBZook',
'description': 'This is a test song.',
'view_count': 100,
'like_count': 10,
'dislike_count': 1,
'thumbnail_url': 'http://example.com',
'tags': ['fun', 'folk', 'happy', 'something', 'test']
}
result = fetch_songs.update_song_with_metadata_dict(
song, metadata_dict)
self.assertEqual(result, expected_result)
def test_song_metadata_from_youtube_video_item_returns_valid_metadata_dict(self):
example_yt_id = 'hQVTIJBZook'
example_video_item = None
try:
api = yapi.YoutubeAPI(settings.YOUTUBE_API_KEY)
example_video_item = api.get_video_info(example_yt_id).items[0]
except AttributeError as e:
raise Exception(
'Unable to query Youtube API. Possibly over quota.')
result = fetch_songs.song_metadata_from_youtube_video_item(
example_video_item)
self.assertEqual(result['youtube_id'], example_yt_id)
| zaneswafford/songaday_searcher | songs/tests/test_fetch_songs.py | Python | bsd-3-clause | 5,672 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Metaprogramming Driven Development
import unittest
import struct
import cf as math
# Absolute tolerance used by test_math_sem.ftest for simple comparisons.
eps = 1E-05
# Reference data files (CPython-style math/cmath test-case format).
math_testcases = 'math_testcases.txt'
test_file = 'cmath_testcases.txt'
def ulps_check(expected, got, ulps=20):
    """Compare two non-NaN floats in units-in-the-last-place.

    Returns None when `got` is within `ulps` ulps of `expected`,
    otherwise an error-message string describing the discrepancy.
    """
    delta = to_ulps(got) - to_ulps(expected)
    if abs(delta) > ulps:
        return "error = {} ulps; permitted error = {} ulps".format(delta, ulps)
    return None
def to_ulps(x):
    """Map a non-NaN float x onto the integers so that adjacent floats
    map to adjacent integers; abs(to_ulps(a) - to_ulps(b)) is then the
    distance between a and b measured in ulps.

    Only meaningful on platforms where C doubles are IEEE 754 binary64.
    """
    (bits,) = struct.unpack('<q', struct.pack('<d', x))
    # Negative floats come out with the sign bit set; remap them so the
    # integer ordering matches the float ordering (-0.0 maps to -1).
    return ~(bits + 2**63) if bits < 0 else bits
def acc_check(expected, got, rel_err=2e-15, abs_err=5e-323):
    """Check that two non-NaN floats agree to within a small rounding
    error.  The default tolerances suit IEEE 754 doubles (they allow an
    error of roughly 9 to 19 ulps).  Returns None on success and an
    error-message string on failure.
    """
    # Infinities must be special-cased: inf - inf gives nan below.
    if math.isinf(expected) and got == expected:
        return None
    diff = got - expected
    allowed = max(abs_err, rel_err * abs(expected))
    if abs(diff) >= allowed:
        return "error = {}; permitted error = {}".format(diff, allowed)
    return None
class gen_math_test(type):
    """Metaclass that, at class-creation time, parses every reference
    case from ``test_file`` (cmath format) and attaches one ``test_<id>``
    method per case to the class, so each case is reported individually
    by unittest."""
    def __init__(cls, name, bases, nmspc):
        super(gen_math_test, cls).__init__(name, bases, nmspc)
        # Parse all reference cases once, when the test class is built.
        tem=list(cls.parse_testfile(test_file))
        cls.uses_metaclass = lambda self : True
        cls.test_sem2 = lambda self : self.assertTrue(True)
        t="setattr_test"
        setattr(cls,"test_%s" % t.replace('.',"_"),lambda self: self.checker(t))
        # Map case id -> remaining fields (fn, args, expected, flags).
        cls.te=dict([[e[0],e[1:]] for e in tem])
        # NOTE: the outer lambda binds `f` by value; a plain closure would
        # late-bind and every generated test would check the last case id.
        for f in [e for e in cls.te.keys()]: setattr(cls,"test_%s" % f,(lambda g: lambda self: self.checker(g)) (f))
    def parse_testfile(self,fname):
        """Parse a file with test values
        Empty lines or lines starting with -- are ignored
        yields id, fn, arg_real, arg_imag, exp_real, exp_imag
        """
        with open(fname) as fp:
            for line in fp:
                # skip comment lines and blank lines
                if line.startswith('--') or not line.strip(): continue
                lhs, rhs = line.split('->')
                id, fn, arg_real, arg_imag = lhs.split()
                rhs_pieces = rhs.split()
                exp_real, exp_imag = rhs_pieces[0], rhs_pieces[1]
                # Trailing tokens after the expected values are flags
                # (e.g. "invalid", "overflow").
                flags = rhs_pieces[2:]
                yield (id, fn,
                       float(arg_real), float(arg_imag),
                       float(exp_real), float(exp_imag),
                       flags
                       )
class test_math_sem(unittest.TestCase):
    """Real-valued checks generated from the cmath reference file.

    ``gen_math_test`` (Python 2 ``__metaclass__`` protocol) injects one
    ``test_<id>`` method per parsed case; each delegates to checker().
    """
    __metaclass__ = gen_math_test
    def checker(self,n):
        """Run reference case `n` against the `math` (cf) module."""
        idn=n
        if n=='setattr_test':
            # Dummy case used to verify the metaclass setattr path works.
            self.assertTrue(True)
        else:
            fn, ar, ai, er, ei, flags=self.te[n]
            # Skip if either the input or result is complex, or if
            # flags is nonempty
            if ai != 0. or ei != 0. or flags:
                return
            if fn in ['rect', 'polar']:
                # no real versions of rect, polar
                return
            func = getattr(math, fn)
            try:
                result = func(ar)
            except ValueError:
                message = ("Unexpected ValueError in test %s:%s(%r)\n" % (idn, fn, ar))
                self.fail(message)
            except OverflowError:
                message = ("Unexpected OverflowError in test %s:%s(%r)\n" % (idn, fn, ar))
                self.fail(message)
            self.ftest("%s:%s(%r)" % (idn, fn, ar), result, er)
    def ftest(self, name, value, expected):
        """Fail unless `value` is within the module-level `eps` of
        `expected` (simple absolute tolerance)."""
        if abs(value-expected) > eps:
            # Use %r instead of %f so the error message
            # displays full precision. Otherwise discrepancies
            # in the last few bits will lead to very confusing
            # error messages
            self.fail('%s returned %r, expected %r' % (name, value, expected))
    def test_sem(self):
        # Smoke test: the class itself is importable and runnable.
        self.assertTrue(True)
    def test_meta(self):
        # uses_metaclass is injected by gen_math_test; verifies it ran.
        self.assertTrue(self.uses_metaclass())
class gen_math_mtest(type):
    """Metaclass that parses ``math_testcases`` (real-math format) and
    attaches one ``test_<id>`` method per case to the class it creates."""
    def __init__(cls, name, bases, nmspc):
        super(gen_math_mtest, cls).__init__(name, bases, nmspc)
        # Parse all reference cases once, when the test class is built.
        tem=list(cls.parse_mtestfile(math_testcases))
        cls.uses_metaclass = lambda self : True
        cls.test_sem2 = lambda self : self.assertTrue(True)
        t="setattr_test"
        setattr(cls,"test_%s" % t.replace('.',"_"),lambda self: self.checker(t))
        # Map case id -> (fn, arg, expected, flags).
        cls.te=dict([[e[0],e[1:]] for e in tem])
        # NOTE: the outer lambda binds `f` by value to avoid the classic
        # late-binding-closure pitfall in this loop.
        for f in [e for e in cls.te.keys()]: setattr(cls,"test_%s" % f,(lambda g: lambda self: self.checker(g)) (f))
    def parse_mtestfile(self,fname):
        """Parse a file with test values
        -- starts a comment
        blank lines, or lines containing only a comment, are ignored
        other lines are expected to have the form
          id fn arg -> expected [flag]*
        """
        with open(fname) as fp:
            for line in fp:
                # strip comments, and skip blank lines
                if '--' in line: line = line[:line.index('--')]
                if not line.strip(): continue
                lhs, rhs = line.split('->')
                idn, fn, arg = lhs.split()
                rhs_pieces = rhs.split()
                exp = rhs_pieces[0]
                flags = rhs_pieces[1:]
                yield (idn, fn, float(arg), float(exp), flags)
class test_mmath_sem(unittest.TestCase):
    """Real-math checks generated from ``math_testcases``.

    ``gen_math_mtest`` (Python 2 ``__metaclass__`` protocol) injects one
    ``test_<id>`` method per parsed case; each delegates to checker(),
    which compares against the expected value (or expected exception)
    using ulp/accuracy tolerances.
    """
    __metaclass__ = gen_math_mtest
    def checker(self,n):
        """Run reference case `n`; fail with an aggregated message."""
        fail_fmt = "{}:{}({!r}): expected {!r}, got {!r}"
        failures = []
        idn=n
        if n=='setattr_test':
            # Dummy case used to verify the metaclass setattr path works.
            self.assertTrue(True)
        else:
            fn, arg, expected, flags=self.te[n]
            func = getattr(math, fn)
            # Flags turn the expectation into an exception name instead
            # of a numeric value.
            if 'invalid' in flags or 'divide-by-zero' in flags:
                expected = 'ValueError'
            elif 'overflow' in flags:
                expected = 'OverflowError'
            try:
                got = func(arg)
            except ValueError:
                got = 'ValueError'
            except OverflowError:
                got = 'OverflowError'
            accuracy_failure = None
            if isinstance(got, (math.cf_base,float)) and isinstance(expected, float):
                # Two NaNs compare as a match; one NaN is a failure.
                if math.isnan(expected) and math.isnan(got):
                    return
                if not math.isnan(expected) and not math.isnan(got):
                    if fn == 'lgamma':
                        # we use a weaker accuracy test for lgamma;
                        # lgamma only achieves an absolute error of
                        # a few multiples of the machine accuracy, in
                        # general.
                        accuracy_failure = acc_check(expected, got,
                                                  rel_err = 5e-15,
                                                  abs_err = 5e-15)
                    elif fn == 'erfc':
                        # erfc has less-than-ideal accuracy for large
                        # arguments (x ~ 25 or so), mainly due to the
                        # error involved in computing exp(-x*x).
                        #
                        # XXX Would be better to weaken this test only
                        # for large x, instead of for all x.
                        accuracy_failure = ulps_check(expected, got, 2000)
                    else:
                        accuracy_failure = ulps_check(expected, got, 20)
                    if accuracy_failure is None:
                        return
            if isinstance(got, str) and isinstance(expected, str):
                if got == expected:
                    return
            fail_msg = fail_fmt.format(idn, fn, arg, expected, got)
            if accuracy_failure is not None:
                fail_msg += ' ({})'.format(accuracy_failure)
            failures.append(fail_msg)
            if failures:
                self.fail('Failures in test_mtestfile:\n  ' +
                          '\n  '.join(failures))
    def test_sem(self):
        # Smoke test: the class itself is importable and runnable.
        self.assertTrue(True)
    def test_meta(self):
        # uses_metaclass is injected by gen_math_mtest; verifies it ran.
        self.assertTrue(self.uses_metaclass())
# Run the generated unittest suites when executed as a script.
if __name__=="__main__":
    unittest.main()
| AdamPrzybyla/python-cf | test_math_mdd.py | Python | lgpl-3.0 | 7,253 |
# -*- coding: utf-8 -*-
# Metadata consumed by the Windows installer.
#   titre       : short summary of the module
#   description : detailed description
#   defaut      : whether the module is installed by default
description = dict(
    titre="Statistiques",
    description="Création de diagrammes et d'expériences statistiques.",
    groupe="Modules",
    defaut=True,
)
| wxgeo/geophar | wxgeometrie/modules/statistiques/description.py | Python | gpl-2.0 | 432 |
"""
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField, AuthenticationForm
from django.contrib.auth.models import Group
from django.core.validators import RegexValidator
from contest.public_user import is_public_user, attends_not_ended_contest
from utils.config_info import get_config
from users.models import User, Notification
from users.models import UserProfile
# Register your models here.
# Notification and UserProfile use the default ModelAdmin; User gets a
# customized admin class registered further below.
admin.site.register(Notification)
admin.site.register(UserProfile)
class UserCreationForm(forms.ModelForm):
    """A form for creating new users. Includes all the required
    fields, plus a repeated password."""
    # Forbidden username substrings, loaded once at import time from
    # user_auth.cfg (one token per line, matched case-insensitively).
    USERNAME_BLACK_LIST = get_config(
        'username', 'black_list', filename='user_auth.cfg').splitlines()
    username = forms.CharField(label='Username',
                               validators=[RegexValidator(regex='^\w+$', message='Username must be Alphanumeric')])
    email = forms.EmailField(label='Email')
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput())
    password2 = forms.CharField(
        label='Password Confirmation', widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')
    def clean_password2(self):
        """Validate that the two password entries match."""
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        return password2
    def clean_username(self):
        """Reject usernames containing any blacklisted token
        (case-insensitive substring match)."""
        username = self.cleaned_data.get("username")
        username_lower = username.lower()
        for token in self.USERNAME_BLACK_LIST:
            if token.lower() in username_lower:
                raise forms.ValidationError(
                    "Username shouldn't contain %s." % token)
        return username
    def save(self, commit=True):
        """Create the user, hashing the password via set_password."""
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        user.email = self.cleaned_data["email"]
        if commit:
            user.save()
        return user
# NOTE(review): this class deliberately shadows the imported
# django.contrib.auth.forms.AuthenticationForm name; after this point the
# local subclass is what the module exposes.
class AuthenticationForm(AuthenticationForm):
    """Extend default AuthenticationForm with prettified bootstrap attribute"""
    username = forms.CharField(label='Username')
    password = forms.CharField(label='Password', widget=forms.PasswordInput())
    def __init__(self, *args, **kwargs):
        super(AuthenticationForm, self).__init__(*args, **kwargs)
        # Customize the message shown when an inactive account logs in.
        self.error_messages[
            'inactive'] = 'This account is inactive. Check your email to activate the account!'
    def confirm_login_allowed(self, user):
        """Deactivate (and thereby block) public contest users whose
        contests have all ended, then apply the default checks."""
        if is_public_user(user) and not attends_not_ended_contest(user):
            user.is_active = False
            user.save()
        super(AuthenticationForm, self).confirm_login_allowed(user)
class UserChangeForm(forms.ModelForm):
    """A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = User
        fields = ('email', 'password', 'is_active', 'is_admin')
    def clean_password(self):
        """Always keep the stored password hash unchanged."""
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
# NOTE(review): shadows the imported django.contrib.auth.admin.UserAdmin;
# the local subclass is what gets registered below.
class UserAdmin(UserAdmin):
    """Admin configuration for the custom User model, wiring in the
    custom change/creation forms and the fields of this model."""
    # The forms to add and change user instances
    form = UserChangeForm
    add_form = UserCreationForm
    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ('username', 'is_admin')
    list_filter = ('is_admin',)
    fieldsets = (
        (None, {'fields': (
            'username', 'password', 'email', 'user_level', 'theme', 'is_active')}),
        ('Permissions', {'fields': ('is_admin',)}),
    )
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'email', 'password1', 'password2')}
         ),
    )
    search_fields = ('username',)
    ordering = ('username',)
    filter_horizontal = ()
# Register the custom User model with its customized admin class.
admin.site.register(User, UserAdmin)
# since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
| nthuoj/NTHUOJ_web | users/admin.py | Python | mit | 5,871 |
# #################################################################################### #
# ########################### IMPORTING USEFUL DEFINITIONS ########################### #
## standard library imports
import os, sys
## external libraries imports
import numpy as np
from pdbparser.pdbparser import pdbparser
from pdbparser.Utilities.Modify import set_records_attribute_values
## fullrmc imports
from fullrmc.Engine import Engine
from fullrmc.Core.Group import EmptyGroup
from fullrmc.Constraints.PairDistributionConstraints import PairDistributionConstraint
from fullrmc.Constraints.DistanceConstraints import InterMolecularDistanceConstraint
from fullrmc.Generators.Translations import TranslationGenerator
from fullrmc.Generators.Swaps import SwapPositionsGenerator
# #################################################################################### #
# ############################# DECLARE USEFUL VARIABLES ############################# #
# dirname: directory containing this script; fall back to CWD-relative
# paths when __file__ is undefined (e.g. pasted into an interactive shell).
try:
    DIR_PATH = os.path.dirname( os.path.realpath(__file__) )
except Exception:
    # FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; Exception covers the intended NameError case.
    DIR_PATH = ''
# files name
grFileName = "pdf.exp"        # experimental pair-distribution-function data
pdbFileName = "system.pdb"    # starting atomic structure
engineFileName = "system.rmc" # engine save repository
NCORES = 1
FRESH_START = False
# engine variables (absolute paths derived from the names above)
experimentalDataPath = os.path.join(DIR_PATH, grFileName)
structurePdbPath = os.path.join(DIR_PATH, pdbFileName)
engineSavePath = os.path.join(DIR_PATH, engineFileName)
# #################################################################################### #
# ################################### CREATE ENGINE ################################## #
# Build a fresh engine (with constraints) the first time, otherwise
# reload the previously saved engine repository.
ENGINE = Engine(path=None)
if not ENGINE.is_engine(engineSavePath) or FRESH_START:
    ENGINE = Engine(path=engineSavePath, freshStart=True)
    ENGINE.set_pdb(structurePdbPath)
    ## create and add pair distribution constraint
    PDF_CONSTRAINT = PairDistributionConstraint(experimentalData=experimentalDataPath, weighting="atomicNumber")
    ENGINE.add_constraints([PDF_CONSTRAINT])
    ## create and add intermolecular distances constraint
    EMD_CONSTRAINT = InterMolecularDistanceConstraint()
    ENGINE.add_constraints([EMD_CONSTRAINT])
    EMD_CONSTRAINT.set_type_definition("element")
    # Minimum allowed inter-atomic distances (element pair, distance in
    # angstroms) for every element pair in the Li/Co/Mn/Ni/O system.
    EMD_CONSTRAINT.set_pairs_distance([('Co','Co',2.00),('Co','Mn',2.00),('Co','Ni',2.00),('Co','Li',2.00),('Co','O' ,1.7),
                                       ('Mn','Mn',2.00),('Mn','Li',2.00),('Mn','Ni',2.00),('Mn','O' ,1.7),
                                       ('Ni','Ni',2.00),('Ni','Li',2.00),('Ni','O' ,1.7),
                                       ('Li','Li',2.40),('Li' ,'O',1.8),
                                       ('O' ,'O' ,1.20),])
    ## save engine
    ENGINE.save()
else:
    ENGINE = ENGINE.load(engineSavePath)
    ## unpack constraints before fitting in case tweaking is needed
    PDF_CONSTRAINT, EMD_CONSTRAINT = ENGINE.constraints
# #################################################################################### #
# ############################### DEFINE DIFFERENT RUNS ############################## #
def normal_run(numberOfSteps=100000, saveFrequency=10000):
    """Stochastic fit using the default per-atom translation moves only."""
    ## reset groups as atoms
    ENGINE.set_groups_as_atoms()
    ## run engine
    ENGINE.run(numberOfSteps=numberOfSteps, saveFrequency=saveFrequency)
def swaps_run(numberOfSteps=100000, saveFrequency=10000):
    """Stochastic fit where lithium atoms attempt position swaps with the
    transition metals (and vice versa); every other atom gets a small
    translation move."""
    ENGINE.set_groups_as_atoms()
    # Candidate swap targets, grouped by element family.  Element symbols
    # may appear lower- or upper-case in the pdb data, so accept both.
    elements = ENGINE.get_original_data('allElements')
    lithium = {'li', 'Li'}
    metals = {'co', 'ni', 'mn', 'Co', 'Ni', 'Mn'}
    liSwaps = [[i] for i, el in enumerate(elements) if el in lithium]
    meSwaps = [[i] for i, el in enumerate(elements) if el in metals]
    # Assign a move generator per single-atom group: Li swaps with a
    # metal site, metals swap with a Li site, everything else translates.
    for group in ENGINE.groups:
        element = elements[group.indexes[0]]
        if element in lithium:
            group.set_move_generator(SwapPositionsGenerator(swapList=meSwaps))
        elif element in metals:
            group.set_move_generator(SwapPositionsGenerator(swapList=liSwaps))
        else:
            group.set_move_generator(TranslationGenerator(amplitude=0.05))
    ENGINE.run(numberOfSteps=numberOfSteps, saveFrequency=saveFrequency)
def removes_run(numberOfSteps=100, saveFrequency=100):
    """Stochastic run whose moves remove atoms from the system: one
    remover group per species family (O, Li, and the transition metals),
    each capped at a maximum number of collected (removed) atoms."""
    elements = ENGINE.get_original_data('allElements')
    oxygens = [i for i, el in enumerate(elements) if el in ('o', 'O')]
    lithiums = [i for i, el in enumerate(elements) if el in ('li', 'Li')]
    metals = [i for i, el in enumerate(elements)
              if el in ('co', 'ni', 'mn', 'Co', 'Ni', 'Mn')]

    def _remover_group(atom_indexes, maximum_collected):
        # An EmptyGroup's default move generator is an
        # AtomsRemoveGenerator; restrict it to the given atoms and cap
        # how many it may collect.
        group = EmptyGroup()
        group.moveGenerator.set_maximum_collected(maximum_collected)
        group.moveGenerator.set_atoms_list(atom_indexes)
        return group

    ENGINE.set_groups([_remover_group(oxygens, 20),
                       _remover_group(lithiums, 500),
                       _remover_group(metals, 500)])
    ENGINE.run(numberOfSteps=numberOfSteps, saveFrequency=saveFrequency)
# #################################################################################### #
# ################################## RUN SIMULATION ################################## #
## run normal: translation-only fit, then enable experimental scale-factor
## adjustment (every 10 accepted steps, bounded to [0.7, 1.3]) and refine.
normal_run(numberOfSteps=100000, saveFrequency=50000)
PDF_CONSTRAINT.set_adjust_scale_factor((10,0.7,1.3))
normal_run(numberOfSteps=250000, saveFrequency=50000)
## add first swapping frame
if not ENGINE.is_frame('swap_1'):
    ENGINE.add_frame('swap_1')
## use swap_1 frame and run swapping
ENGINE.set_used_frame('swap_1')
swaps_run(numberOfSteps=200000, saveFrequency=50000)
normal_run(numberOfSteps=250000, saveFrequency=50000)
## add first removing frame
if not ENGINE.is_frame('removes_2'):
    ENGINE.add_frame('removes_2')
## use removes_2 frame and run some removes
ENGINE.set_used_frame('removes_2')
for _ in range(20):
    # remove as little as possible then try to refine
    removes_run(numberOfSteps=100, saveFrequency=100)
    normal_run(numberOfSteps=100000, saveFrequency=10000)
    swaps_run(numberOfSteps=50000, saveFrequency=10000)
##########################################################################################
###################################### CALL plot.py ######################################
# Launch the companion plotting script with the same interpreter.
os.system("%s %s"%(sys.executable, os.path.join(DIR_PATH, 'plot.py')))
| bachiraoun/fullrmc | Examples/removes/run.py | Python | agpl-3.0 | 6,878 |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for instances, volumes, and floating ips."""
import sys
from oslo.config import cfg
from oslo.utils import importutils
import webob
from neutron.common import exceptions
from neutron.i18n import _LI, _LW
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Dotted paths used to detect/select the quota driver implementation.
QUOTA_DB_MODULE = 'neutron.db.quota_db'
QUOTA_DB_DRIVER = 'neutron.db.quota_db.DbQuotaDriver'
QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver'
# Configuration options for the [QUOTAS] section of neutron.conf.
quota_opts = [
    cfg.ListOpt('quota_items',
                default=['network', 'subnet', 'port'],
                help=_('Resource name(s) that are supported in quota '
                       'features')),
    cfg.IntOpt('default_quota',
               default=-1,
               help=_('Default number of resource allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_network',
               default=10,
               help=_('Number of networks allowed per tenant.'
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_subnet',
               default=10,
               help=_('Number of subnets allowed per tenant, '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_port',
               default=50,
               help=_('Number of ports allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.StrOpt('quota_driver',
               default=QUOTA_DB_DRIVER,
               help=_('Default driver to use for quota checks')),
]
# Register the configuration options
cfg.CONF.register_opts(quota_opts, 'QUOTAS')
class ConfDriver(object):
    """Configuration driver.
    Driver to perform necessary checks to enforce quotas and obtain
    quota information. The default driver utilizes the default values
    in neutron.conf.
    """
    def _get_quotas(self, context, resources, keys):
        """Get quotas.
        A helper method which retrieves the quotas for the specific
        resources identified by keys, and which apply to the current
        context.
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param keys: A list of the desired quotas to retrieve.
        """
        # Filter resources
        desired = set(keys)
        sub_resources = dict((k, v) for k, v in resources.items()
                             if k in desired)
        # Make sure we accounted for all of them...
        if len(keys) != len(sub_resources):
            unknown = desired - set(sub_resources.keys())
            raise exceptions.QuotaResourceUnknown(unknown=sorted(unknown))
        quotas = {}
        for resource in sub_resources.values():
            # Config-driven limits: always the configured default.
            quotas[resource.name] = resource.default
        return quotas
    def limit_check(self, context, tenant_id,
                    resources, values):
        """Check simple quota limits.
        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.
        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a simple limit
        resource.
        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns
        nothing.
        :param context: The request context, for access checks.
        :param tenant_id: The tenant_id to check quota.
        :param resources: A dictionary of the registered resources.
        :param values: A dictionary of the values to check against the
                       quota.
        """
        # Ensure no value is less than zero
        unders = [key for key, val in values.items() if val < 0]
        if unders:
            raise exceptions.InvalidQuotaValue(unders=sorted(unders))
        # Get the applicable quotas
        quotas = self._get_quotas(context, resources, values.keys())
        # Check the quotas and construct a list of the resources that
        # would be put over limit by the desired values
        # (a negative quota means unlimited and is never "over").
        overs = [key for key, val in values.items()
                 if quotas[key] >= 0 and quotas[key] < val]
        if overs:
            raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,
                                       usages={})
    @staticmethod
    def get_tenant_quotas(context, resources, tenant_id):
        """Return the configured default limit for every resource;
        per-tenant overrides do not exist in the config driver."""
        quotas = {}
        sub_resources = dict((k, v) for k, v in resources.items())
        for resource in sub_resources.values():
            quotas[resource.name] = resource.default
        return quotas
    @staticmethod
    def get_all_quotas(context, resources):
        """No per-tenant quota records exist with this driver."""
        return []
    @staticmethod
    def delete_tenant_quota(context, tenant_id):
        """Config-based quotas cannot be modified through the API."""
        msg = _('Access to this resource was denied.')
        raise webob.exc.HTTPForbidden(msg)
    @staticmethod
    def update_quota_limit(context, tenant_id, resource, limit):
        """Config-based quotas cannot be modified through the API."""
        msg = _('Access to this resource was denied.')
        raise webob.exc.HTTPForbidden(msg)
class BaseResource(object):
    """A single named quota resource."""

    def __init__(self, name, flag):
        """Store the resource identity.

        :param name: resource name, e.g. "network".
        :param flag: name of the configuration option that holds this
                     resource's default per-tenant limit.
        """
        self.name = name
        self.flag = flag

    @property
    def default(self):
        """Default per-tenant limit: the resource-specific config option
        when registered, otherwise the global ``default_quota`` option."""
        quota_conf = cfg.CONF.QUOTAS
        return getattr(quota_conf, self.flag, quota_conf.default_quota)
class CountableResource(BaseResource):
    """A quota resource whose current usage is computed by a callable.

    Countable resources are those which directly correspond to objects
    in the database (network, subnet, etc.).  The counting function is
    invoked with the request context plus whatever extra positional and
    keyword arguments were passed to Quota.count(), and must return an
    integer count.
    """

    def __init__(self, name, count, flag=None):
        """Build a countable resource.

        :param name: resource name, e.g. "network".
        :param count: callable returning the current count of the
                      resource (see class docstring for its signature).
        :param flag: name of the configuration option which specifies
                     the default quota for this resource.
        """
        super(CountableResource, self).__init__(name, flag=flag)
        self.count = count
class QuotaEngine(object):
    """Represent the set of recognized quotas."""
    def __init__(self, quota_driver_class=None):
        """Initialize a Quota object.

        :param quota_driver_class: optional driver class/path overriding
            the [QUOTAS] quota_driver configuration option.
        """
        self._resources = {}
        self._driver = None
        self._driver_class = quota_driver_class
    def get_driver(self):
        """Lazily resolve, instantiate and cache the quota driver."""
        if self._driver is None:
            _driver_class = (self._driver_class or
                             cfg.CONF.QUOTAS.quota_driver)
            if (_driver_class == QUOTA_DB_DRIVER and
                    QUOTA_DB_MODULE not in sys.modules):
                # If quotas table is not loaded, force config quota driver.
                _driver_class = QUOTA_CONF_DRIVER
                LOG.info(_LI("ConfDriver is used as quota_driver because the "
                             "loaded plugin does not support 'quotas' table."))
            # A string is a dotted path; import it (Python 2 basestring).
            if isinstance(_driver_class, basestring):
                _driver_class = importutils.import_object(_driver_class)
            self._driver = _driver_class
            LOG.info(_LI('Loaded quota_driver: %s.'), _driver_class)
        return self._driver
    def __contains__(self, resource):
        # Membership test by resource name.
        return resource in self._resources
    def register_resource(self, resource):
        """Register a resource."""
        if resource.name in self._resources:
            LOG.warn(_LW('%s is already registered.'), resource.name)
            return
        self._resources[resource.name] = resource
    def register_resource_by_name(self, resourcename):
        """Register a resource by name."""
        # Convention: the config flag is 'quota_<resource name>'.
        resource = CountableResource(resourcename, _count_resource,
                                     'quota_' + resourcename)
        self.register_resource(resource)
    def register_resources(self, resources):
        """Register a list of resources."""
        for resource in resources:
            self.register_resource(resource)
    def count(self, context, resource, *args, **kwargs):
        """Count a resource.
        For countable resources, invokes the count() function and
        returns its result. Arguments following the context and
        resource are passed directly to the count function declared by
        the resource.
        :param context: The request context, for access checks.
        :param resource: The name of the resource, as a string.
        """
        # Get the resource
        res = self._resources.get(resource)
        if not res or not hasattr(res, 'count'):
            raise exceptions.QuotaResourceUnknown(unknown=[resource])
        return res.count(context, *args, **kwargs)
    def limit_check(self, context, tenant_id, **values):
        """Check simple quota limits.
        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction. The
        values to check are given as keyword arguments, where the key
        identifies the specific quota limit to check, and the value is
        the proposed value.
        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a simple limit
        resource.
        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns
        nothing.
        :param context: The request context, for access checks.
        """
        # Delegate the actual check to the configured driver.
        return self.get_driver().limit_check(context, tenant_id,
                                             self._resources, values)
    @property
    def resources(self):
        # Read-only view of the registered resources mapping.
        return self._resources
# Module-level singleton quota engine used throughout Neutron.
QUOTAS = QuotaEngine()
def _count_resource(context, plugin, resources, tenant_id):
count_getter_name = "get_%s_count" % resources
# Some plugins support a count method for particular resources,
# using a DB's optimized counting features. We try to use that one
# if present. Otherwise just use regular getter to retrieve all objects
# and count in python, allowing older plugins to still be supported
try:
obj_count_getter = getattr(plugin, count_getter_name)
return obj_count_getter(context, filters={'tenant_id': [tenant_id]})
except (NotImplementedError, AttributeError):
obj_getter = getattr(plugin, "get_%s" % resources)
obj_list = obj_getter(context, filters={'tenant_id': [tenant_id]})
return len(obj_list) if obj_list else 0
def register_resources_from_config():
    """Register one countable quota resource per configured quota item."""
    resources = [
        CountableResource(item, _count_resource, 'quota_' + item)
        for item in cfg.CONF.QUOTAS.quota_items]
    QUOTAS.register_resources(resources)
# Populate the global QUOTAS engine as soon as this module is imported.
register_resources_from_config()
| leeseuljeong/leeseulstack_neutron | neutron/quota.py | Python | apache-2.0 | 12,508 |
__author__ = 'nlelab'
import os
import MySQLdb
import time
import json
import config as cfg
# Python 2 one-shot script: read raw retweet JSON rows from the `retweet`
# table and insert one (source user -> retweeted user) edge per tweet into
# the `relation` table.
print os.getcwd()
db = MySQLdb.connect(host=cfg.mysql['host'],  # your host, usually localhost
                     user=cfg.mysql['user'],  # your username
                     passwd=cfg.mysql['passwd'],  # your password
                     db=cfg.mysql['db'])  # name of the data base
cur = db.cursor()
# Force UTF-8 on both the connection and the session so tweet text with
# non-ASCII characters round-trips correctly.
db.set_character_set('utf8')
cur.execute('SET NAMES utf8;')
cur.execute('SET CHARACTER SET utf8;')
cur.execute('SET character_set_connection=utf8;')
db.commit()
cur.execute("SELECT * FROM retweet")
tweets = cur.fetchall()
print len(tweets)
for tweet in tweets:
    # Column 1 holds the raw tweet JSON as text.
    jsonTweet = json.loads(tweet[1])
    #print jsonTweet
    # Parameterized INSERT; a duplicate edge is ignored via the no-op
    # "tweet_id=tweet_id" update clause.
    cur.execute("INSERT relation (user_source, user_target,type,date,tweet_id) VALUES (%s,%s,%s,%s,%s) on duplicate key update tweet_id=tweet_id",
                (jsonTweet['user']['id'],
                 jsonTweet['retweeted_status']['user']['id'],
                 'RT',
                 time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(jsonTweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')),
                 jsonTweet['id'])
                )
db.commit() | mirkolai/polarization_of_emotional_engagement_in_social_media | 012 - analysis script - extract retweet relation.py | Python | mit | 1,154 |
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from .email import Email
from .enums import SheetEmailFormat
from .format_details import FormatDetails
from ..types import *
from ..util import serialize
from ..util import deserialize
class SheetEmail(Email):
    """Smartsheet SheetEmail data model.

    Extends Email with an export format (``format``) and its associated
    format details. Because ``format`` shadows the built-in of the same
    name, the public attribute is intercepted via __getattr__/__setattr__
    and stored on ``format_``.
    """

    def __init__(self, props=None, base_obj=None):
        """Initialize the SheetEmail model.

        Args:
            props: optional dict of property values deserialized into this
                instance.
            base_obj: optional base object used by the SDK for callbacks.
        """
        super(SheetEmail, self).__init__(None, base_obj)
        self._base = None
        if base_obj is not None:
            self._base = base_obj

        self._format_ = EnumeratedValue(SheetEmailFormat)
        self._format_details = TypedObject(FormatDetails)

        if props:
            deserialize(self, props)

        self.__initialized = True

    def __getattr__(self, key):
        # Only invoked for names not found through normal lookup: map the
        # public name 'format' onto the mangled 'format_' property.
        if key == 'format':
            return self.format_
        else:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        # Mirror __getattr__: writes to 'format' go to the property setter.
        if key == 'format':
            self.format_ = value
        else:
            super(SheetEmail, self).__setattr__(key, value)

    @property
    def format_(self):
        """EnumeratedValue: the export format of the emailed sheet."""
        return self._format_

    @format_.setter
    def format_(self, value):
        self._format_.set(value)

    @property
    def format_details(self):
        """FormatDetails: extra options for the chosen export format."""
        return self._format_details.value

    @format_details.setter
    def format_details(self, value):
        self._format_details.value = value

    def to_dict(self):
        """Return this model serialized to a plain dict."""
        return serialize(self)

    def to_json(self):
        """Return this model serialized to a JSON string.

        BUG FIX: ``json`` was referenced without being imported in this
        module's import block, so serialization could raise NameError;
        import it locally before use.
        """
        import json
        return json.dumps(self.to_dict())

    def __str__(self):
        return self.to_json()
| smartsheet-platform/smartsheet-python-sdk | smartsheet/models/sheet_email.py | Python | apache-2.0 | 2,274 |
# Copyright 2018 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import absltest
import numpy as np
import six
import deepmind_lab
class TeleporterTest(absltest.TestCase):
    """Integration test for the DeepMind Lab teleporter level."""

    def test_movement(self):
        # Build an environment that exposes velocity and debug pose
        # observations at a small render size to keep the test fast.
        fps = 60
        env = deepmind_lab.Lab(
            'tests/teleporter_test', [
                'VEL.TRANS',
                'DEBUG.POS.TRANS',
                'DEBUG.POS.ROT',
            ],
            config={
                'fps': str(fps),
                'width': '80',
                'height': '80',
            })
        action_spec = env.action_spec()
        action_index = {action['name']: i for i, action in enumerate(action_spec)}
        action = np.zeros([len(action_spec)], dtype=np.intc)
        env.reset()
        # The agent starts at rest.
        vel = env.observations()['VEL.TRANS']
        self.assertTrue(np.array_equal(vel, np.array([0, 0, 0])))
        # Agent begins facing south
        initial_facing = env.observations()['DEBUG.POS.ROT']
        self.assertTrue(np.allclose(initial_facing, np.array([0, -90, 0]),
                                    atol=0.1))
        # Player moves straight ahead through the teleporter
        action[action_index['MOVE_BACK_FORWARD']] = 1
        self.assertEqual(env.events(), [])
        for _ in six.moves.range(120):
            p_before = env.observations()['DEBUG.POS.TRANS']
            env.step(action, 1)
            p_after = env.observations()['DEBUG.POS.TRANS']
            # A positional jump of more than 100 units in a single frame is
            # taken as evidence the teleporter fired.
            if p_after[1] - p_before[1] > 100:
                break
        else:
            self.fail('Failed to teleport')
        # The teleport event is reported exactly once, on the step that
        # teleported, and is cleared by the following step.
        self.assertEqual(env.events(), [('PLAYER_TELEPORTED', [])])
        env.step(action, 1)
        self.assertEqual(env.events(), [])
if __name__ == '__main__':
    # Under Bazel, point deepmind_lab at the runfiles tree before running.
    if 'TEST_SRCDIR' in os.environ:
        deepmind_lab.set_runfiles_path(
            os.path.join(os.environ['TEST_SRCDIR'],
                         'org_deepmind_lab'))
    absltest.main()
| deepmind/lab | python/tests/teleporter_test.py | Python | gpl-2.0 | 2,547 |
import logging
import signal
from autotest.client.shared import error
from virttest import utils_misc
from tests import guest_test
@error.context_aware
def run_yonit_bitmap(test, params, env):
    """
    Run yonit bitmap benchmark in Windows guests, especially win7 32bit,
    for regression test of BZ #556455.

    Run the benchmark (infinite) loop background using
    run_guest_test_background, and detect the existence of the process
    in guest.

    1. If the process exits before test timeout, that means the benchmark
       exits unexpectedly, and BSOD may have happened, which can be verified
       from the screenshot saved by virt-test.
    2. If just timeout happen, this test passes, i.e. the guest stays
       good while running the benchmark in the given time.

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    sec_per_day = 86400  # seconds per day
    test_timeout = int(params.get("test_timeout", sec_per_day))
    login_timeout = int(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)
    # Since the benchmark runs into an infinite loop, the background process
    # will never return, unless we get a BSOD.
    #
    # We set the test_timeout of the background guest_test much bigger than
    # that of this test to make sure that the background benchmark is still
    # running while the the foreground detecting is on going.
    error.context("run benchmark test in background", logging.info)
    params["test_timeout"] = test_timeout * 2 + sec_per_day
    logging.info("set Yonit bitmap test timeout to"
                 " %ss" % params["test_timeout"])
    pid = guest_test.run_guest_test_background(test, params, env)
    if pid < 0:
        session.close()
        raise error.TestError("Could not create child process to execute "
                              "guest_test background")

    def is_yonit_benchmark_launched():
        # The benchmark shows up as compress_benchmark_loop in the guest's
        # task list; a non-zero command status means it was not found.
        if session.get_command_status(
                'tasklist | find /I "compress_benchmark_loop"') != 0:
            logging.debug("yonit bitmap benchmark was not found")
            return False
        return True

    error.context("Watching Yonit bitmap benchmark is running until timeout",
                  logging.info)
    try:
        # Start detecting whether the benchmark is started a few mins
        # after the background test launched, as the downloading
        # will take some time.
        launch_timeout = login_timeout
        if utils_misc.wait_for(is_yonit_benchmark_launched,
                               launch_timeout, 180, 5):
            logging.debug("Yonit bitmap benchmark was launched successfully")
        else:
            raise error.TestError("Failed to launch yonit bitmap benchmark")
        # If the benchmark exits before timeout, errors happened.
        if utils_misc.wait_for(lambda: not is_yonit_benchmark_launched(),
                               test_timeout, 60, 10):
            raise error.TestError("Yonit bitmap benchmark exits unexpectly")
        else:
            if session.is_responsive():
                logging.info("Guest stays good until test timeout")
            else:
                raise error.TestFail("Guest is dead")
    finally:
        # Always reap the background tracking process, even on failure.
        logging.info("Kill the background benchmark tracking process")
        utils_misc.safe_kill(pid, signal.SIGKILL)
        guest_test.wait_guest_test_background(pid)
        session.close()
| spiceqa/virt-test | qemu/tests/yonit_bitmap.py | Python | gpl-2.0 | 3,573 |
"""dc views """
import datetime as dt
import collections
from collections import OrderedDict as od_dict
import os
import json
from nested_dict import nested_dict
import pytz
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from .models import User, Proj, Module, DcRun
from .views import gen_date_range, date_range, zone_time, update_userinfo
def gen_slack_interv(slk_lst):
    """Bucket timing slack values into five severity intervals.

    Each slack is scaled by -1000 and counted into one of the ranges
    [<100), [100, 300), [300, 500), [500, 1000) and [1000, ...).
    Returns a list of five counts; an empty/None input yields all zeros.
    """
    counts = [0] * 5
    if not slk_lst:
        return counts
    upper_bounds = (100, 300, 500, 1000)
    for slack in slk_lst:
        scaled = slack * (-1000)
        for bucket, upper in enumerate(upper_bounds):
            if scaled < upper:
                counts[bucket] += 1
                break
        else:
            counts[4] += 1
    return counts
def gen_tm_dic(lv_dic):
    """Summarize slack lists per startpoint/endpoint group.

    Replaces each slack list in ``lv_dic`` with its interval histogram
    ('slk_num') and path count ('gse_sum'), then adds a 'gp' entry that
    totals paths per path group (the key part before the first '**').
    Returns the result as a plain dict.
    """
    group_totals = collections.defaultdict(int)
    if lv_dic:
        for gse_key, slacks in lv_dic.items():
            path_count = len(slacks)
            lv_dic[gse_key] = {'slk_num': gen_slack_interv(slacks),
                               'gse_sum': path_count}
            group_totals[gse_key.split('**')[0]] += path_count
        lv_dic['gp'] = od_dict(group_totals)
    return dict(lv_dic)
def dict_deal_key(key_dic, val_dic, merg_dic):
    """Fold grouped report values into the merged report dict.

    Known report groups get a normalized key ('Cell Count' -> 'cell_count')
    mapping to their items and values; any other group is treated as a
    timing path group and stored under 'tpg'. ``merg_dic`` must support
    nested assignment for 'tpg' (e.g. a defaultdict(dict)); it is mutated
    in place and returned.
    """
    special_groups = ("Cell Count", "Area", "qor_pw_rpt", "Power")
    for group, values in key_dic.items():
        if group in special_groups:
            normalized = group.replace(" ", "_").lower()
            merg_dic[normalized] = {"items": val_dic[group], "values": values}
        else:
            merg_dic['tpg_items'] = val_dic[group]
            merg_dic['tpg'][group] = values
    return merg_dic
def merger_dic(dic1, dic2=None):
    """Merge the matching keys of one or two report dicts.

    With two dicts, only keys present in both are kept and reference/target
    values are interleaved; with one dict only the reference values are
    collected. The 'qor_pw_rpt' group is copied as-is rather than merged.
    The collected data is shaped for the template via dict_deal_key().
    """
    key_dic = collections.defaultdict(list)
    val_dic = collections.defaultdict(list)
    merg_dic = collections.defaultdict(dict)
    if dic2:
        for gpk, gpv in dic1.items():
            # Skip keys that have no counterpart in the target run.
            if gpk not in dic2:
                continue
            if gpk == "qor_pw_rpt":
                merg_dic[gpk] = {'ref': dict(gpv),
                                 'tar': dict(dic2[gpk])}
            else:
                for gpkk, gpvv in gpv.items():
                    # Interleave: reference value, then target value (or
                    # "NA" when the target run lacks this entry).
                    key_dic[gpk].append(gpvv)
                    key_dic[gpk].append(dic2[gpk].get(gpkk, "NA"))
                    val_dic[gpk].append(gpkk)
    else:
        for gpk, gpv in dic1.items():
            if gpk == "qor_pw_rpt":
                merg_dic[gpk] = {'ref': dict(gpv)}
            else:
                for gpkk, gpvv in gpv.items():
                    key_dic[gpk].append(gpvv)
                    val_dic[gpk].append(gpkk)
    return dict(dict_deal_key(key_dic, val_dic, merg_dic))
def gen_qor_dic(rpt_qs, ref_tm, tar_tm):
    """Build the QoR comparison dict for a reference and optional target run.

    Returns an error-marker dict when either run has no QoR data, the merged
    report dict otherwise, or None when the merge produced nothing. The
    sentinel target time 'no_tm' requests a reference-only report.
    """
    reference = query_rpt_qs(rpt_qs, ref_tm)
    if not reference:
        return {"ref_qor_null": True,
                "ref_tm": ref_tm}
    if tar_tm == 'no_tm':
        merged = merger_dic(reference)
    else:
        target = query_rpt_qs(rpt_qs, tar_tm)
        if not target:
            return {"tar_qor_null": True,
                    "tar_tm": tar_tm}
        merged = merger_dic(reference, target)
    return merged if merged else None
def query_tm_lvl(rpt_qs, ref_tm, lvl='Level 1'):
    """Group startpoint-to-endpoint slack counts at a chosen hierarchy level.

    ``lvl`` is of the form 'Level N'; start/end point paths are truncated to
    their first N '/'-separated components. Path groups whose name contains
    'in2' or '2out' keep their full point names ('nlv_gp'); all others are
    truncated ('lv_gp').
    """
    lv_dic = collections.defaultdict(list)
    nlv_dic = collections.defaultdict(list)
    tm_dic = query_time_qs(rpt_qs, ref_tm)
    for tm_k, tm_val in tm_dic.items():
        # Skip the bookkeeping entries; only real path groups carry lists.
        if tm_val and tm_k not in ('gp_sum', 'log_path'):
            for tm_lst in tm_val:
                # tm_lst is [startpoint, endpoint, slack].
                if 'in2' in tm_k or '2out' in tm_k:
                    tm_key = (tm_k+'**'+tm_lst[0]+'**'+tm_lst[1])
                    nlv_dic[tm_key].append(tm_lst[2])
                else:
                    st_pit_lst = tm_lst[0].split('/')
                    ed_pit_lst = tm_lst[1].split('/')
                    # Number of hierarchy components to keep.
                    lvn = int(lvl.split(" ")[1])
                    tm_key = (tm_k+'**'+os.path.join(*st_pit_lst[0:lvn])
                              + '**' + os.path.join(*ed_pit_lst[0:lvn]))
                    lv_dic[tm_key].append(tm_lst[2])
    rpt_dic = {'nlv_gp': gen_tm_dic(nlv_dic), 'lv_gp': gen_tm_dic(lv_dic),
               'gp_sum': tm_dic['gp_sum'], 'log_path': tm_dic['log_path']}
    return rpt_dic
def query_time_qs(rpt_qs, run_tm):
    """Fetch the timing report of the run closest to ``run_tm``.

    Returns a defaultdict mapping each path group to a list of
    [startpoint, endpoint, slack] entries, plus 'gp_sum' (total path
    count) and 'log_path' bookkeeping keys when a report exists.
    """
    rpt_dic = collections.defaultdict(list)
    dcrun_qs = rpt_qs.filter(run_time__range=gen_date_range(run_tm))
    tmrpt_dic = dcrun_qs.first().time_rpt
    if tmrpt_dic:
        gp_sum = 0
        for se_dic in tmrpt_dic["timing"]:
            rpt_dic[se_dic["Path Group"]].append([se_dic["Startpoint"],
                                                  se_dic["Endpoint"],
                                                  se_dic["slack"]])
            gp_sum += 1
        rpt_dic['gp_sum'] = gp_sum
        rpt_dic["log_path"] = tmrpt_dic["log_path"]
    return rpt_dic
def query_rpt_qs(rpt_qs, run_tm):
    """Fetch and shape the QoR and power reports of the run at ``run_tm``.

    Hold-violation entries are dropped from non-area QoR groups, each group
    is converted to an OrderedDict, and groups containing
    'Critical Path Clk Period' are reordered so that entry comes first.
    Power numbers are folded in under 'Power' when available.
    """
    rpt_dic = collections.defaultdict(dict)
    dcrun_qs = rpt_qs.filter(run_time__range=gen_date_range(run_tm))
    qorpt_dic = dcrun_qs.first().qor_rpt
    if qorpt_dic:
        rpt_dic['qor_pw_rpt'] = od_dict({'run_time': run_tm,
                                         'qor_log': qorpt_dic["log_path"]})
        for qor_type, qor_dic in qorpt_dic.items():
            # Bookkeeping and unneeded report sections are skipped entirely.
            if qor_type in ("log_path",
                            "Design Rules",
                            "Compile CPU Statistics"):
                continue
            if qor_type not in ("Area", "Cell Count"):
                # Drop the hold-violation metrics from timing-style groups.
                qor_dic = {type_nm: qor_dic[type_nm] for type_nm in qor_dic.keys() -
                           {"Worst Hold Violation",
                            "Total Hold Violation",
                            "No. of Hold Violations"}}
            qor_dic = od_dict(qor_dic)
            rpt_dic[qor_type] = qor_dic
    for qor, qor_dic in rpt_dic.items():
        if (qor not in ('Cell Count', 'Area', 'qor_pw_rpt') and
                qor_dic.get("Critical Path Clk Period", None)):
            # Move the clock period to the end, then reverse so it is shown
            # first in the rendered report.
            qor_dic.move_to_end("Critical Path Clk Period")
            rpt_dic[qor] = od_dict(reversed(list(qor_dic.items())))
    pwrpt_dic = dcrun_qs.first().power_rpt
    if pwrpt_dic:
        rpt_dic['qor_pw_rpt']['pw_log'] = pwrpt_dic["log_path"]
        rpt_dic['Power'] = od_dict({"Internal": pwrpt_dic["internal_pw"],
                                    "Switching": pwrpt_dic["swithing_pw"],
                                    "Leakage": pwrpt_dic["leakage_pw"],
                                    "Lotal": pwrpt_dic["total_pw"]})
    return rpt_dic
def gen_ew_dic(dic):
    """Summarize dc.log error/warning lists as per-type counts.

    Maps each error/warning type to the number of occurrences and adds an
    'ew_num' key holding the grand total.
    """
    ew_dic = {ew_type: len(ew_lst) for ew_type, ew_lst in dic.items()}
    ew_dic['ew_num'] = sum(ew_dic.values())
    return ew_dic
def query_ew_dic(dcrun_qs):
    """Generate the dc.log detail information table.

    Builds one row per DcRun with module, cpu/run time, log path, status
    ('fail' when errors exist, 'running' while cpu_time is 'NA', otherwise
    'pass') and nested error/warning summaries. A placeholder all-None row
    is returned when the queryset is empty.
    """
    model_lst = []
    if dcrun_qs.exists():
        for dcrun_obj in dcrun_qs:
            model_dic = nested_dict()
            model_dic["module"] = dcrun_obj.m_name
            model_dic["cpu_time"] = dcrun_obj.cpu_time
            model_dic["run_time"] = zone_time(dcrun_obj.run_time)
            model_dic["log_path"] = dcrun_obj.dc_log
            error_dic = dcrun_obj.error_info
            warning_dic = dcrun_obj.warning_info
            if error_dic:
                model_dic['ew_info']['error'] = gen_ew_dic(error_dic)
                model_dic['status'] = 'fail'
            else:
                model_dic['ew_info']['error'] = None
                # 'NA' cpu_time means the run has not finished yet.
                model_dic['status'] = ('running'
                                       if model_dic['cpu_time'] == 'NA' else 'pass')
            if warning_dic:
                model_dic['ew_info']['warn'] = gen_ew_dic(warning_dic)
            model_lst.append(model_dic)
    else:
        model_lst = [{'module': None,
                      'cpu_time': None,
                      'status': None,
                      'log_path': None,
                      'run_time': None,
                      'ew_info': {'warn': None,
                                  'error': None}}]
    return model_lst
class DcUserList(ListView):
    """Dc index page: lists every user that has DC data recorded."""

    template_name = 'pj_app/dc.html'

    def __init__(self, **kwargs):
        # BUG FIX: the original __init__ never called the parent
        # initializer, silently dropping the initkwargs that Django's
        # as_view() forwards to the view constructor.
        super(DcUserList, self).__init__(**kwargs)
        self.user_list = []

    def get_queryset(self):
        # Populates user_list as a side effect; the template reads it from
        # the context rather than from a queryset (returns None on purpose).
        for user_obj in User.objects.all():
            if user_obj.asic_info.get("dc"):
                self.user_list.append(user_obj.name)

    def get_context_data(self, **kwargs):
        context = super(DcUserList, self).get_context_data(**kwargs)
        context['user_list'] = self.user_list
        context['pj_type'] = 'Dc'
        context['user'] = settings.DC_USER
        context['team'] = settings.DC_TEAM
        return context
def dc_get_loginfo(request):
    """Ajax endpoint: return dc.log summaries as JSON.

    Filters DcRun rows by user, project and run-time window; when the
    'sg_md' model is requested, additionally narrows to a single module.
    """
    params = request.GET
    query = DcRun.objects.filter(
        user__name=params.get('user'),
        p_name=params.get('proj'),
        run_time__range=date_range(params.get('tstart'), params.get('tend')))
    if params.get('model') == 'sg_md':
        query = query.filter(
            module__name=f"{params.get('module')}___{params.get('proj')}")
    payload = query_ew_dic(query)
    return HttpResponse(json.dumps(payload), content_type='application/json')
def dc_get_tminfo(request):
    """Ajax endpoint: startpoint/endpoint histogram at a chosen level."""
    params = request.GET
    user = params.get('user')
    proj = params.get('proj')
    module = params.get('module')
    runs = DcRun.objects.filter(user__name=user,
                                p_name=proj,
                                module__name=f"{module}___{proj}")
    report = query_tm_lvl(runs, params.get('time'), lvl=params.get('level'))
    return HttpResponse(json.dumps(report), content_type='application/json')
def dc_detail_loginfo(request, path):
    """Render a local log file's text in the browser."""
    if not os.path.exists(path):
        content = "Error: No file found"
    else:
        with open(path) as handle:
            content = handle.read()
    return render(request, 'pj_app/dc_detail_log.html',
                  {'log_str': content})
def dc_get_rpt(request, **kwargs):
    """Render a single dc report's detail information.

    Expects 'user', 'proj', 'module', 'ref_tm', 'tar_tm' and 'rpt_type'
    in kwargs; renders either the QoR or the timing report template.
    NOTE(review): for any other rpt_type this view implicitly returns
    None, which Django rejects — confirm callers only pass the two
    supported types.
    """
    dcrun_qs = DcRun.objects.filter(user__name=kwargs["user"],
                                    p_name=kwargs["proj"],
                                    module__name=f"{kwargs['module']}___{kwargs['proj']}")
    if kwargs["rpt_type"] == 'qor_rpt':
        rpt_type_dic = gen_qor_dic(dcrun_qs, kwargs["ref_tm"], kwargs["tar_tm"])
        return render(request, 'pj_app/dc_qor_rpt.html', {'ref_tm': kwargs["ref_tm"],
                                                          'tar_tm': kwargs["tar_tm"],
                                                          'rpt_data': rpt_type_dic})
    elif kwargs["rpt_type"] == 'tm_rpt':
        rpt_type_dic = query_tm_lvl(dcrun_qs, kwargs["ref_tm"])
        # The user info is serialized for the level-switching ajax call.
        user_info = {"user": kwargs["user"],
                     "proj": kwargs["proj"],
                     "module": kwargs["module"],
                     "ref_tm": kwargs["ref_tm"]}
        return render(request, 'pj_app/dc_tm_rpt.html', {'ref_tm': kwargs["ref_tm"],
                                                         'user_info': json.dumps(user_info),
                                                         'rpt_data': rpt_type_dic})
## Endpoint for posting DC run data into the database.
@csrf_exempt
def dc_query_insert_case(request):
    """POST interface that upserts a DC run and its reports into the DB.

    Creates/updates the User, Proj, Module and DcRun rows from the posted
    JSON body; report payloads are only stored once the run status is
    'finished'. Always answers with an empty JSON object.
    """
    if request.method == 'POST':
        dc_dic = json.loads(request.body.decode())
        user_obj, _ = User.objects.update_or_create(name=dc_dic['user'])
        update_userinfo('dc', user_obj, dc_dic['proj'], dc_dic['design_name'])
        proj_obj, _ = Proj.objects.update_or_create(name=dc_dic['proj'])
        module_obj, _ = Module.objects.update_or_create(
            name=f"{dc_dic['design_name']}___{dc_dic['proj']}")
        # The posted epoch timestamp is localized to the site time zone.
        rtime = pytz.timezone(settings.TIME_ZONE).localize(
            dt.datetime.fromtimestamp(dc_dic['run_time']))
        dcrun_qs = DcRun.objects.filter(run_time=rtime)
        if dcrun_qs.exists():
            # Re-post for an existing run: only refresh the cpu time.
            dcrun_obj = dcrun_qs.first()
            dcrun_obj.cpu_time = dc_dic['cpu_usage']
        else:
            dcrun_obj = DcRun.objects.create(
                user=user_obj,
                proj=proj_obj,
                module=module_obj,
                p_name=dc_dic['proj'],
                m_name=dc_dic['design_name'],
                clock=dc_dic['clk_freq'],
                cpu_time=dc_dic['cpu_usage'],
                dc_log=dc_dic['dc_log'].get('log_path', ""),
                run_time=rtime)
        dcrun_obj.error_info = dc_dic['dc_log'].get('error', {})
        dcrun_obj.warning_info = dc_dic['dc_log'].get('warning', {})
        if dc_dic['status'] == 'finished':
            dcrun_obj.time_rpt = dc_dic.get('tm_rpt', {})
            dcrun_obj.qor_rpt = dc_dic.get('qor_rpt', {})
            dcrun_obj.power_rpt = dc_dic.get('pw_rpt', {})
        dcrun_obj.save()
    return HttpResponse(json.dumps({}), content_type='application/json')
| cmos3511/cmos_linux | python/pj/cas_site/pj_app/dc_views.py | Python | gpl-3.0 | 13,719 |
#!/usr/bin/env python
"""
Find files in the FileCatalog using file metadata
"""
import DIRAC
if __name__ == "__main__":
from DIRAC.Core.Base import Script
Script.registerSwitch( '', 'Path=', ' Path to search for' )
Script.registerSwitch( '', 'SE=', ' (comma-separated list of) SEs/SE-groups to be searched' )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [options] metaspec [metaspec ...]' % Script.scriptName,
'Arguments:',
' metaspec: metadata index specification (of the form: "meta=value" or "meta<value", "meta!=value", etc.)',
'', 'Examples:',
' $ dirac-dms-find-lfns Path=/lhcb/user "Size>1000" "CreationDate<2015-05-15"',
] )
)
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.MetaQuery import MetaQuery, FILE_STANDARD_METAKEYS
from DIRAC import gLogger
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import resolveSEGroup
path = '/'
seList = None
for opt, val in Script.getUnprocessedSwitches():
if opt == 'Path':
path = val
elif opt == 'SE':
seList = resolveSEGroup( val.split( ',' ) )
if seList:
args.append( "SE=%s" % ','.join( seList ) )
fc = FileCatalog()
result = fc.getMetadataFields()
if not result['OK']:
gLogger.error( 'Can not access File Catalog:', result['Message'] )
DIRAC.exit( -1 )
typeDict = result['Value']['FileMetaFields']
typeDict.update( result['Value']['DirectoryMetaFields'] )
# Special meta tags
typeDict.update( FILE_STANDARD_METAKEYS )
if len( args ) < 1:
print "Error: No argument provided\n%s:" % Script.scriptName
Script.showHelp()
gLogger.notice( "MetaDataDictionary: \n%s" % str( typeDict ) )
DIRAC.exit( -1 )
mq = MetaQuery( typeDict = typeDict )
result = mq.setMetaQuery( args )
if not result['OK']:
gLogger.error( "Illegal metaQuery:", result['Message'] )
DIRAC.exit( -1 )
metaDict = result['Value']
path = metaDict.pop( 'Path', path )
result = fc.findFilesByMetadata( metaDict, path )
if not result['OK']:
gLogger.error( 'Can not access File Catalog:', result['Message'] )
DIRAC.exit( -1 )
lfnList = sorted( result['Value'] )
gLogger.notice( '\n'.join( lfn for lfn in lfnList ) )
| Andrew-McNab-UK/DIRAC | DataManagementSystem/scripts/dirac-dms-find-lfns.py | Python | gpl-3.0 | 2,651 |
# -*- coding: utf-8 -*-
"""MacOS keychain database files."""
import collections
from dtfabric.runtime import data_maps as dtfabric_data_maps
from dtformats import data_format
from dtformats import errors
class KeychainDatabaseColumn(object):
  """MacOS keychain database column.

  Attributes:
    attribute_data_type (int): attribute (data) type.
    attribute_identifier (int): attribute identifier.
    attribute_name (str): attribute name.
  """

  def __init__(self):
    """Initializes a MacOS keychain database column."""
    super(KeychainDatabaseColumn, self).__init__()
    # All fields start unset and are filled in while parsing.
    for field_name in ('attribute_data_type', 'attribute_identifier',
                       'attribute_name'):
      setattr(self, field_name, None)
class KeychainDatabaseTable(object):
  """MacOS keychain database table.

  Attributes:
    columns (list[KeychainDatabaseColumn]): columns.
    records (list[dict[str, object]]): records.
    relation_identifier (int): relation identifier.
    relation_name (str): relation name.
  """

  def __init__(self):
    """Initializes a MacOS keychain database table."""
    super(KeychainDatabaseTable, self).__init__()
    # Columns and records accumulate while the table is parsed; the
    # relation identifier and name start unset.
    self.columns = list()
    self.records = list()
    self.relation_identifier = None
    self.relation_name = None
class KeychainDatabaseFile(data_format.BinaryDataFile):
  """MacOS keychain database file."""

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric definition file.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('keychain.yaml')

  # Record types of the schema (meta) tables.
  _RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO = 0x00000000
  _RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES = 0x00000001
  _RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES = 0x00000002

  # Human-readable names per record (table) type value.
  _TABLE_NAMES = {
      0x00000000: 'CSSM_DL_DB_SCHEMA_INFO',
      0x00000001: 'CSSM_DL_DB_SCHEMA_INDEXES',
      0x00000002: 'CSSM_DL_DB_SCHEMA_ATTRIBUTES',
      0x00000003: 'CSSM_DL_DB_SCHEMA_PARSING_MODULE',
      0x0000000a: 'CSSM_DL_DB_RECORD_ANY',
      0x0000000b: 'CSSM_DL_DB_RECORD_CERT',
      0x0000000c: 'CSSM_DL_DB_RECORD_CRL',
      0x0000000d: 'CSSM_DL_DB_RECORD_POLICY',
      0x0000000e: 'CSSM_DL_DB_RECORD_GENERIC',
      0x0000000f: 'CSSM_DL_DB_RECORD_PUBLIC_KEY',
      0x00000010: 'CSSM_DL_DB_RECORD_PRIVATE_KEY',
      0x00000011: 'CSSM_DL_DB_RECORD_SYMMETRIC_KEY',
      0x00000012: 'CSSM_DL_DB_RECORD_ALL_KEYS',
      0x80000000: 'CSSM_DL_DB_RECORD_GENERIC_PASSWORD',
      0x80000001: 'CSSM_DL_DB_RECORD_INTERNET_PASSWORD',
      0x80000002: 'CSSM_DL_DB_RECORD_APPLESHARE_PASSWORD',
      0x80000003: 'CSSM_DL_DB_RECORD_USER_TRUST',
      0x80000004: 'CSSM_DL_DB_RECORD_X509_CRL',
      0x80000005: 'CSSM_DL_DB_RECORD_UNLOCK_REFERRAL',
      0x80000006: 'CSSM_DL_DB_RECORD_EXTENDED_ATTRIBUTE',
      0x80001000: 'CSSM_DL_DB_RECORD_X509_CERTIFICATE',
      0x80008000: 'CSSM_DL_DB_RECORD_METADATA'}

  # CSSM attribute (data) type names per numeric value.
  _ATTRIBUTE_DATA_TYPES = {
      0: 'CSSM_DB_ATTRIBUTE_FORMAT_STRING',
      1: 'CSSM_DB_ATTRIBUTE_FORMAT_SINT32',
      2: 'CSSM_DB_ATTRIBUTE_FORMAT_UINT32',
      3: 'CSSM_DB_ATTRIBUTE_FORMAT_BIG_NUM',
      4: 'CSSM_DB_ATTRIBUTE_FORMAT_REAL',
      5: 'CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE',
      6: 'CSSM_DB_ATTRIBUTE_FORMAT_BLOB',
      7: 'CSSM_DB_ATTRIBUTE_FORMAT_MULTI_UINT32',
      8: 'CSSM_DB_ATTRIBUTE_FORMAT_COMPLEX'}

  # Method name used to read each attribute data type; types without an
  # entry have no dedicated read function.
  _ATTRIBUTE_DATA_READ_FUNCTIONS = {
      0: '_ReadAttributeValueString',
      1: '_ReadAttributeValueInteger',
      2: '_ReadAttributeValueInteger',
      5: '_ReadAttributeValueDateTime',
      6: '_ReadAttributeValueBinaryData'}

  # Debug-output descriptors: (attribute, description, formatter) triples.
  _DEBUG_INFO_FILE_HEADER = [
      ('signature', 'Signature', '_FormatStreamAsSignature'),
      ('major_format_version', 'Major format version',
       '_FormatIntegerAsDecimal'),
      ('minor_format_version', 'Minor format version',
       '_FormatIntegerAsDecimal'),
      ('data_size', 'Data size', '_FormatIntegerAsDecimal'),
      ('tables_array_offset', 'Tables array offset',
       '_FormatIntegerAsHexadecimal8'),
      ('unknown1', 'Unknown1', '_FormatIntegerAsHexadecimal8')]

  _DEBUG_INFO_RECORD_HEADER = [
      ('data_size', 'Data size', '_FormatIntegerAsDecimal'),
      ('record_index', 'Record index', '_FormatIntegerAsDecimal'),
      ('unknown2', 'Unknown2', '_FormatIntegerAsHexadecimal8'),
      ('unknown3', 'Unknown3', '_FormatIntegerAsHexadecimal8'),
      ('key_data_size', 'Key data size', '_FormatIntegerAsDecimal'),
      ('unknown4', 'Unknown4', '_FormatIntegerAsHexadecimal8')]

  _DEBUG_INFO_TABLES_ARRAY = [
      ('data_size', 'Data size', '_FormatIntegerAsDecimal'),
      ('number_of_tables', 'Number of tables', '_FormatIntegerAsDecimal'),
      ('table_offsets', 'Table offsets', '_FormatTableOffsets')]

  _DEBUG_INFO_TABLE_HEADER = [
      ('data_size', 'Data size', '_FormatIntegerAsDecimal'),
      ('record_type', 'Record type', '_FormatIntegerAsRecordType'),
      ('number_of_records', 'Number of records', '_FormatIntegerAsDecimal'),
      ('record_array_offset', 'Record array offset',
       '_FormatIntegerAsHexadecimal8'),
      ('unknown1', 'Unknown1', '_FormatIntegerAsHexadecimal8'),
      ('unknown2', 'Unknown2', '_FormatIntegerAsHexadecimal8'),
      ('number_of_record_offsets', 'Number of record offsets',
       '_FormatIntegerAsDecimal'),
      ('record_offsets', 'Record offsets', '_FormatRecordOffsets')]

  _DEBUG_INFO_SCHEMA_INDEXES_RECORD_VALUES = [
      ('relation_identifier', 'Relation identifier',
       '_FormatIntegerAsHexadecimal8'),
      ('index_identifier', 'Index identifier', '_FormatIntegerAsHexadecimal8'),
      ('attribute_identifier', 'Attribute identifier',
       '_FormatIntegerAsHexadecimal8'),
      ('index_type', 'Index type', '_FormatIntegerAsHexadecimal8'),
      ('index_data_location', 'Index data location',
       '_FormatIntegerAsHexadecimal8')]
  def __init__(self, debug=False, output_writer=None):
    """Initializes a MacOS keychain database file.

    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super(KeychainDatabaseFile, self).__init__(
        debug=debug, output_writer=output_writer)
    # Ordered mapping of parsed tables; populated while reading the file
    # and exposed through the tables property.
    self._tables = collections.OrderedDict()
  @property
  def tables(self):
    """list[KeychainDatabaseTable]: tables."""
    # Note: this actually returns a dict values view, not a list; callers
    # that need indexing must wrap it in list().
    return self._tables.values()
def _FormatRecordOffsets(self, array_of_integers):
"""Formats the record offsets.
Args:
array_of_integers (list[int]): array of integers.
Returns:
str: formatted record offsets.
"""
lines = []
for index, record_offset in enumerate(array_of_integers):
description_string = 'Record offset: {0:d}'.format(index)
value_string = self._FormatIntegerAsHexadecimal8(record_offset)
line = self._FormatValue(description_string, value_string)
lines.append(line)
return ''.join(lines)
def _FormatIntegerAsRecordValue(self, integer):
"""Formats an integer as a record value.
Args:
integer (int): integer.
Returns:
str: integer formatted as record value.
"""
if integer is None:
return 'NULL'
return self._FormatIntegerAsHexadecimal8(integer)
def _FormatIntegerAsRecordType(self, integer):
"""Formats an integer as a record type.
Args:
integer (int): integer.
Returns:
str: integer formatted as record type.
"""
table_name = self._TABLE_NAMES.get(integer, 'UNKNOWN')
return '0x{0:08x} ({1:s})'.format(integer, table_name)
def _FormatStreamAsSignature(self, stream):
"""Formats a stream as a signature.
Args:
stream (bytes): stream.
Returns:
str: stream formatted as a signature.
"""
return stream.decode('ascii')
def _FormatTableOffsets(self, array_of_integers):
"""Formats the table offsets.
Args:
array_of_integers (list[int]): array of integers.
Returns:
str: formatted table offsets.
"""
lines = []
for index, table_offset in enumerate(array_of_integers):
description_string = 'Table offset: {0:d}'.format(index)
value_string = self._FormatIntegerAsHexadecimal8(table_offset)
line = self._FormatValue(description_string, value_string)
lines.append(line)
return ''.join(lines)
  def _ReadAttributeValueBinaryData(
      self, attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offset, description):
    """Reads a binary data attribute value.

    Args:
      attribute_values_data (bytes): attribute values data.
      record_offset (int): offset of the record relative to the start of
          the file.
      attribute_values_data_offset (int): offset of the attribute values data
          relative to the start of the record.
      attribute_value_offset (int): offset of the attribute relative to
          the start of the record.
      description (str): description of the attribute value.

    Returns:
      str: printable representation of the binary data value or None if
          attribute value offset is not set.

    Raises:
      ParseError: if the attribute value cannot be read.
    """
    # An offset of 0 means the attribute has no value.
    if attribute_value_offset == 0:
      return None
    data_type_map = self._GetDataTypeMap('keychain_blob')
    file_offset = (
        record_offset + attribute_values_data_offset + attribute_value_offset)
    # Rebase the record-relative offset onto attribute_values_data; the
    # stored offsets appear to be 1-based — TODO confirm against the format
    # documentation.
    attribute_value_offset -= attribute_values_data_offset + 1
    attribute_value_data = attribute_values_data[attribute_value_offset:]
    string_attribute_value = self._ReadStructureFromByteStream(
        attribute_value_data, file_offset, data_type_map, description)
    # NOTE(review): repr() yields a str, not raw bytes — confirm intended.
    return repr(string_attribute_value.blob)
  def _ReadAttributeValueDateTime(
      self, attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offset, description):
    """Reads a date time attribute value.

    Args:
      attribute_values_data (bytes): attribute values data.
      record_offset (int): offset of the record relative to the start of
          the file.
      attribute_values_data_offset (int): offset of the attribute values data
          relative to the start of the record.
      attribute_value_offset (int): offset of the attribute relative to
          the start of the record.
      description (str): description of the attribute value.

    Returns:
      str: date and time values or None if attribute value offset is not set.

    Raises:
      ParseError: if the attribute value cannot be read.
    """
    # An offset of 0 means the attribute has no value.
    if attribute_value_offset == 0:
      return None
    data_type_map = self._GetDataTypeMap('keychain_date_time')
    file_offset = (
        record_offset + attribute_values_data_offset + attribute_value_offset)
    # Rebase the record-relative offset onto attribute_values_data; the
    # stored offsets appear to be 1-based — TODO confirm against the format
    # documentation.
    attribute_value_offset -= attribute_values_data_offset + 1
    attribute_value_data = attribute_values_data[attribute_value_offset:]
    date_time_attribute_value = self._ReadStructureFromByteStream(
        attribute_value_data, file_offset, data_type_map, description)
    # The stored value is NUL padded; strip the padding.
    return date_time_attribute_value.date_time.rstrip('\x00')
  def _ReadAttributeValueInteger(
      self, attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offset, description):
    """Reads an integer attribute value.

    Args:
      attribute_values_data (bytes): attribute values data.
      record_offset (int): offset of the record relative to the start of
          the file.
      attribute_values_data_offset (int): offset of the attribute values data
          relative to the start of the record.
      attribute_value_offset (int): offset of the attribute relative to
          the start of the record.
      description (str): description of the attribute value.

    Returns:
      int: integer value or None if attribute value offset is not set.

    Raises:
      ParseError: if the attribute value cannot be read.
    """
    # An offset of 0 means the attribute has no value.
    if attribute_value_offset == 0:
      return None
    # Integers are stored big-endian.
    data_type_map = self._GetDataTypeMap('uint32be')
    file_offset = (
        record_offset + attribute_values_data_offset + attribute_value_offset)
    # Rebase the record-relative offset onto attribute_values_data; the
    # stored offsets appear to be 1-based — TODO confirm against the format
    # documentation.
    attribute_value_offset -= attribute_values_data_offset + 1
    attribute_value_data = attribute_values_data[attribute_value_offset:]
    return self._ReadStructureFromByteStream(
        attribute_value_data, file_offset, data_type_map, description)
def _ReadAttributeValueString(
    self, attribute_values_data, record_offset, attribute_values_data_offset,
    attribute_value_offset, description):
  """Reads a string attribute value.

  Args:
    attribute_values_data (bytes): attribute values data.
    record_offset (int): offset of the record relative to the start of
        the file.
    attribute_values_data_offset (int): offset of the attribute values data
        relative to the start of the record.
    attribute_value_offset (int): offset of the attribute relative to
        the start of the record.
    description (str): description of the attribute value.

  Returns:
    str: string value or None if attribute value offset is not set.

  Raises:
    ParseError: if the attribute value cannot be read.
  """
  # An offset of 0 indicates the attribute value is not set (NULL).
  if not attribute_value_offset:
    return None

  # Offset of the value relative to the start of the file, used by the
  # read function for error reporting.
  value_file_offset = (
      record_offset + attribute_values_data_offset + attribute_value_offset)

  # The stored offset is relative to the start of the record and 1-based;
  # convert it to a 0-based offset into the attribute values data.
  value_data_offset = (
      attribute_value_offset - attribute_values_data_offset - 1)
  value_data = attribute_values_data[value_data_offset:]

  data_type_map = self._GetDataTypeMap('keychain_string')
  string_value = self._ReadStructureFromByteStream(
      value_data, value_file_offset, data_type_map, description)

  return string_value.string
def _ReadFileHeader(self, file_object):
  """Reads the file header.

  Args:
    file_object (file): file-like object.

  Returns:
    keychain_file_header: file header.

  Raises:
    ParseError: if the file header cannot be read.
  """
  # The file header is always located at the start of the file.
  header_map = self._GetDataTypeMap('keychain_file_header')

  header, _ = self._ReadStructureFromFileObject(
      file_object, 0, header_map, 'file header')

  if self._debug:
    self._DebugPrintStructureObject(header, self._DEBUG_INFO_FILE_HEADER)

  return header
def _ReadRecord(self, tables, file_object, record_offset, record_type):
  """Reads the record.

  Args:
    tables (dict[str, KeychainDatabaseTable]): tables per name.
    file_object (file): file-like object.
    record_offset (int): offset of the record relative to the start of
        the file.
    record_type (int): record type, which should correspond to a relation
        identifier of a table defined in the schema.

  Raises:
    ParseError: if the record cannot be read.
  """
  table = tables.get(record_type, None)
  if not table:
    # Use the "x" presentation type so the identifier is rendered as
    # hexadecimal, matching the "0x" prefix in the message.
    raise errors.ParseError(
        'Missing table for relation identifier: 0x{0:08x}'.format(
            record_type))

  record_header = self._ReadRecordHeader(file_object, record_offset)

  record = collections.OrderedDict()

  if table.columns:
    number_of_columns = len(table.columns)

    # The attribute value offsets array starts 24 bytes into the record.
    attribute_value_offsets = self._ReadRecordAttributeValueOffset(
        file_object, record_offset + 24, number_of_columns)

    file_offset = file_object.tell()
    record_data_offset = file_offset - record_offset
    record_data_size = record_header.data_size
    record_data = file_object.read(record_data_size - record_data_offset)

    if self._debug:
      if record_header.key_data_size > 0:
        self._DebugPrintData(
            'Key data', record_data[:record_header.key_data_size])

      self._DebugPrintData(
          'Attribute values data', record_data[record_header.key_data_size:])

      # Determine the end offset of every attribute value by sorting the
      # (1-based) value offsets and appending the end of the record data.
      data_offsets = [
          offset - record_data_offset - 1
          for offset in sorted(attribute_value_offsets)
          if offset > record_data_offset]
      data_offsets.append(record_data_size - record_data_offset)
      data_offsets.pop(0)

    for index, column in enumerate(table.columns):
      if self._debug:
        if attribute_value_offsets[index] == 0:
          # An offset of 0 indicates the attribute value is not set.
          attribute_value_offset = 0
          attribute_value_end_offset = 0
        else:
          attribute_value_offset = (
              attribute_value_offsets[index] - record_data_offset - 1)
          attribute_value_end_offset = data_offsets[0]
          # Skip end offsets of values that precede the current one.
          while attribute_value_end_offset <= attribute_value_offset:
            data_offsets.pop(0)
            attribute_value_end_offset = data_offsets[0]

        description = 'Attribute value: {0:d} ({1:s}) data'.format(
            index, column.attribute_name)
        self._DebugPrintData(description, record_data[
            attribute_value_offset:attribute_value_end_offset])

      # The read function is looked up by attribute data type; unknown
      # types result in a None value.
      attribute_data_read_function = self._ATTRIBUTE_DATA_READ_FUNCTIONS.get(
          column.attribute_data_type, None)
      if attribute_data_read_function:
        attribute_data_read_function = getattr(
            self, attribute_data_read_function, None)

      if not attribute_data_read_function:
        attribute_value = None
      else:
        attribute_value = attribute_data_read_function(
            record_data, record_offset, record_data_offset,
            attribute_value_offsets[index], column.attribute_name)

      record[column.attribute_name] = attribute_value

  table.records.append(record)
def _ReadRecordAttributeValueOffset(
    self, file_object, file_offset, number_of_attribute_values):
  """Reads the record attribute value offsets.

  Args:
    file_object (file): file-like object.
    file_offset (int): offset of the record attribute values offsets relative
        to the start of the file.
    number_of_attribute_values (int): number of attribute values.

  Returns:
    keychain_record_attribute_value_offsets: record attribute value offsets.

  Raises:
    ParseError: if the record attribute value offsets cannot be read.
  """
  # Each attribute value offset is stored as a 32-bit integer.
  offsets_data = file_object.read(number_of_attribute_values * 4)

  if self._debug:
    self._DebugPrintData('Attribute value offsets data', offsets_data)

  context = dtfabric_data_maps.DataTypeMapContext(values={
      'number_of_attribute_values': number_of_attribute_values})
  offsets_map = self._GetDataTypeMap(
      'keychain_record_attribute_value_offsets')

  value_offsets = self._ReadStructureFromByteStream(
      offsets_data, file_offset, offsets_map,
      'record attribute value offsets', context=context)

  if self._debug:
    for index, value_offset in enumerate(value_offsets):
      self._DebugPrintValue(
          'Attribute value offset: {0:d}'.format(index),
          self._FormatIntegerAsHexadecimal8(value_offset))

    self._DebugPrintText('\n')

  return value_offsets
def _ReadRecordHeader(self, file_object, record_header_offset):
  """Reads the record header.

  Args:
    file_object (file): file-like object.
    record_header_offset (int): offset of the record header relative to
        the start of the file.

  Returns:
    keychain_record_header: record header.

  Raises:
    ParseError: if the record header cannot be read.
  """
  header_map = self._GetDataTypeMap('keychain_record_header')

  header, _ = self._ReadStructureFromFileObject(
      file_object, record_header_offset, header_map, 'record header')

  if self._debug:
    self._DebugPrintStructureObject(header, self._DEBUG_INFO_RECORD_HEADER)

  return header
def _ReadRecordSchemaAttributes(self, tables, file_object, record_offset):
  """Reads a schema attributes (CSSM_DL_DB_SCHEMA_ATTRIBUTES) record.

  Args:
    tables (dict[str, KeychainDatabaseTable]): tables per name.
    file_object (file): file-like object.
    record_offset (int): offset of the record relative to the start of
        the file.

  Raises:
    ParseError: if the record cannot be read.
  """
  record_header = self._ReadRecordHeader(file_object, record_offset)

  # A schema attributes record has 6 attribute values; the offsets array
  # starts 24 bytes into the record.
  attribute_value_offsets = self._ReadRecordAttributeValueOffset(
      file_object, record_offset + 24, 6)

  file_offset = file_object.tell()
  attribute_values_data_offset = file_offset - record_offset
  attribute_values_data_size = record_header.data_size - (
      file_offset - record_offset)
  attribute_values_data = file_object.read(attribute_values_data_size)

  if self._debug:
    self._DebugPrintData('Attribute values data', attribute_values_data)

  relation_identifier = self._ReadAttributeValueInteger(
      attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offsets[0], 'relation identifier')

  if self._debug:
    if relation_identifier is None:
      value_string = 'NULL'
    else:
      table_name = self._TABLE_NAMES.get(relation_identifier, 'UNKNOWN')
      value_string = '0x{0:08x} ({1:s})'.format(
          relation_identifier, table_name)

    self._DebugPrintValue('Relation identifier', value_string)

  attribute_identifier = self._ReadAttributeValueInteger(
      attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offsets[1], 'attribute identifier')

  if self._debug:
    value_string = self._FormatIntegerAsRecordValue(attribute_identifier)
    self._DebugPrintValue('Attribute identifier', value_string)

  attribute_name_data_type = self._ReadAttributeValueInteger(
      attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offsets[2], 'attribute name data type')

  if self._debug:
    if attribute_name_data_type is None:
      value_string = 'NULL'
    else:
      data_type_string = self._ATTRIBUTE_DATA_TYPES.get(
          attribute_name_data_type, 'UNKNOWN')
      value_string = '{0:d} ({1:s})'.format(
          attribute_name_data_type, data_type_string)

    self._DebugPrintValue('Attribute name data type', value_string)

  attribute_name = self._ReadAttributeValueString(
      attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offsets[3], 'attribute name')

  if self._debug:
    if attribute_name is None:
      value_string = 'NULL'
    else:
      value_string = attribute_name

    self._DebugPrintValue('Attribute name', value_string)

  # TODO: add support for AttributeNameID

  attribute_data_type = self._ReadAttributeValueInteger(
      attribute_values_data, record_offset, attribute_values_data_offset,
      attribute_value_offsets[5], 'attribute data type')

  if self._debug:
    if attribute_data_type is None:
      value_string = 'NULL'
    else:
      data_type_string = self._ATTRIBUTE_DATA_TYPES.get(
          attribute_data_type, 'UNKNOWN')
      value_string = '{0:d} ({1:s})'.format(
          attribute_data_type, data_type_string)

    self._DebugPrintValue('Attribute data type', value_string)

  if self._debug:
    self._DebugPrintText('\n')

  table = tables.get(relation_identifier, None)
  if not table:
    # Use the "x" presentation type so the identifier is rendered as
    # hexadecimal, matching the "0x" prefix in the message.
    raise errors.ParseError(
        'Missing table for relation identifier: 0x{0:08x}'.format(
            relation_identifier))

  # TODO: map attribute identifier to module specific names?
  if attribute_name is None and attribute_value_offsets[1] != 0:
    # Fall back to interpreting the first 4 bytes of the attribute
    # identifier value as an ASCII string.
    attribute_value_offset = attribute_value_offsets[1]
    attribute_value_offset -= attribute_values_data_offset + 1
    attribute_name = attribute_values_data[
        attribute_value_offset:attribute_value_offset + 4]
    attribute_name = attribute_name.decode('ascii')

  column = KeychainDatabaseColumn()
  column.attribute_data_type = attribute_data_type
  column.attribute_identifier = attribute_identifier
  column.attribute_name = attribute_name

  table.columns.append(column)

  table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES, None)
  if not table:
    raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_ATTRIBUTES table.')

  record = collections.OrderedDict({
      'RelationID': relation_identifier,
      'AttributeID': attribute_identifier,
      'AttributeNameFormat': attribute_name_data_type,
      'AttributeName': attribute_name,
      'AttributeFormat': attribute_data_type})

  table.records.append(record)
def _ReadRecordSchemaIndexes(self, tables, file_object, record_offset):
  """Reads a schema indexes (CSSM_DL_DB_SCHEMA_INDEXES) record.

  Args:
    tables (dict[str, KeychainDatabaseTable]): tables per name.
    file_object (file): file-like object.
    record_offset (int): offset of the record relative to the start of
        the file.

  Raises:
    ParseError: if the record cannot be read.
  """
  record_header = self._ReadRecordHeader(file_object, record_offset)

  attribute_value_offsets = self._ReadRecordAttributeValueOffset(
      file_object, record_offset + 24, 5)

  # Schema indexes records are expected to have a fixed attribute value
  # layout.
  if attribute_value_offsets != (0x2d, 0x31, 0x35, 0x39, 0x3d):
    raise errors.ParseError('Unsupported record attribute value offsets')

  file_offset = file_object.tell()
  data_type_map = self._GetDataTypeMap('keychain_record_schema_indexes')

  record_values, _ = self._ReadStructureFromFileObject(
      file_object, file_offset, data_type_map,
      'schema indexes record values')

  if self._debug:
    self._DebugPrintStructureObject(
        record_values, self._DEBUG_INFO_SCHEMA_INDEXES_RECORD_VALUES)

  if self._debug:
    file_offset = file_object.tell()
    trailing_data_size = record_header.data_size - (
        file_offset - record_offset)
    if trailing_data_size == 0:
      self._DebugPrintText('\n')
    else:
      trailing_data = file_object.read(trailing_data_size)
      self._DebugPrintData('Record trailing data', trailing_data)

  if record_values.relation_identifier not in tables:
    raise errors.ParseError(
        'CSSM_DL_DB_SCHEMA_INDEXES defines relation identifier not defined '
        'in CSSM_DL_DB_SCHEMA_INFO.')

  table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES, None)
  if not table:
    raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INDEXES table.')

  record = collections.OrderedDict({
      'RelationID': record_values.relation_identifier,
      'IndexID': record_values.index_identifier,
      'AttributeID': record_values.attribute_identifier,
      'IndexType': record_values.index_type,
      'IndexedDataLocation': record_values.index_data_location})

  table.records.append(record)
def _ReadRecordSchemaInformation(self, tables, file_object, record_offset):
  """Reads a schema information (CSSM_DL_DB_SCHEMA_INFO) record.

  Args:
    tables (dict[str, KeychainDatabaseTable]): tables per name.
    file_object (file): file-like object.
    record_offset (int): offset of the record relative to the start of
        the file.

  Raises:
    ParseError: if the record cannot be read.
  """
  record_header = self._ReadRecordHeader(file_object, record_offset)

  attribute_value_offsets = self._ReadRecordAttributeValueOffset(
      file_object, record_offset + 24, 2)

  # Schema information records are expected to have a fixed attribute
  # value layout.
  if attribute_value_offsets != (0x21, 0x25):
    raise errors.ParseError('Unsupported record attribute value offsets')

  file_offset = file_object.tell()
  data_type_map = self._GetDataTypeMap('keychain_record_schema_information')

  record_values, _ = self._ReadStructureFromFileObject(
      file_object, file_offset, data_type_map,
      'schema information record values')

  relation_name = record_values.relation_name.decode('ascii')

  if self._debug:
    value_string = '0x{0:08x}'.format(record_values.relation_identifier)
    self._DebugPrintValue('Relation identifier', value_string)

    value_string = '{0:d}'.format(record_values.relation_name_size)
    self._DebugPrintValue('Relation name size', value_string)

    self._DebugPrintValue('Relation name', relation_name)

  if self._debug:
    file_offset = file_object.tell()
    trailing_data_size = record_header.data_size - (
        file_offset - record_offset)
    if trailing_data_size == 0:
      self._DebugPrintText('\n')
    else:
      trailing_data = file_object.read(trailing_data_size)
      self._DebugPrintData('Record trailing data', trailing_data)

  # Each schema information record defines a table of the database.
  table = KeychainDatabaseTable()
  table.relation_identifier = record_values.relation_identifier
  table.relation_name = relation_name

  tables[table.relation_identifier] = table

  table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO, None)
  if not table:
    raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INFO table.')

  record = collections.OrderedDict({
      'RelationID': record_values.relation_identifier,
      'RelationName': relation_name})

  table.records.append(record)
def _ReadTable(self, tables, file_object, table_offset):
  """Reads the table.

  Args:
    tables (dict[str, KeychainDatabaseTable]): tables per name.
    file_object (file): file-like object.
    table_offset (int): offset of the table relative to the start of
        the file.

  Raises:
    ParseError: if the table cannot be read.
  """
  table_header = self._ReadTableHeader(file_object, table_offset)
  record_type = table_header.record_type

  for relative_record_offset in table_header.record_offsets:
    # An offset of 0 indicates an unused record slot.
    if not relative_record_offset:
      continue

    # Record offsets are stored relative to the start of the table.
    record_offset = table_offset + relative_record_offset

    if record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO:
      self._ReadRecordSchemaInformation(tables, file_object, record_offset)
    elif record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES:
      self._ReadRecordSchemaIndexes(tables, file_object, record_offset)
    elif record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES:
      self._ReadRecordSchemaAttributes(tables, file_object, record_offset)
    else:
      self._ReadRecord(tables, file_object, record_offset, record_type)

  if self._debug:
    trailing_data_size = table_header.data_size - (
        file_object.tell() - table_offset)
    if trailing_data_size != 0:
      self._DebugPrintData(
          'Table trailing data', file_object.read(trailing_data_size))
def _ReadTableHeader(self, file_object, table_header_offset):
  """Reads the table header.

  Args:
    file_object (file): file-like object.
    table_header_offset (int): offset of the table header relative to
        the start of the file.

  Returns:
    keychain_table_header: table header.

  Raises:
    ParseError: if the table header cannot be read.
  """
  header_map = self._GetDataTypeMap('keychain_table_header')

  header, _ = self._ReadStructureFromFileObject(
      file_object, table_header_offset, header_map, 'table header')

  if self._debug:
    self._DebugPrintStructureObject(header, self._DEBUG_INFO_TABLE_HEADER)

  return header
def _ReadTablesArray(self, file_object, tables_array_offset):
  """Reads the tables array.

  Args:
    file_object (file): file-like object.
    tables_array_offset (int): offset of the tables array relative to
        the start of the file.

  Returns:
    dict[str, KeychainDatabaseTable]: tables per name.

  Raises:
    ParseError: if the tables array cannot be read.
  """
  # TODO: implement https://github.com/libyal/dtfabric/issues/12 and update
  # keychain_tables_array definition.
  array_map = self._GetDataTypeMap('keychain_tables_array')

  tables_array, _ = self._ReadStructureFromFileObject(
      file_object, tables_array_offset, array_map, 'tables array')

  if self._debug:
    self._DebugPrintStructureObject(
        tables_array, self._DEBUG_INFO_TABLES_ARRAY)

  tables = collections.OrderedDict()
  # Table offsets are stored relative to the start of the tables array.
  for relative_table_offset in tables_array.table_offsets:
    self._ReadTable(
        tables, file_object, tables_array_offset + relative_table_offset)

  return tables
def ReadFileObject(self, file_object):
  """Reads a MacOS keychain database file-like object.

  Args:
    file_object (file): file-like object.

  Raises:
    ParseError: if the file cannot be read.
  """
  file_header = self._ReadFileHeader(file_object)

  # The file header points at the tables array, which describes every
  # table in the database.
  self._tables = self._ReadTablesArray(
      file_object, file_header.tables_array_offset)
| libyal/dtformats | dtformats/keychain.py | Python | apache-2.0 | 32,572 |
"""
SARIMAX Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from statsmodels.compat.python import long
from warnings import warn
import numpy as np
import pandas as pd
from .kalman_filter import KalmanFilter
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible, constrain_stationary_univariate,
unconstrain_stationary_univariate, solve_discrete_lyapunov
)
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ValueWarning
import statsmodels.base.wrapper as wrap
class SARIMAX(MLEModel):
r"""
Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors
model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable or iterable of iterables, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. `d` must be an integer
indicating the integration order of the process, while
        `p` and `q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is
an AR(1) model: (1,0,0).
seasonal_order : iterable, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity.
        `D` must be an integer indicating the integration order of the
        process, while `P` and `Q` may either be integers indicating the AR
        and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. `s` is an
integer giving the periodicity (number of periods in season), often it
is 4 for quarterly data or 12 for monthly data. Default is no seasonal
effect.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend polynomial :math:`A(t)`.
Can be specified as a string where 'c' indicates a constant (i.e. a
degree zero component of the trend polynomial), 't' indicates a
linear trend with time, and 'ct' is both. Can also be specified as an
iterable defining the polynomial as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not
include a trend component.
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
time_varying_regression : boolean, optional
        Used when explanatory variables, `exog`, are provided
to select whether or not coefficients on the exogenous regressors are
allowed to vary over time. Default is False.
mle_regression : boolean, optional
        Whether or not to estimate the regression coefficients for the
exogenous variables as part of maximum likelihood estimation or through
the Kalman filter (i.e. recursive least squares). If
`time_varying_regression` is True, this must be set to False. Default
is True.
simple_differencing : boolean, optional
Whether or not to use partially conditional maximum likelihood
estimation. If True, differencing is performed prior to estimation,
        which discards the first :math:`s D + d` initial rows but results in a
smaller state-space formulation. If False, the full SARIMAX model is
put in state-space form so that all datapoints can be used in
estimation. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
hamilton_representation : boolean, optional
Whether or not to use the Hamilton representation of an ARMA process
(if True) or the Harvey representation (if False). Default is False.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
measurement_error : boolean
Whether or not to assume the endogenous
observations `endog` were measured with error.
state_error : boolean
Whether or not the transition equation has an error component.
mle_regression : boolean
Whether or not the regression coefficients for
the exogenous variables were estimated via maximum
likelihood estimation.
state_regression : boolean
Whether or not the regression coefficients for
the exogenous variables are included as elements
of the state space and estimated via the Kalman
filter.
time_varying_regression : boolean
Whether or not coefficients on the exogenous
regressors are allowed to vary over time.
simple_differencing : boolean
Whether or not to use partially conditional maximum likelihood
estimation.
enforce_stationarity : boolean
Whether or not to transform the AR parameters
to enforce stationarity in the autoregressive
component of the model.
enforce_invertibility : boolean
Whether or not to transform the MA parameters
to enforce invertibility in the moving average
component of the model.
hamilton_representation : boolean
Whether or not to use the Hamilton representation of an ARMA process.
trend : str{'n','c','t','ct'} or iterable
Parameter controlling the deterministic
trend polynomial :math:`A(t)`. See the class
parameter documentation for more information.
polynomial_ar : array
Array containing autoregressive lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients,
ordered from lowest degree to highest. Initialized
with ones, unless a coefficient is constrained to be
zero (in which case it is zero).
k_ar : int
Highest autoregressive order in the model, zero-indexed.
k_ar_params : int
Number of autoregressive parameters to be estimated.
k_diff : int
        Order of integration.
k_ma : int
Highest moving average order in the model, zero-indexed.
k_ma_params : int
Number of moving average parameters to be estimated.
seasonal_periods : int
Number of periods in a season.
k_seasonal_ar : int
Highest seasonal autoregressive order in the model, zero-indexed.
k_seasonal_ar_params : int
Number of seasonal autoregressive parameters to be estimated.
k_seasonal_diff : int
        Order of seasonal integration.
k_seasonal_ma : int
Highest seasonal moving average order in the model, zero-indexed.
k_seasonal_ma_params : int
Number of seasonal moving average parameters to be estimated.
k_trend : int
Order of the trend polynomial plus one (i.e. the constant polynomial
would have `k_trend=1`).
k_exog : int
Number of exogenous regressors.
Notes
-----
The SARIMA model is specified :math:`(p, d, q) \times (P, D, Q)_s`.
.. math::
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
In terms of a univariate structural model, this can be represented as
.. math::
y_t & = u_t + \eta_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
where :math:`\eta_t` is only applicable in the case of measurement error
(although it is also used in the case of a pure regression model, i.e. if
p=q=0).
In terms of this model, regression with SARIMA errors can be represented
easily as
.. math::
y_t & = \beta_t x_t + u_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
this model is the one used when exogenous regressors are provided.
Note that the reduced form lag polynomials will be written as:
.. math::
\Phi (L) \equiv \phi_p (L) \tilde \phi_P (L^s) \\
\Theta (L) \equiv \theta_q (L) \tilde \theta_Q (L^s)
If `mle_regression` is True, regression coefficients are treated as
additional parameters to be estimated via maximum likelihood. Otherwise
they are included as part of the state with a diffuse initialization.
In this case, however, with approximate diffuse initialization, results
can be sensitive to the initial variance.
This class allows two different underlying representations of ARMA models
as state space models: that of Hamilton and that of Harvey. Both are
equivalent in the sense that they are analytical representations of the
ARMA model, but the state vectors of each have different meanings. For
this reason, maximum likelihood does not result in identical parameter
estimates and even the same set of parameters will result in different
loglikelihoods.
The Harvey representation is convenient because it allows integrating
differencing into the state vector to allow using all observations for
estimation.
In this implementation of differenced models, the Hamilton representation
    is not able to accommodate differencing in the state vector, so
`simple_differencing` (which performs differencing prior to estimation so
that the first d + sD observations are lost) must be used.
Many other packages use the Hamilton representation, so that tests against
Stata and R require using it along with simple differencing (as Stata
does).
Detailed information about state space models can be found in [1]_. Some
specific references are:
- Chapter 3.4 describes ARMA and ARIMA models in state space form (using
the Harvey representation), and gives references for basic seasonal
models and models with a multiplicative form (for example the airline
model). It also shows a state space model for a full ARIMA process (this
is what is done here if `simple_differencing=False`).
- Chapter 3.6 describes estimating regression effects via the Kalman filter
(this is performed if `mle_regression` is False), regression with
time-varying coefficients, and regression with ARMA errors (recall from
above that if regression effects are present, the model estimated by this
class is regression with SARIMA errors).
- Chapter 8.4 describes the application of an ARMA model to an example
dataset. A replication of this section is available in an example
IPython notebook in the documentation.
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
def __init__(self, endog, exog=None, order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
measurement_error=False, time_varying_regression=False,
mle_regression=True, simple_differencing=False,
enforce_stationarity=True, enforce_invertibility=True,
hamilton_representation=False, **kwargs):
# Model parameters
self.seasonal_periods = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError('Models with time-varying regression coefficients'
' must integrate the coefficients as part of the'
' state vector, so that `mle_regression` must'
' be set to False.')
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], (int, long, np.integer)):
self.polynomial_ar = np.r_[1., np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1., order[0]]
if isinstance(order[2], (int, long, np.integer)):
self.polynomial_ma = np.r_[1., np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1., order[2]]
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], (int, long, np.integer)):
self.polynomial_seasonal_ar = np.r_[
1., # constant
([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[0]
]
else:
self.polynomial_seasonal_ar = np.r_[
1., [0] * self.seasonal_periods * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
tmp = (i + 1) * self.seasonal_periods
self.polynomial_seasonal_ar[tmp] = seasonal_order[0][i]
if isinstance(seasonal_order[2], (int, long, np.integer)):
self.polynomial_seasonal_ma = np.r_[
1., # constant
([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[2]
]
else:
self.polynomial_seasonal_ma = np.r_[
1., [0] * self.seasonal_periods * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
tmp = (i + 1) * self.seasonal_periods
self.polynomial_seasonal_ma[tmp] = seasonal_order[2][i]
# Deterministic trend polynomial
self.trend = trend
if trend is None or trend == 'n':
self.polynomial_trend = np.ones((0))
elif trend == 'c':
self.polynomial_trend = np.r_[1]
elif trend == 't':
self.polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
self.polynomial_trend = np.r_[1, 1]
else:
self.polynomial_trend = (np.array(trend) > 0).astype(int)
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
# Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar - 1 and
# q = k_ma_params = k_ma - 1, although this may not be true for models
# with arbitrary log polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = (
int(np.sum(self.polynomial_seasonal_ar) - 1)
)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = (
int(np.sum(self.polynomial_seasonal_ma) - 1)
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if (self.hamilton_representation and not (self.simple_differencing or
self._k_diff == self._k_seasonal_diff == 0)):
raise ValueError('The Hamilton representation is only available'
' for models in which there is no differencing'
' integrated into the state vector. Set'
' `simple_differencing` to True or set'
' `hamilton_representation` to False')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
self.k_trend = int(np.sum(self.polynomial_trend))
# Model order
# (this is used internally in a number of locations)
self._k_order = max(self.k_ar + self.k_seasonal_ar,
self.k_ma + self.k_seasonal_ma + 1)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
# Handle time-varying regression
if self.time_varying_regression:
self._k_order = 0
# Exogenous data
self.k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim < 2:
if not exog_is_using_pandas:
exog = np.atleast_2d(exog).T
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = (
self.mle_regression and exog is not None and self.k_exog > 0
)
# State regression is regression with coefficients estiamted within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += (self.seasonal_periods * self._k_seasonal_diff +
self._k_diff)
if self.state_regression:
k_states += self.k_exog
# Number of diffuse states
k_diffuse_states = k_states
if self.enforce_stationarity:
k_diffuse_states -= self._k_order
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
# Diffuse initialization can be more sensistive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault('initial_variance', 1e10)
# Number of parameters
self.k_params = (
self.k_ar_params + self.k_ma_params +
self.k_seasonal_ar_params + self.k_seasonal_ar_params +
self.k_trend +
self.measurement_error + 1
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if (self.simple_differencing and
(self._k_diff > 0 or self._k_seasonal_diff > 0)):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = (
self._k_diff + self.seasonal_periods * self._k_seasonal_diff
)
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', k_diffuse_states)
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Set as time-varying model if we have time-trend or exog
if self.k_exog > 0 or len(self.polynomial_trend) > 1:
self.ssm._time_invariant = False
# Handle kwargs specified initialization
if self.ssm.initialization is not None:
self._manual_initialization = True
# Initialize the fixed components of the statespace model
self.ssm['design'] = self.initial_design
self.ssm['state_intercept'] = self.initial_state_intercept
self.ssm['transition'] = self.initial_transition
self.ssm['selection'] = self.initial_selection
# If we are estimating a simple ARMA model, then we can use a faster
# initialization method (unless initialization was already specified).
if k_diffuse_states == 0 and not self._manual_initialization:
self.initialize_stationary()
# update _init_keys attached by super
self._init_keys += ['order', 'seasonal_order', 'trend',
'measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'enforce_stationarity', 'enforce_invertibility',
'hamilton_representation'] + list(kwargs.keys())
# TODO: I think the kwargs or not attached, need to recover from ???
def _get_init_kwds(self):
kwds = super(SARIMAX, self)._get_init_kwds()
for key, value in kwds.items():
if value is None and hasattr(self.ssm, key):
kwds[key] = getattr(self.ssm, key)
return kwds
    def prepare_data(self):
        """
        Prepare data for use in the state space representation, applying
        simple differencing first (and shortening the dataset) if requested.
        """
        endog, exog = super(SARIMAX, self).prepare_data()

        # Perform simple differencing if requested
        if (self.simple_differencing and
           (self.orig_k_diff > 0 or self.orig_k_seasonal_diff > 0)):
            # Save the original length
            orig_length = endog.shape[0]
            # Perform simple differencing
            endog = diff(endog.copy(), self.orig_k_diff,
                         self.orig_k_seasonal_diff, self.seasonal_periods)
            if exog is not None:
                exog = diff(exog.copy(), self.orig_k_diff,
                            self.orig_k_seasonal_diff, self.seasonal_periods)

            # Reset the ModelData datasets and cache
            self.data.endog, self.data.exog = (
                self.data._convert_endog_exog(endog, exog))

            # Reset indexes, if provided
            new_length = self.data.endog.shape[0]
            if self.data.row_labels is not None:
                # Drop the labels of the pre-sample observations lost to
                # differencing
                self.data._cache['row_labels'] = (
                    self.data.row_labels[orig_length - new_length:])
            if self._index is not None:
                if self._index_generated:
                    # A generated (default) index can simply be truncated
                    self._index = self._index[:-(orig_length - new_length)]
                else:
                    # A user-provided index drops its leading labels instead
                    self._index = self._index[orig_length - new_length:]

            # Reset the nobs
            self.nobs = endog.shape[0]

        # Cache the arrays for calculating the intercept from the trend
        # components
        # (one column per included trend power; column of ones for the
        # constant, t**k for power k)
        time_trend = np.arange(1, self.nobs + 1)
        self._trend_data = np.zeros((self.nobs, self.k_trend))
        i = 0
        for k in self.polynomial_trend.nonzero()[0]:
            if k == 0:
                self._trend_data[:, i] = np.ones(self.nobs,)
            else:
                self._trend_data[:, i] = time_trend**k
            i += 1

        return endog, exog
    def initialize(self):
        """
        Initialize the SARIMAX model.

        Notes
        -----
        These initialization steps must occur following the parent class
        __init__ function calls.
        """
        super(SARIMAX, self).initialize()

        # Internal flag for whether the default mixed approximate diffuse /
        # stationary initialization has been overridden with a user-supplied
        # initialization
        self._manual_initialization = False

        # Cache the indexes of included polynomial orders (for update below)
        # (but we do not want the index of the constant term, so exclude the
        # first index)
        self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]
        self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]
        self._polynomial_seasonal_ar_idx = np.nonzero(
            self.polynomial_seasonal_ar
        )[0][1:]
        self._polynomial_seasonal_ma_idx = np.nonzero(
            self.polynomial_seasonal_ma
        )[0][1:]

        # Save the indices corresponding to the reduced form lag polynomial
        # parameters in the transition and selection matrices so that they
        # don't have to be recalculated for each update()
        # (`np.s_[name, rows, col]` builds the index tuple used with
        # `self.ssm[...]`)
        start_row = self._k_states_diff
        end_row = start_row + self.k_ar + self.k_seasonal_ar
        col = self._k_states_diff
        if not self.hamilton_representation:
            # Harvey representation: AR coefficients occupy a column of the
            # transition matrix
            self.transition_ar_params_idx = (
                np.s_['transition', start_row:end_row, col]
            )
        else:
            # Hamilton representation: AR coefficients occupy a row instead
            self.transition_ar_params_idx = (
                np.s_['transition', col, start_row:end_row]
            )

        start_row += 1
        end_row = start_row + self.k_ma + self.k_seasonal_ma
        col = 0
        if not self.hamilton_representation:
            # Harvey representation: MA coefficients enter via the selection
            # matrix
            self.selection_ma_params_idx = (
                np.s_['selection', start_row:end_row, col]
            )
        else:
            # Hamilton representation: MA coefficients enter via the design
            # matrix
            self.design_ma_params_idx = (
                np.s_['design', col, start_row:end_row]
            )

        # Cache indices for exog variances in the state covariance matrix
        # (the time-varying regression coefficient variances occupy the last
        # k_exog diagonal entries of the state covariance)
        if self.state_regression and self.time_varying_regression:
            idx = np.diag_indices(self.k_posdef)
            self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],
                                       idx[1][-self.k_exog:])
    def initialize_known(self, initial_state, initial_state_cov):
        # Record that initialization was explicitly specified so that
        # `initialize_state` will not overwrite it later, then delegate to
        # the state space representation.
        self._manual_initialization = True
        self.ssm.initialize_known(initial_state, initial_state_cov)
    initialize_known.__doc__ = KalmanFilter.initialize_known.__doc__
    def initialize_approximate_diffuse(self, variance=None):
        # Record that initialization was explicitly specified so that
        # `initialize_state` will not overwrite it later, then delegate to
        # the state space representation.
        self._manual_initialization = True
        self.ssm.initialize_approximate_diffuse(variance)
    initialize_approximate_diffuse.__doc__ = (
        KalmanFilter.initialize_approximate_diffuse.__doc__
    )
    def initialize_stationary(self):
        # Record that initialization was explicitly specified so that
        # `initialize_state` will not overwrite it later, then delegate to
        # the state space representation.
        self._manual_initialization = True
        self.ssm.initialize_stationary()
    initialize_stationary.__doc__ = (
        KalmanFilter.initialize_stationary.__doc__
    )
    def initialize_state(self, variance=None, complex_step=False):
        """
        Initialize state and state covariance arrays in preparation for the
        Kalman filter.

        Parameters
        ----------
        variance : float, optional
            The variance for approximating diffuse initial conditions. Default
            can be found in the Representation class documentation.
        complex_step : bool, optional
            Passed through to the discrete Lyapunov solver used for the
            stationary component.

        Notes
        -----
        Initializes the ARMA component of the state space to the typical
        stationary values and the other components as approximate diffuse.

        Can be overridden by calling one of the other initialization methods
        before fitting the model.
        """
        # Check if a manual initialization has already been specified
        if self._manual_initialization:
            return

        # If we're not enforcing stationarity, then we can't initialize a
        # stationary component
        if not self.enforce_stationarity:
            self.initialize_approximate_diffuse(variance)
            return

        # Otherwise, create the initial state and state covariance matrix
        # as from a combination of diffuse and stationary components

        # Create initialized non-stationary components
        if variance is None:
            variance = self.ssm.initial_variance

        dtype = self.ssm.transition.dtype
        initial_state = np.zeros(self.k_states, dtype=dtype)
        initial_state_cov = np.eye(self.k_states, dtype=dtype) * variance

        # Get the offsets (from the bottom or bottom right of the vector /
        # matrix) for the stationary component.
        if self.state_regression:
            start = -(self.k_exog + self._k_order)
            end = -self.k_exog if self.k_exog > 0 else None
        else:
            start = -self._k_order
            end = None

        # Add in the initialized stationary components
        # (the stationary covariance solves the discrete Lyapunov equation
        # for the ARMA sub-block of the transition matrix)
        if self._k_order > 0:
            selection_stationary = self.ssm['selection', start:end, :, 0]
            selected_state_cov_stationary = np.dot(
                np.dot(selection_stationary, self.ssm['state_cov', :, :, 0]),
                selection_stationary.T
            )
            initial_state_cov_stationary = solve_discrete_lyapunov(
                self.ssm['transition', start:end, start:end, 0],
                selected_state_cov_stationary,
                complex_step=complex_step
            )

            initial_state_cov[start:end, start:end] = (
                initial_state_cov_stationary
            )

        self.ssm.initialize_known(initial_state, initial_state_cov)
    @property
    def initial_design(self):
        """Initial design matrix"""
        # Basic design matrix
        # [differencing terms | seasonal differencing terms | ARMA component]
        design = np.r_[
            [1] * self._k_diff,
            ([0] * (self.seasonal_periods - 1) + [1]) * self._k_seasonal_diff,
            [1] * self.state_error, [0] * (self._k_order - 1)
        ]

        # Ensure at least a one-element design even for a degenerate model
        # with no states of any kind
        if len(design) == 0:
            design = np.r_[0]

        # If we have exogenous regressors included as part of the state vector
        # then the exogenous data is incorporated as a time-varying component
        # of the design matrix
        if self.state_regression:
            if self._k_order > 0:
                # Repeat the (constant) base design across time and append
                # the exog data, producing a (1, k_states, nobs) array
                design = np.c_[
                    np.reshape(
                        np.repeat(design, self.nobs),
                        (design.shape[0], self.nobs)
                    ).T,
                    self.exog
                ].T[None, :, :]
            else:
                # Purely a regression: the design is the exog data itself
                design = self.exog.T[None, :, :]
        return design
@property
def initial_state_intercept(self):
"""Initial state intercept vector"""
# TODO make this self.k_trend > 1 and adjust the update to take
# into account that if the trend is a constant, it is not time-varying
if self.k_trend > 0:
state_intercept = np.zeros((self.k_states, self.nobs))
else:
state_intercept = np.zeros((self.k_states,))
return state_intercept
    @property
    def initial_transition(self):
        """Initial transition matrix"""
        transition = np.zeros((self.k_states, self.k_states))

        # Exogenous regressors component
        if self.state_regression:
            start = -self.k_exog
            # T_\beta
            # (identity block: regression coefficients carry over unchanged)
            transition[start:, start:] = np.eye(self.k_exog)

            # Autoregressive component
            start = -(self.k_exog + self._k_order)
            end = -self.k_exog if self.k_exog > 0 else None
        else:
            # Autoregressive component
            start = -self._k_order
            end = None

        # T_c
        # (companion form for the ARMA block; the Hamilton representation
        # uses the transposed companion form, overwriting the first)
        if self._k_order > 0:
            transition[start:end, start:end] = companion_matrix(self._k_order)
            if self.hamilton_representation:
                transition[start:end, start:end] = np.transpose(
                    companion_matrix(self._k_order)
                )

        # Seasonal differencing component
        # T^*
        if self._k_seasonal_diff > 0:
            seasonal_companion = companion_matrix(self.seasonal_periods).T
            seasonal_companion[0, -1] = 1
            for d in range(self._k_seasonal_diff):
                start = self._k_diff + d * self.seasonal_periods
                end = self._k_diff + (d + 1) * self.seasonal_periods

                # T_c^*
                transition[start:end, start:end] = seasonal_companion

                # i
                # (links between successive seasonal differencing blocks)
                for i in range(d + 1, self._k_seasonal_diff):
                    transition[start, end + self.seasonal_periods - 1] = 1

                # \iota
                transition[start, self._k_states_diff] = 1

        # Differencing component
        if self._k_diff > 0:
            idx = np.triu_indices(self._k_diff)
            # T^**
            transition[idx] = 1
            # [0 1]
            if self.seasonal_periods > 0:
                start = self._k_diff
                end = self._k_states_diff
                transition[:self._k_diff, start:end] = (
                    ([0] * (self.seasonal_periods - 1) + [1]) *
                    self._k_seasonal_diff)
            # [1 0]
            column = self._k_states_diff
            transition[:self._k_diff, column] = 1

        return transition
    @property
    def initial_selection(self):
        """Initial selection matrix"""
        if not (self.state_regression and self.time_varying_regression):
            if self.k_posdef > 0:
                # Single error term loading on the first ARMA state
                selection = np.r_[
                    [0] * (self._k_states_diff),
                    [1] * (self._k_order > 0), [0] * (self._k_order - 1),
                    [0] * ((1 - self.mle_regression) * self.k_exog)
                ][:, None]

                if len(selection) == 0:
                    selection = np.zeros((self.k_states, self.k_posdef))
            else:
                # No state error at all
                selection = np.zeros((self.k_states, 0))
        else:
            # Time-varying regression: one error for the ARMA component (if
            # any) plus one per regression coefficient (the trailing states)
            selection = np.zeros((self.k_states, self.k_posdef))
            # Typical state variance
            if self._k_order > 0:
                selection[0, 0] = 1
            # Time-varying regression coefficient variances
            for i in range(self.k_exog, 0, -1):
                selection[-i, -i] = 1
        return selection
def filter(self, params, **kwargs):
kwargs.setdefault('results_class', SARIMAXResults)
kwargs.setdefault('results_wrapper_class', SARIMAXResultsWrapper)
return super(SARIMAX, self).filter(params, **kwargs)
def smooth(self, params, **kwargs):
kwargs.setdefault('results_class', SARIMAXResults)
kwargs.setdefault('results_wrapper_class', SARIMAXResultsWrapper)
return super(SARIMAX, self).smooth(params, **kwargs)
    @staticmethod
    def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,
                                 polynomial_ma, k_trend=0, trend_data=None):
        """
        Compute conditional sum of squares starting parameter estimates.

        First fits a long autoregression (of order 2 * k_ma) by least squares
        to produce proxy residuals, then regresses the data on trend terms,
        own lags, and lagged residuals to obtain trend, AR, MA, and variance
        estimates.

        Returns
        -------
        tuple
            (params_trend, params_ar, params_ma, params_variance), with empty
            lists for components not present in the model.
        """
        # k: order of the auxiliary long AR; r: number of pre-sample
        # observations lost to lagging in the second-stage regression
        k = 2 * k_ma
        r = max(k + k_ma, k_ar)

        # Number of estimated (included) lag coefficients, excluding the
        # polynomial constant
        k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1
        k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1

        residuals = None
        if k_ar + k_ma + k_trend > 0:
            # If we have MA terms, get residuals from an AR(k) model to use
            # as data for conditional sum of squares estimates of the MA
            # parameters
            if k_ma > 0:
                Y = endog[k:]
                X = lagmat(endog, k, trim='both')
                params_ar = np.linalg.pinv(X).dot(Y)
                residuals = Y - np.dot(X, params_ar)

            # Run an ARMA(p,q) model using the just computed residuals as data
            Y = endog[r:]

            X = np.empty((Y.shape[0], 0))
            if k_trend > 0:
                if trend_data is None:
                    raise ValueError('Trend data must be provided if'
                                     ' `k_trend` > 0.')
                X = np.c_[X, trend_data[:(-r if r > 0 else None), :]]
            if k_ar > 0:
                # Only the columns for lags actually included in the AR
                # polynomial
                cols = polynomial_ar.nonzero()[0][1:] - 1
                X = np.c_[X, lagmat(endog, k_ar)[r:, cols]]
            if k_ma > 0:
                # Only the columns for lags actually included in the MA
                # polynomial
                cols = polynomial_ma.nonzero()[0][1:] - 1
                X = np.c_[X, lagmat(residuals, k_ma)[r-k:, cols]]

            # Get the array of [ar_params, ma_params]
            params = np.linalg.pinv(X).dot(Y)
            residuals = Y - np.dot(X, params)

        # Default output
        params_trend = []
        params_ar = []
        params_ma = []
        params_variance = []

        # Get the params
        # (slice the stacked parameter vector back into its components)
        offset = 0
        if k_trend > 0:
            params_trend = params[offset:k_trend + offset]
            offset += k_trend
        if k_ar > 0:
            params_ar = params[offset:k_params_ar + offset]
            offset += k_params_ar
        if k_ma > 0:
            params_ma = params[offset:k_params_ma + offset]
            offset += k_params_ma
        if residuals is not None:
            params_variance = (residuals[k_params_ma:]**2).mean()

        return (params_trend, params_ar, params_ma,
                params_variance)
    @property
    def start_params(self):
        """
        Starting parameters for maximum likelihood estimation
        """

        # Perform differencing if necessary (i.e. if simple differencing is
        # false so that the state-space model will use the entire dataset)
        trend_data = self._trend_data
        if not self.simple_differencing and (
           self._k_diff > 0 or self._k_seasonal_diff > 0):
            endog = diff(self.endog, self._k_diff,
                         self._k_seasonal_diff, self.seasonal_periods)
            if self.exog is not None:
                exog = diff(self.exog, self._k_diff,
                            self._k_seasonal_diff, self.seasonal_periods)
            else:
                exog = None
            trend_data = trend_data[:endog.shape[0], :]
        else:
            endog = self.endog.copy()
            exog = self.exog.copy() if self.exog is not None else None
        endog = endog.squeeze()

        # Although the Kalman filter can deal with missing values in endog,
        # conditional sum of squares cannot
        if np.any(np.isnan(endog)):
            mask = ~np.isnan(endog).squeeze()
            endog = endog[mask]
            if exog is not None:
                exog = exog[mask]
            if trend_data is not None:
                trend_data = trend_data[mask]

        # Regression effects via OLS
        # (the ARMA starting values below are then computed on the residuals)
        params_exog = []
        if self.k_exog > 0:
            params_exog = np.linalg.pinv(exog).dot(endog)
            endog = endog - np.dot(exog, params_exog)
        if self.state_regression:
            # Coefficients live in the state vector, not the parameter
            # vector, so no starting values are needed for them
            params_exog = []

        # Non-seasonal ARMA component and trend
        (params_trend, params_ar, params_ma,
         params_variance) = self._conditional_sum_squares(
            endog, self.k_ar, self.polynomial_ar, self.k_ma,
            self.polynomial_ma, self.k_trend, trend_data
        )

        # If we have estimated non-stationary start parameters but enforce
        # stationarity is on, raise an error
        invalid_ar = (
            self.k_ar > 0 and
            self.enforce_stationarity and
            not is_invertible(np.r_[1, -params_ar])
        )
        if invalid_ar:
            raise ValueError('Non-stationary starting autoregressive'
                             ' parameters found with `enforce_stationarity`'
                             ' set to True.')

        # If we have estimated non-invertible start parameters but enforce
        # invertibility is on, raise an error
        invalid_ma = (
            self.k_ma > 0 and
            self.enforce_invertibility and
            not is_invertible(np.r_[1, params_ma])
        )
        if invalid_ma:
            raise ValueError('non-invertible starting MA parameters found'
                             ' with `enforce_invertibility` set to True.')

        # Seasonal Parameters
        _, params_seasonal_ar, params_seasonal_ma, params_seasonal_variance = (
            self._conditional_sum_squares(
                endog, self.k_seasonal_ar, self.polynomial_seasonal_ar,
                self.k_seasonal_ma, self.polynomial_seasonal_ma
            )
        )

        # If we have estimated non-stationary start parameters but enforce
        # stationarity is on, raise an error
        invalid_seasonal_ar = (
            self.k_seasonal_ar > 0 and
            self.enforce_stationarity and
            not is_invertible(np.r_[1, -params_seasonal_ar])
        )
        if invalid_seasonal_ar:
            raise ValueError('Non-stationary starting autoregressive'
                             ' parameters found with `enforce_stationarity`'
                             ' set to True.')

        # If we have estimated non-invertible start parameters but enforce
        # invertibility is on, raise an error
        invalid_seasonal_ma = (
            self.k_seasonal_ma > 0 and
            self.enforce_invertibility and
            not is_invertible(np.r_[1, params_seasonal_ma])
        )
        if invalid_seasonal_ma:
            raise ValueError('non-invertible starting seasonal moving average'
                             ' parameters found with `enforce_invertibility`'
                             ' set to True.')

        # Variances
        params_exog_variance = []
        if self.state_regression and self.time_varying_regression:
            # TODO how to set the initial variance parameters?
            params_exog_variance = [1] * self.k_exog
        # Fall back on the seasonal CSS variance, or a moment-based value,
        # when the non-seasonal CSS step produced no variance estimate
        # NOTE(review): `params_variance == []` compares against a list even
        # though the value may be a scalar; verify this matches the intended
        # "no estimate produced" check.
        if self.state_error and params_variance == []:
            if not params_seasonal_variance == []:
                params_variance = params_seasonal_variance
            elif self.k_exog > 0:
                params_variance = np.inner(endog, endog)
            else:
                params_variance = np.inner(endog, endog) / self.nobs
        params_measurement_variance = 1 if self.measurement_error else []

        # Combine all parameters
        # (ordering must match `params_complete` / `transform_params`)
        return np.r_[
            params_trend,
            params_exog,
            params_ar,
            params_ma,
            params_seasonal_ar,
            params_seasonal_ma,
            params_exog_variance,
            params_measurement_variance,
            params_variance
        ]
    @property
    def endog_names(self, latex=False):
        """Names of endogenous variables"""
        # NOTE(review): since this is a property, the `latex` argument can
        # never be supplied by callers and is always False here.
        diff = ''
        if self.k_diff > 0:
            if self.k_diff == 1:
                diff = '\Delta' if latex else 'D'
            else:
                diff = ('\Delta^%d' if latex else 'D%d') % self.k_diff

        seasonal_diff = ''
        if self.k_seasonal_diff > 0:
            if self.k_seasonal_diff == 1:
                seasonal_diff = (('\Delta_%d' if latex else 'DS%d') %
                                 (self.seasonal_periods))
            else:
                seasonal_diff = (('\Delta_%d^%d' if latex else 'D%dS%d') %
                                 (self.k_seasonal_diff, self.seasonal_periods))
        # Only prefix differencing markers when simple differencing actually
        # transformed the data
        endog_diff = self.simple_differencing
        if endog_diff and self.k_diff > 0 and self.k_seasonal_diff > 0:
            return (('%s%s %s' if latex else '%s.%s.%s') %
                    (diff, seasonal_diff, self.data.ynames))
        elif endog_diff and self.k_diff > 0:
            return (('%s %s' if latex else '%s.%s') %
                    (diff, self.data.ynames))
        elif endog_diff and self.k_seasonal_diff > 0:
            return (('%s %s' if latex else '%s.%s') %
                    (seasonal_diff, self.data.ynames))
        else:
            return self.data.ynames
params_complete = [
'trend', 'exog', 'ar', 'ma', 'seasonal_ar', 'seasonal_ma',
'exog_variance', 'measurement_variance', 'variance'
]
@property
def param_terms(self):
"""
List of parameters actually included in the model, in sorted order.
TODO Make this an OrderedDict with slice or indices as the values.
"""
model_orders = self.model_orders
# Get basic list from model orders
params = [
order for order in self.params_complete
if model_orders[order] > 0
]
# k_exog may be positive without associated parameters if it is in the
# state vector
if 'exog' in params and not self.mle_regression:
params.remove('exog')
return params
@property
def param_names(self):
"""
List of human readable parameter names (for parameters actually
included in the model).
"""
params_sort_order = self.param_terms
model_names = self.model_names
return [
name for param in params_sort_order for name in model_names[param]
]
@property
def model_orders(self):
"""
The orders of each of the polynomials in the model.
"""
return {
'trend': self.k_trend,
'exog': self.k_exog,
'ar': self.k_ar,
'ma': self.k_ma,
'seasonal_ar': self.k_seasonal_ar,
'seasonal_ma': self.k_seasonal_ma,
'reduced_ar': self.k_ar + self.k_seasonal_ar,
'reduced_ma': self.k_ma + self.k_seasonal_ma,
'exog_variance': self.k_exog if (
self.state_regression and self.time_varying_regression) else 0,
'measurement_variance': int(self.measurement_error),
'variance': int(self.state_error),
}
    @property
    def model_names(self):
        """
        The plain text names of all possible model parameters.
        """
        # Delegates to the shared name builder with plain-text formatting.
        return self._get_model_names(latex=False)
    @property
    def model_latex_names(self):
        """
        The latex names of all possible model parameters.
        """
        # Delegates to the shared name builder with LaTeX formatting.
        return self._get_model_names(latex=True)
    def _get_model_names(self, latex=False):
        """
        Build the name lists for every possible parameter block.

        Parameters
        ----------
        latex : bool, optional
            Whether to produce LaTeX-formatted names instead of plain text.

        Returns
        -------
        dict
            Maps each parameter block (the entries of `params_complete` plus
            the reduced-form lag polynomials) to a list of names, or None for
            blocks not present in the model.
        """
        names = {
            'trend': None,
            'exog': None,
            'ar': None,
            'ma': None,
            'seasonal_ar': None,
            'seasonal_ma': None,
            'reduced_ar': None,
            'reduced_ma': None,
            'exog_variance': None,
            'measurement_variance': None,
            'variance': None,
        }

        # Trend
        if self.k_trend > 0:
            trend_template = 't_%d' if latex else 'trend.%d'
            names['trend'] = []
            for i in self.polynomial_trend.nonzero()[0]:
                if i == 0:
                    names['trend'].append('intercept')
                elif i == 1:
                    names['trend'].append('drift')
                else:
                    names['trend'].append(trend_template % i)

        # Exogenous coefficients
        if self.k_exog > 0:
            names['exog'] = self.exog_names

        # Autoregressive
        if self.k_ar > 0:
            ar_template = '$\\phi_%d$' if latex else 'ar.L%d'
            names['ar'] = []
            for i in self.polynomial_ar.nonzero()[0][1:]:
                names['ar'].append(ar_template % i)

        # Moving Average
        if self.k_ma > 0:
            ma_template = '$\\theta_%d$' if latex else 'ma.L%d'
            names['ma'] = []
            for i in self.polynomial_ma.nonzero()[0][1:]:
                names['ma'].append(ma_template % i)

        # Seasonal Autoregressive
        if self.k_seasonal_ar > 0:
            seasonal_ar_template = (
                '$\\tilde \\phi_%d$' if latex else 'ar.S.L%d'
            )
            names['seasonal_ar'] = []
            for i in self.polynomial_seasonal_ar.nonzero()[0][1:]:
                names['seasonal_ar'].append(seasonal_ar_template % i)

        # Seasonal Moving Average
        if self.k_seasonal_ma > 0:
            seasonal_ma_template = (
                '$\\tilde \\theta_%d$' if latex else 'ma.S.L%d'
            )
            names['seasonal_ma'] = []
            for i in self.polynomial_seasonal_ma.nonzero()[0][1:]:
                names['seasonal_ma'].append(seasonal_ma_template % i)

        # Reduced Form Autoregressive
        # (product of the non-seasonal and seasonal AR polynomials)
        if self.k_ar > 0 or self.k_seasonal_ar > 0:
            # NOTE(review): the doubled assignment below is redundant but
            # harmless
            reduced_polynomial_ar = reduced_polynomial_ar = -np.polymul(
                self.polynomial_ar, self.polynomial_seasonal_ar
            )
            ar_template = '$\\Phi_%d$' if latex else 'ar.R.L%d'
            names['reduced_ar'] = []
            for i in reduced_polynomial_ar.nonzero()[0][1:]:
                names['reduced_ar'].append(ar_template % i)

        # Reduced Form Moving Average
        # (product of the non-seasonal and seasonal MA polynomials)
        if self.k_ma > 0 or self.k_seasonal_ma > 0:
            reduced_polynomial_ma = np.polymul(
                self.polynomial_ma, self.polynomial_seasonal_ma
            )
            ma_template = '$\\Theta_%d$' if latex else 'ma.R.L%d'
            names['reduced_ma'] = []
            for i in reduced_polynomial_ma.nonzero()[0][1:]:
                names['reduced_ma'].append(ma_template % i)

        # Exogenous variances
        if self.state_regression and self.time_varying_regression:
            exog_var_template = '$\\sigma_\\text{%s}^2$' if latex else 'var.%s'
            names['exog_variance'] = [
                exog_var_template % exog_name for exog_name in self.exog_names
            ]

        # Measurement error variance
        if self.measurement_error:
            meas_var_tpl = (
                '$\\sigma_\\eta^2$' if latex else 'var.measurement_error'
            )
            names['measurement_variance'] = [meas_var_tpl]

        # State variance
        if self.state_error:
            var_tpl = '$\\sigma_\\zeta^2$' if latex else 'sigma2'
            names['variance'] = [var_tpl]

        return names
    def transform_params(self, unconstrained):
        """
        Transform unconstrained parameters used by the optimizer to constrained
        parameters used in likelihood evaluation.

        Used primarily to enforce stationarity of the autoregressive lag
        polynomial, invertibility of the moving average lag polynomial, and
        positive variance parameters.

        Parameters
        ----------
        unconstrained : array_like
            Unconstrained parameters used by the optimizer.

        Returns
        -------
        constrained : array_like
            Constrained parameters used in likelihood evaluation.

        Notes
        -----
        If the lag polynomial has non-consecutive powers (so that the
        coefficient is zero on some element of the polynomial), then the
        constraint function is not onto the entire space of invertible
        polynomials, although it only excludes a very small portion very close
        to the invertibility boundary.
        """
        unconstrained = np.array(unconstrained, ndmin=1)
        constrained = np.zeros(unconstrained.shape, unconstrained.dtype)

        # `start`/`end` track the slice of the parameter vector currently
        # being transformed; each section advances them by its own length
        # (ordering must match `params_complete` / `start_params`).
        start = end = 0

        # Retain the trend parameters
        if self.k_trend > 0:
            end += self.k_trend
            constrained[start:end] = unconstrained[start:end]
            start += self.k_trend

        # Retain any MLE regression coefficients
        if self.mle_regression:
            end += self.k_exog
            constrained[start:end] = unconstrained[start:end]
            start += self.k_exog

        # Transform the AR parameters (phi) to be stationary
        if self.k_ar_params > 0:
            end += self.k_ar_params
            if self.enforce_stationarity:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_ar_params

        # Transform the MA parameters (theta) to be invertible
        if self.k_ma_params > 0:
            end += self.k_ma_params
            if self.enforce_invertibility:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_ma_params

        # Transform the seasonal AR parameters (\tilde phi) to be stationary
        # NOTE(review): this guard tests `k_seasonal_ar` while the seasonal
        # MA section below tests `k_seasonal_ma_params`; confirm these are
        # equivalent for polynomials specified as boolean vectors.
        if self.k_seasonal_ar > 0:
            end += self.k_seasonal_ar_params
            if self.enforce_stationarity:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_seasonal_ar_params

        # Transform the seasonal MA parameters (\tilde theta) to be invertible
        if self.k_seasonal_ma_params > 0:
            end += self.k_seasonal_ma_params
            if self.enforce_invertibility:
                constrained[start:end] = (
                    constrain_stationary_univariate(unconstrained[start:end])
                )
            else:
                constrained[start:end] = unconstrained[start:end]
            start += self.k_seasonal_ma_params

        # Transform the standard deviation parameters to be positive
        # (squaring maps the unconstrained reals onto non-negative variances)
        if self.state_regression and self.time_varying_regression:
            end += self.k_exog
            constrained[start:end] = unconstrained[start:end]**2
            start += self.k_exog
        if self.measurement_error:
            constrained[start] = unconstrained[start]**2
            start += 1
            end += 1
        if self.state_error:
            constrained[start] = unconstrained[start]**2
            # start += 1
            # end += 1

        return constrained
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Used primarily to reverse enforcement of stationarity of the
autoregressive lag polynomial and invertibility of the moving average
lag polynomial.
Parameters
----------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Returns
-------
constrained : array_like
Unconstrained parameters used by the optimizer.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
constrained = np.array(constrained, ndmin=1)
unconstrained = np.zeros(constrained.shape, constrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
unconstrained[start:end] = constrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ma_params
# Untransform the standard deviation
if self.state_regression and self.time_varying_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]**0.5
start += self.k_exog
if self.measurement_error:
unconstrained[start] = constrained[start]**0.5
start += 1
end += 1
if self.state_error:
unconstrained[start] = constrained[start]**0.5
# start += 1
# end += 1
return unconstrained
    def update(self, params, transformed=True, complex_step=False):
        """
        Update the parameters of the model

        Updates the representation matrices to fill in the new parameter
        values.

        Parameters
        ----------
        params : array_like
            Array of new parameters.
        transformed : boolean, optional
            Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True..
        complex_step : boolean, optional
            Whether complex-step differentiation is in use; forwarded to
            `initialize_state` when re-initializing at the end.

        Returns
        -------
        params : array_like
            Array of parameters.
        """
        # NOTE(review): `complex_step=False` is hard-coded here instead of
        # forwarding the `complex_step` argument -- confirm this is intended.
        params = super(SARIMAX, self).update(params, transformed=transformed,
                                             complex_step=False)

        # Placeholders for each parameter group; groups absent from this
        # model specification remain None.
        params_trend = None
        params_exog = None
        params_ar = None
        params_ma = None
        params_seasonal_ar = None
        params_seasonal_ma = None
        params_exog_variance = None
        params_measurement_variance = None
        params_variance = None

        # Extract the parameters
        # (the layout mirrors transform_params / untransform_params: trend,
        # exog, AR, MA, seasonal AR, seasonal MA, then variance terms)
        start = end = 0
        end += self.k_trend
        params_trend = params[start:end]
        start += self.k_trend
        if self.mle_regression:
            end += self.k_exog
            params_exog = params[start:end]
            start += self.k_exog
        end += self.k_ar_params
        params_ar = params[start:end]
        start += self.k_ar_params
        end += self.k_ma_params
        params_ma = params[start:end]
        start += self.k_ma_params
        end += self.k_seasonal_ar_params
        params_seasonal_ar = params[start:end]
        start += self.k_seasonal_ar_params
        end += self.k_seasonal_ma_params
        params_seasonal_ma = params[start:end]
        start += self.k_seasonal_ma_params
        if self.state_regression and self.time_varying_regression:
            end += self.k_exog
            params_exog_variance = params[start:end]
            start += self.k_exog
        if self.measurement_error:
            params_measurement_variance = params[start]
            start += 1
            end += 1
        if self.state_error:
            params_variance = params[start]
        # start += 1
        # end += 1

        # Update lag polynomials
        # The AR polynomial is stored as 1 - phi_1 L - ... so the estimated
        # coefficients enter with flipped sign; the dtype branches keep the
        # stored polynomial's dtype in sync with the params' dtype (e.g.
        # complex during complex-step differentiation).
        if self.k_ar > 0:
            if self.polynomial_ar.dtype == params.dtype:
                self.polynomial_ar[self._polynomial_ar_idx] = -params_ar
            else:
                polynomial_ar = self.polynomial_ar.real.astype(params.dtype)
                polynomial_ar[self._polynomial_ar_idx] = -params_ar
                self.polynomial_ar = polynomial_ar

        if self.k_ma > 0:
            if self.polynomial_ma.dtype == params.dtype:
                self.polynomial_ma[self._polynomial_ma_idx] = params_ma
            else:
                polynomial_ma = self.polynomial_ma.real.astype(params.dtype)
                polynomial_ma[self._polynomial_ma_idx] = params_ma
                self.polynomial_ma = polynomial_ma

        if self.k_seasonal_ar > 0:
            idx = self._polynomial_seasonal_ar_idx
            if self.polynomial_seasonal_ar.dtype == params.dtype:
                self.polynomial_seasonal_ar[idx] = -params_seasonal_ar
            else:
                polynomial_seasonal_ar = (
                    self.polynomial_seasonal_ar.real.astype(params.dtype)
                )
                polynomial_seasonal_ar[idx] = -params_seasonal_ar
                self.polynomial_seasonal_ar = polynomial_seasonal_ar

        if self.k_seasonal_ma > 0:
            idx = self._polynomial_seasonal_ma_idx
            if self.polynomial_seasonal_ma.dtype == params.dtype:
                self.polynomial_seasonal_ma[idx] = params_seasonal_ma
            else:
                polynomial_seasonal_ma = (
                    self.polynomial_seasonal_ma.real.astype(params.dtype)
                )
                polynomial_seasonal_ma[idx] = params_seasonal_ma
                self.polynomial_seasonal_ma = polynomial_seasonal_ma

        # Get the reduced form lag polynomial terms by multiplying the regular
        # and seasonal lag polynomials
        # Note: that although the numpy np.polymul examples assume that they
        # are ordered from highest degree to lowest, whereas our are from
        # lowest to highest, it does not matter.
        if self.k_seasonal_ar > 0:
            reduced_polynomial_ar = -np.polymul(
                self.polynomial_ar, self.polynomial_seasonal_ar
            )
        else:
            reduced_polynomial_ar = -self.polynomial_ar
        if self.k_seasonal_ma > 0:
            reduced_polynomial_ma = np.polymul(
                self.polynomial_ma, self.polynomial_seasonal_ma
            )
        else:
            reduced_polynomial_ma = self.polynomial_ma

        # Observation intercept
        # Exogenous data with MLE estimation of parameters enters through a
        # time-varying observation intercept (is equivalent to simply
        # subtracting it out of the endogenous variable first)
        if self.mle_regression:
            self.ssm['obs_intercept'] = np.dot(self.exog, params_exog)[None, :]

        # State intercept (Harvey) or additional observation intercept
        # (Hamilton)
        # SARIMA trend enters through the a time-varying state intercept,
        # associated with the first row of the stationary component of the
        # state vector (i.e. the first element of the state vector following
        # any differencing elements)
        if self.k_trend > 0:
            data = np.dot(self._trend_data, params_trend).astype(params.dtype)
            if not self.hamilton_representation:
                self.ssm['state_intercept', self._k_states_diff, :] = data
            else:
                # The way the trend enters in the Hamilton representation means
                # that the parameter is not an ``intercept'' but instead the
                # mean of the process. The trend values in `data` are meant for
                # an intercept, and so must be transformed to represent the
                # mean instead
                # (this inner check is always True inside this else branch)
                if self.hamilton_representation:
                    data /= np.sum(-reduced_polynomial_ar)

                # If we already set the observation intercept for MLE
                # regression, just add to it
                if self.mle_regression:
                    self.ssm.obs_intercept += data[None, :]
                # Otherwise set it directly
                else:
                    self.ssm['obs_intercept'] = data[None, :]

        # Observation covariance matrix
        if self.measurement_error:
            self.ssm['obs_cov', 0, 0] = params_measurement_variance

        # Transition matrix
        if self.k_ar > 0 or self.k_seasonal_ar > 0:
            self.ssm[self.transition_ar_params_idx] = reduced_polynomial_ar[1:]
        elif not self.ssm.transition.dtype == params.dtype:
            # This is required if the transition matrix is not really in use
            # (e.g. for an MA(q) process) so that it's dtype never changes as
            # the parameters' dtype changes. This changes the dtype manually.
            self.ssm['transition'] = self.ssm['transition'].real.astype(
                params.dtype)

        # Selection matrix (Harvey) or Design matrix (Hamilton)
        if self.k_ma > 0 or self.k_seasonal_ma > 0:
            if not self.hamilton_representation:
                self.ssm[self.selection_ma_params_idx] = (
                    reduced_polynomial_ma[1:]
                )
            else:
                self.ssm[self.design_ma_params_idx] = reduced_polynomial_ma[1:]

        # State covariance matrix
        if self.k_posdef > 0:
            self.ssm['state_cov', 0, 0] = params_variance
            if self.state_regression and self.time_varying_regression:
                self.ssm[self._exog_variance_idx] = params_exog_variance

        # Initialize
        # (skipped if the user called `initialize_state` manually)
        if not self._manual_initialization:
            self.initialize_state(complex_step=complex_step)

        return params
class SARIMAXResults(MLEResults):
    """
    Class to hold results from fitting an SARIMAX model.

    Parameters
    ----------
    model : SARIMAX instance
        The fitted model instance

    Attributes
    ----------
    specification : dictionary
        Dictionary including all attributes from the SARIMAX model instance.
    polynomial_ar : array
        Array containing autoregressive lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_ma : array
        Array containing moving average lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_seasonal_ar : array
        Array containing seasonal autoregressive lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_seasonal_ma : array
        Array containing seasonal moving average lag polynomial coefficients,
        ordered from lowest degree to highest. Initialized with ones, unless
        a coefficient is constrained to be zero (in which case it is zero).
    polynomial_trend : array
        Array containing trend polynomial coefficients, ordered from lowest
        degree to highest. Initialized with ones, unless a coefficient is
        constrained to be zero (in which case it is zero).
    model_orders : list of int
        The orders of each of the polynomials in the model.
    param_terms : list of str
        List of parameters actually included in the model, in sorted order.

    See Also
    --------
    statsmodels.tsa.statespace.kalman_filter.FilterResults
    statsmodels.tsa.statespace.mlemodel.MLEResults
    """
    def __init__(self, model, params, filter_results, cov_type='opg',
                 **kwargs):
        super(SARIMAXResults, self).__init__(model, params, filter_results,
                                             cov_type, **kwargs)

        self.df_resid = np.inf  # attribute required for wald tests

        # Save _init_kwds (used by get_prediction to build a faux model for
        # out-of-sample forecasting)
        self._init_kwds = self.model._get_init_kwds()

        # Save model specification
        self.specification = Bunch(**{
            # Set additional model parameters
            'seasonal_periods': self.model.seasonal_periods,
            'measurement_error': self.model.measurement_error,
            'time_varying_regression': self.model.time_varying_regression,
            'simple_differencing': self.model.simple_differencing,
            'enforce_stationarity': self.model.enforce_stationarity,
            'enforce_invertibility': self.model.enforce_invertibility,
            'hamilton_representation': self.model.hamilton_representation,

            'order': self.model.order,
            'seasonal_order': self.model.seasonal_order,

            # Model order
            'k_diff': self.model.k_diff,
            'k_seasonal_diff': self.model.k_seasonal_diff,
            'k_ar': self.model.k_ar,
            'k_ma': self.model.k_ma,
            'k_seasonal_ar': self.model.k_seasonal_ar,
            'k_seasonal_ma': self.model.k_seasonal_ma,

            # Param Numbers
            'k_ar_params': self.model.k_ar_params,
            'k_ma_params': self.model.k_ma_params,

            # Trend / Regression
            'trend': self.model.trend,
            'k_trend': self.model.k_trend,
            'k_exog': self.model.k_exog,

            'mle_regression': self.model.mle_regression,
            'state_regression': self.model.state_regression,
        })

        # Polynomials
        self.polynomial_trend = self.model.polynomial_trend
        self.polynomial_ar = self.model.polynomial_ar
        self.polynomial_ma = self.model.polynomial_ma
        self.polynomial_seasonal_ar = self.model.polynomial_seasonal_ar
        self.polynomial_seasonal_ma = self.model.polynomial_seasonal_ma
        self.polynomial_reduced_ar = np.polymul(
            self.polynomial_ar, self.polynomial_seasonal_ar
        )
        self.polynomial_reduced_ma = np.polymul(
            self.polynomial_ma, self.polynomial_seasonal_ma
        )

        # Distinguish parameters: slice the flat params vector into one
        # `_params_<name>` attribute per parameter group
        self.model_orders = self.model.model_orders
        self.param_terms = self.model.param_terms
        start = end = 0
        for name in self.param_terms:
            end += self.model_orders[name]
            setattr(self, '_params_%s' % name, self.params[start:end])
            start += self.model_orders[name]

        # Handle removing data
        self._data_attr_model.extend(['orig_endog', 'orig_exog'])

    @cache_readonly
    def arroots(self):
        """
        (array) Roots of the reduced form autoregressive lag polynomial
        """
        return np.roots(self.polynomial_reduced_ar)**-1

    @cache_readonly
    def maroots(self):
        """
        (array) Roots of the reduced form moving average lag polynomial
        """
        return np.roots(self.polynomial_reduced_ma)**-1

    @cache_readonly
    def arfreq(self):
        """
        (array) Frequency of the roots of the reduced form autoregressive
        lag polynomial
        """
        z = self.arroots
        if not z.size:
            return
        return np.arctan2(z.imag, z.real) / (2 * np.pi)

    @cache_readonly
    def mafreq(self):
        """
        (array) Frequency of the roots of the reduced form moving average
        lag polynomial
        """
        z = self.maroots
        if not z.size:
            return
        return np.arctan2(z.imag, z.real) / (2 * np.pi)

    @cache_readonly
    def arparams(self):
        """
        (array) Autoregressive parameters actually estimated in the model.
        Does not include parameters whose values are constrained to be zero.
        """
        return self._params_ar

    @cache_readonly
    def maparams(self):
        """
        (array) Moving average parameters actually estimated in the model.
        Does not include parameters whose values are constrained to be zero.
        """
        return self._params_ma

    def get_prediction(self, start=None, end=None, dynamic=False, index=None,
                       exog=None, **kwargs):
        """
        In-sample prediction and out-of-sample forecasting

        Parameters
        ----------
        start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the the zeroth observation.
        end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. However, if the dates index does not
            have a fixed frequency, end must be an integer index if you
            want out of sample prediction. Default is the last observation in
            the sample.
        exog : array_like, optional
            If the model includes exogenous regressors, you must provide
            exactly enough out-of-sample values for the exogenous variables if
            end is beyond the last observation in the sample.
        dynamic : boolean, int, str, or datetime, optional
            Integer offset relative to `start` at which to begin dynamic
            prediction. Can also be an absolute date string to parse or a
            datetime type (these are not interpreted as offsets).
            Prior to this observation, true endogenous values will be used for
            prediction; starting with this observation and continuing through
            the end of prediction, forecasted endogenous values will be used
            instead.
        **kwargs
            Additional arguments may required for forecasting beyond the end
            of the sample. See `FilterResults.predict` for more details.

        Returns
        -------
        forecast : array
            Array of out of sample forecasts.
        """
        if start is None:
            start = self.model._index[0]

        # Handle start, end, dynamic
        _start, _end, _out_of_sample, prediction_index = (
            self.model._get_prediction_index(start, end, index, silent=True))

        # Handle exogenous parameters
        if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
            # Create a new faux SARIMAX model for the extended dataset
            nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
            endog = np.zeros((nobs, self.model.k_endog))

            if self.model.k_exog > 0:
                if exog is None:
                    raise ValueError('Out-of-sample forecasting in a model'
                                     ' with a regression component requires'
                                     ' additional exogenous values via the'
                                     ' `exog` argument.')
                exog = np.array(exog)
                required_exog_shape = (_out_of_sample, self.model.k_exog)
                if not exog.shape == required_exog_shape:
                    raise ValueError('Provided exogenous values are not of the'
                                     ' appropriate shape. Required %s, got %s.'
                                     % (str(required_exog_shape),
                                        str(exog.shape)))
                exog = np.c_[self.model.data.orig_exog.T, exog.T].T

            model_kwargs = self._init_kwds.copy()
            model_kwargs['exog'] = exog
            model = SARIMAX(endog, **model_kwargs)
            model.update(self.params)

            # Set the kwargs with the update time-varying state space
            # representation matrices
            for name in self.filter_results.shapes.keys():
                if name == 'obs':
                    continue
                mat = getattr(model.ssm, name)
                if mat.shape[-1] > 1:
                    if len(mat.shape) == 2:
                        kwargs[name] = mat[:, -_out_of_sample:]
                    else:
                        kwargs[name] = mat[:, :, -_out_of_sample:]
        elif self.model.k_exog == 0 and exog is not None:
            warn('Exogenous array provided to predict, but additional data not'
                 ' required. `exog` argument ignored.', ValueWarning)

        return super(SARIMAXResults, self).get_prediction(
            start=start, end=end, dynamic=dynamic, index=index, exog=exog,
            **kwargs)

    def summary(self, alpha=.05, start=None):
        # Create the model name

        # See if we have an ARIMA component
        order = ''
        if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
            if self.model.k_ar == self.model.k_ar_params:
                order_ar = self.model.k_ar
            else:
                order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])
            if self.model.k_ma == self.model.k_ma_params:
                order_ma = self.model.k_ma
            else:
                order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])
            # If there is simple differencing, then that is reflected in the
            # dependent variable name
            k_diff = 0 if self.model.simple_differencing else self.model.k_diff
            order = '(%s, %d, %s)' % (order_ar, k_diff, order_ma)
        # See if we have an SARIMA component
        seasonal_order = ''
        has_seasonal = (
            self.model.k_seasonal_ar +
            self.model.k_seasonal_diff +
            self.model.k_seasonal_ma
        ) > 0
        if has_seasonal:
            # Fix: compare the *seasonal* orders against the number of free
            # *seasonal* parameters (previously compared `k_ar`/`k_ma`, which
            # could report the wrong seasonal order specification)
            if self.model.k_seasonal_ar == self.model.k_seasonal_ar_params:
                order_seasonal_ar = (
                    int(self.model.k_seasonal_ar / self.model.seasonal_periods)
                )
            else:
                order_seasonal_ar = (
                    tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])
                )
            if self.model.k_seasonal_ma == self.model.k_seasonal_ma_params:
                order_seasonal_ma = (
                    int(self.model.k_seasonal_ma / self.model.seasonal_periods)
                )
            else:
                order_seasonal_ma = (
                    tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])
                )
            # If there is simple differencing, then that is reflected in the
            # dependent variable name
            k_seasonal_diff = self.model.k_seasonal_diff
            if self.model.simple_differencing:
                k_seasonal_diff = 0
            seasonal_order = ('(%s, %d, %s, %d)' %
                              (str(order_seasonal_ar), k_seasonal_diff,
                               str(order_seasonal_ma),
                               self.model.seasonal_periods))
            if not order == '':
                order += 'x'
        model_name = (
            '%s%s%s' % (self.model.__class__.__name__, order, seasonal_order)
        )
        return super(SARIMAXResults, self).summary(
            alpha=alpha, start=start, model_name=model_name
        )
    summary.__doc__ = MLEResults.summary.__doc__
class SARIMAXResultsWrapper(MLEResultsWrapper):
    # Pandas-aware wrapper for SARIMAXResults; no attributes or methods are
    # wrapped beyond those already handled by MLEResultsWrapper.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
                                     _methods)
# Register the wrapper so SARIMAX.fit() returns wrapped results
wrap.populate_wrapper(SARIMAXResultsWrapper, SARIMAXResults)
| bert9bert/statsmodels | statsmodels/tsa/statespace/sarimax.py | Python | bsd-3-clause | 82,327 |
import warnings
from copy import deepcopy
from scipy import fftpack as fp
from scipy import signal
import numpy as np
from matplotlib import pyplot as plt
from lib2.IQPulseSequence import IQPulseBuilder
from lib2.MeasurementResult import MeasurementResult
import lib2.directMeasurements.digitizerTimeResolvedDirectMeasurement as dtrdm
from typing import List
from drivers.Spectrum_m4x import SPCM
from drivers.E8257D import MXG
from drivers.IQAWG import IQAWG
from drivers.Yokogawa_GS210 import Yokogawa_GS210
from lib.iq_downconversion_calibration import IQDownconversionCalibrationResult
class SimulataneousReflectionTransmission(
        dtrdm.DigitizerTimeResolvedDirectMeasurement):
    """
    Time-resolved digitizer measurement built around a stimulated-emission
    pulse sequence; sweeps pulse amplitude, phase, repetition period or LO
    shift and records (optionally filtered / demodulated) traces.

    NOTE(review): the misspelled class name ("Simulataneous") is kept for
    backward compatibility with existing callers.
    """

    def __init__(self, name, sample_name, devs_aliases_map,
                 plot_update_interval=1, save_traces=False):
        super().__init__(name, sample_name, devs_aliases_map,
                         plot_update_interval, save_traces)
        self._delay_correction = 0
        self._sequence_generator = \
            IQPulseBuilder.build_stimulated_emission_sequence
        # if pulse edges are being subtracted, this parameter scales
        # the default pulse edge interval (before and after main pulse)
        self._pulse_edge_mult = None
        # Get absolute value of measured traces in every iteration
        self.option_abs = False
        # Rotate the pulse phase by pi and subtract the resulting trace
        # (see `_recording_iteration`)
        self._subtract_pi = False
        self._shifted_traces = []  # backup list with traces
        # Coil currents. One is used for the measurement, another for
        # shifting the qubit away from resonance
        self._main_current = 0
        self._shifted_current = 0
        self._ro_cal = None
        self._ro_cals = None
        self._downconv_cals = None
        self._shifts = None
        # Fix: the following attributes are read in `_recording_iteration`
        # but were previously never initialized, raising AttributeError
        # unless callers set them externally. Safe defaults:
        # subtract a trace taken with the qubit shifted off resonance
        self._subtract_shifted = False
        # zero the trace outside the pulse interval
        self._cut_everything_outside_the_pulse = False
        # apply a digital FIR filter (overwritten by `set_fixed_parameters`)
        self.apply_filter = True
        # raw trace backups collected during the measurement
        self.data_backup = []
        self.data_pi_backup = []

    def _init_measurement_result(self):
        # Attach the result container used for live plotting / storage
        self._measurement_result = ReflectionTransmissionResult(
            self._name, self._sample_name)

    def set_fixed_parameters(self, pulse_sequence_parameters,
                             freq_limits=(-50e6, 50e6), delay_correction=0,
                             down_conversion_calibration=None,
                             subtract_pi=False,
                             pulse_edge_mult=1.0,
                             q_lo_params=None,
                             q_iqawg_params=None, dig_params=None,
                             filter=True):
        """
        Parameters
        ----------
        pulse_sequence_parameters: dict
            single pulse parameters
        freq_limits: tuple of 2 values
        delay_correction: int
            A correction of a digitizer delay given in samples
            For flexibility, it is applied after the measurement. For example,
            when you look at the trace and decide, that the digitizer delay
            should have been different
        down_conversion_calibration: IQDownconversionCalibrationResult
        subtract_pi: bool
            True if you want to make the Furier spectrum clearer by
            subtracting the same trace with pulses shifted by phase pi
        q_lo_params
        q_iqawg_params
        dig_params
        filter: bool
            True to apply a digital FIR filter to each measured trace

        Returns
        -------
        Nothing
        """
        q_lo_params[0]["power"] = q_iqawg_params[0]["calibration"] \
            .get_radiation_parameters()["lo_power"]

        # a snapshot of initial seq pars structure passed into measurement
        self._pulse_sequence_parameters_init = deepcopy(
            pulse_sequence_parameters
        )

        super().set_fixed_parameters(
            pulse_sequence_parameters,
            freq_limits=freq_limits,
            down_conversion_calibration=down_conversion_calibration,
            q_lo_params=q_lo_params,
            q_iqawg_params=q_iqawg_params,
            dig_params=dig_params
        )
        self._subtract_pi = subtract_pi
        self._pulse_edge_mult = pulse_edge_mult

        self._delay_correction = delay_correction
        self._measurement_result._freq_lims = freq_limits
        self.apply_filter = filter  # Flag: apply a digital FIR filter

        # longest repetition period is initially set with data from
        # 'pulse_sequence_parameters'
        self.max_segment_duration = \
            pulse_sequence_parameters["repetition_period"] * \
            pulse_sequence_parameters["periods_per_segment"]

        dig = self._dig[0]

        """ Supplying additional arrays to 'self._measurement_result' class """
        meas_data = self._measurement_result.get_data()
        # if_freq is already set in call of 'super()' class method
        meas_data["if_freq"] = self._frequencies
        meas_data["delay_correction"] = self._delay_correction
        meas_data["start_idx"] = self._start_idx
        meas_data["end_idx"] = self._end_idx
        # time in nanoseconds
        meas_data["sample_rate"] = dig.get_sample_rate()
        self._measurement_result.set_data(meas_data)

    def _get_longest_pulse_sequence_duration(self, pulse_sequence_parameters,
                                             swept_pars):
        # Not needed for this measurement; segment duration is tracked via
        # `max_segment_duration` instead.
        return 0

    def sweep_pulse_amplitude(self, amplitude_coefficients):
        """Configure a sweep over the excitation pulse amplitude."""
        self._name += "_ampl"
        swept_pars = {"Pulse amplitude coefficient": (
            self._set_excitation_amplitude, amplitude_coefficients)}
        self.set_swept_parameters(**swept_pars)

    def _set_excitation_amplitude(self, amplitude_coefficient):
        self._pulse_sequence_parameters["excitation_amplitudes"] = [
            amplitude_coefficient]
        self._output_pulse_sequence()

    def sweep_repetition_period(self, periods):
        """Configure a sweep over the pulse repetition period (ns)."""
        self.set_swept_parameters(**{"Period of pulses repetition, ns": (
            self._set_repetition_period, periods)})
        self.max_segment_duration = \
            np.max(periods) * \
            self._pulse_sequence_parameters["periods_per_segment"]
        self._measurement_result.set_parameter_name(
            "Period of pulses repetition, ns")

    def _set_repetition_period(self, period):
        self._pulse_sequence_parameters["repetition_period"] = period
        # keep the digitizer segment duration in sync with the new period
        self._dig[0].dur_seg_ns = \
            self._pulse_sequence_parameters["periods_per_segment"] * period
        self._output_pulse_sequence()

    def sweep_pulse_phase(self, phases):
        """Configure a sweep over the excitation pulse phase (radians)."""
        self._name += "_phase"
        swept_pars = {"Pulse phase, radians": (self._set_phase_shift, phases)}
        self.set_swept_parameters(**swept_pars)

    def _set_phase_shift(self, phase):
        self._pulse_sequence_parameters["phase_shifts"] = [phase]
        self._output_pulse_sequence()

    def sweep_lo_shift(self, shifts, ro_cals, downconv_cals):
        """
        Configure a sweep over the LO frequency shift; each shift has its own
        readout and downconversion calibration.
        """
        self._name += "_lo"
        self._ro_cals = ro_cals
        self._downconv_cals = downconv_cals
        self._shifts = shifts
        swept_pars = {"LO shift, Hz": (self._set_lo_shift, shifts)}
        self.set_swept_parameters(**swept_pars)

    def _set_lo_shift(self, shift):
        # pick the calibration pair matching the requested shift
        idx = np.abs(self._shifts - shift).argmin()
        ro_cal = self._ro_cals[idx]
        self._q_lo[0].set_frequency(ro_cal.get_lo_frequency())
        self._q_iqawg[0].set_parameters({"calibration": ro_cal})
        self._down_conversion_calibration = self._downconv_cals[idx]
        if self._down_conversion_calibration is not None:
            self._down_conversion_calibration.set_shift(shift)
        self._output_pulse_sequence()

    def _obtain_shifted_trace(self):
        # Measure a reference trace with the qubit shifted off resonance
        self._src[0].set_current(self._shifted_current)
        time, trace = self._measure_one_trace()
        self._shifted_traces.append(trace)
        self._src[0].set_current(self._main_current)

    def _recording_iteration(self):
        """
        Measure one trace, apply the optional pi-phase / shifted-trace
        subtractions, demodulate (or square), filter and zero-pad it to the
        longest segment length.
        """
        time, data = self._measure_one_trace()
        self.data_backup.append(data.copy())
        if self._subtract_pi:
            phase = 0
            if "phase_shifts" in self._pulse_sequence_parameters.keys():
                phase = self._pulse_sequence_parameters["phase_shifts"][0]
            self._pulse_sequence_parameters["phase_shifts"] = [phase + np.pi]
            self._output_pulse_sequence()
            time, data_pi = self._measure_one_trace()
            self.data_pi_backup.append(data_pi.copy())
            data -= data_pi
            self._pulse_sequence_parameters["phase_shifts"] = [phase]

        # Subtract the trace with a pulse, that was shifted far from
        # resonance. Can be used to subtract the pulse shape and preserve
        # quantum oscillations
        if self._subtract_shifted:
            self._obtain_shifted_trace()
            data -= self._shifted_traces[-1]

        # Parameters 'first_pulse_start' and 'last_pulse_end' are calculated
        # and stored into 'pulse_sequence_parameters' during the last call
        # to 'self._sequence_generator' function
        pulse_start = \
            self._pulse_sequence_parameters["first_pulse_start"]  # ns
        pulse_end = \
            self._pulse_sequence_parameters["last_pulse_end"]  # ns
        pulse_edge = 0
        if self._pulse_sequence_parameters["modulating_window"] == "tukey":
            pulse_edge = self._pulse_sequence_parameters["window_parameter"]
            pulse_edge *= (pulse_end - pulse_start)/2 * self._pulse_edge_mult
        target_interval = (pulse_start + pulse_edge, pulse_end - pulse_edge)

        # constructing a mask for pulses
        def belongs(t, interval):
            return (t >= interval[0]) & (t <= interval[1])

        repetition_period = self._pulse_sequence_parameters[
            "repetition_period"]  # ns
        pulses_mask = belongs(time % repetition_period, target_interval)

        # CHANGE_1 UNCOMMENT THIS
        if self.option_abs is True:
            data = np.abs(data)**2
        else:
            # demodulate at the intermediate frequency
            if_freq = self._q_iqawg[0].get_calibration().get_if_frequency()
            data = data * np.exp(-2j * np.pi * if_freq * time * 1e-9)

        # filtering
        if self.apply_filter:
            b = signal.firwin(len(data), self._freq_limits[1],
                              fs=self._dig[0].get_sample_rate())
            data = signal.convolve(data, b, "same")

        if self._cut_everything_outside_the_pulse:
            # the trace outside pulses is set to zero
            data[np.logical_not(pulses_mask)] = 0

        max_length = int(
            self.max_segment_duration * 1e-9 *  # seconds
            self._dig[0].get_sample_rate()  # Hz
        )

        # CHANGE_1 place this after 'data = np.abs(data)'
        # subtracting pulse amplitude from pulses
        # + 'data -> data[pulse_mask]'
        data[pulses_mask] -= np.mean(data[pulses_mask])

        # CHANGE_1, copy is needed to insure reference count safety
        # if copy is ommited, then "does not own it's value" exception
        # is raised
        data = data.copy()
        data.resize(max_length)  # note that np.resize() works differently
        return data
class ReflectionTransmissionResult(MeasurementResult):
def __init__(self, name, sample_name):
super().__init__(name, sample_name)
self.ylabel = r"$\vert F[ \vert \left<V(t)\right> \vert^2 ](" \
r"f)\vert$, dBm"
self._parameter_name = None
self._fft_vertical_shift = 50 # dB
self._trace_vertical_shift = 0.75 # mV
self._freq_lims = [-50, 50] # MHz
self._yf_lims = [-80, 0] # dBm
# in case custom fourier transform required
# (at user-define frequencies)
self._custom_fft_freqs: np.ndarray = None # custom frequencies
# flag that indicates that custom fft
# should be used
self._is_custom_fft: bool = False
def set_custom_fft_freqs(self, freqs):
"""
Sets custom frequencies for fourier transformation.
Not efficient but gives freedom in choosing frequencies
hence if_freq domain interval and resolution.
Parameters
----------
freqs : np.ndarray
1D float array with desired frequencies in Hz
Returns
-------
None
"""
self._custom_fft_freqs = freqs
self._is_custom_fft = True
def custom_fourier(self, complex_data, dt, custom_freqs):
"""
Performs fourier transform on last axis of 'complex_data'
Parameters
----------
complex_data : np.ndarray
array that has time traces stored along last dimension
dt : float
distance between successive points in time domain
custom_freqs : np.ndarray
frequencies where you wish to perform fourier transform
Returns
-------
np.ndarray
"""
time_arr = np.linspace(
0,
complex_data.shape[-1] * dt,
complex_data.shape[-1],
endpoint=False)
"""
Memory usage check.
Large data and frequencies arrays may cause some data to be
dumped to hard drive. This is to be avoided
The upper bounds for memory usage is 1 GByte.
"""
# np.float64 assumed
assumed_size_bytes = len(time_arr)*len(custom_freqs)*64/8
if assumed_size_bytes / 2**30 > 1:
self.set_is_finished(True)
raise UserWarning("Custom fourier transform requires more than "
"1 GB of memory, execution is terminated")
tmp = np.exp(
-1j * 2 * np.pi * np.tensordot(custom_freqs, time_arr, axes=0)
) # len(freqs) x len(time) 2D array
# tensor contraction along last axes
# result has shape = (complex_data.shape[:-1], len(freqs))
fourier_data = np.tensordot(
complex_data, tmp,
axes=([-1], [-1])
) / np.sqrt(complex_data.shape[-1])
return fourier_data
def func_over_trace(self, trace):
# could be np.real, np.imag, etc.
return np.abs(trace)
def log_func(self, yf):
# could be 20 * np.log10(yf), or just yf
return 20 * np.log10(yf)
    def set_parameter_name(self, parameter_name):
        """Set the name of the swept parameter (used for plot labelling)."""
        self._parameter_name = parameter_name
    def _prepare_figure(self):
        """
        Build the 2x2-grid figure: the FFT axis spans the top row; the real
        and imaginary time-domain axes share the bottom row (and share x/y
        scales with each other).

        Returns
        -------
        tuple
            (figure, (ax_fft, ax_real, ax_imag), (None,)) -- the trailing
            1-tuple is presumably the colorbar-axes placeholder expected by
            the base class; confirm against MeasurementResult.
        """
        self._last_tr = None
        self._peaks_last_tr = None
        fig = plt.figure(figsize=(16, 9))

        # 2x2 grid for axes to occupy several cells
        gs = fig.add_gridspec(2, 2)

        # fourier data axis
        ax_fft = fig.add_subplot(gs[0, :])  # occupies entire first row
        ax_fft.ticklabel_format(axis='x', style='plain')#, scilimits=(-2, 2))
        # ax_fft.set_ylabel(self.ylabel)
        ax_fft.set_xlabel("Frequency, MHz")
        ax_fft.grid()

        # time domain axes
        ax_real = fig.add_subplot(gs[1, 0])  # bottom left in 2x2 grid
        # ax_real.set_ylabel(r"$\left<V(t)\right>$, mV")
        ax_real.set_xlabel("t, ns")
        ax_real.grid()
        ax_imag = fig.add_subplot(gs[1, 1], sharex=ax_real, sharey=ax_real)  #
        # bottom
        # right in 2x2
        # grid
        # ax_imag.set_ylabel(r"$\left<V(t)\right>$, mV")
        ax_imag.set_xlabel("t, ns")
        ax_imag.grid()

        fig.tight_layout()
        return fig, (ax_fft, ax_real, ax_imag), (None,)
    def _plot(self, data):
        """Redraw all three axes from the measurement dict 'data'.

        Draws a 2D colormap of the FFT magnitude (top) and of the real and
        imaginary parts of the traces (bottom) versus the swept parameter.
        No-op until 'data' contains the "data" key.
        """
        ax_fft, ax_real, ax_imag = self._axes
        if "data" not in data.keys():
            return
        t, y, freqs, yfft_db, colors, legend = self._prepare_data_for_plot(data)
        # TODO optimize: get rid of 'cla()' and add data instead of
        # redrawing everying for every '_plot' call
        ax_fft.cla()
        ax_real.cla()
        ax_imag.cla()
        # meshes: time x parameter for the traces, frequency x parameter for FFT
        XX, YY = np.meshgrid(t, data[self._parameter_names[0]])
        ff, pp = np.meshgrid(freqs, data[self._parameter_names[0]])
        # symmetric color limits around zero for the diverging colormap
        re_max = np.max(np.abs(np.real(y)))
        im_max = np.max(np.abs(np.imag(y)))
        # for i in range(len(y)):
        #     ax_fft.plot(freqs, yfft_db[i] + i * self._fft_vertical_shift,
        #                 color=colors[i])
        #     ax_real.plot(t, np.real(y[i]) + i * self._trace_vertical_shift,
        #                  color=colors[i])
        ax_fft.pcolormesh(ff, pp, yfft_db, cmap="inferno")
        ax_real.pcolormesh(XX, YY, np.real(y), cmap="RdBu", vmax=re_max,
                           vmin=-re_max)
        ax_imag.pcolormesh(XX, YY, np.imag(y), cmap="RdBu", vmax=im_max,
                           vmin=-im_max)
        #     ax_imag.plot(t, np.imag(y[i]) + i * self._trace_vertical_shift,
        #                  color=colors[i])
        # ax_fft.legend(legend, title=self._parameter_name, loc="upper right")
        ax_fft.set_title("DFFT of abs(time trace)")
        ax_fft.grid()
        ax_fft.set_xlim(self._freq_lims)
        # ax_fft.relim()
        ax_fft.set_ylabel(self._parameter_names[0])
        # ax_fft.set_ylabel(self.ylabel)
        ax_fft.set_xlabel("Frequency, MHz")
        ax_fft.autoscale_view(True, True, True)
        ax_real.grid()
        # ax_real.legend(legend, title=self._parameter_name, loc="upper right")
        ax_real.set_xlim(t[0], t[-1])
        # ax_real.set_ylabel(r"$Re \left[ \left< V(t) \right> \right]$, mV")
        ax_real.set_ylabel(self._parameter_names[0])
        ax_real.set_xlabel("t, ns")
        ax_real.autoscale_view(True, True, True)
        ax_imag.grid()
        # ax_imag.legend(legend, title=self._parameter_name, loc="upper right")
        ax_imag.set_xlim(t[0], t[-1])
        # ax_imag.set_ylabel(r"$Im \left[ \left< V(t) \right> \right]$, mV")
        ax_imag.set_xlabel("t, ns")
        ax_imag.autoscale_view(True, True, True)
    def _prepare_data_for_plot(self, data):
        """
        Convert raw measurement data into plot-ready arrays.

        Parameters
        ----------
        data : dict[str, np.ndarray]
            all data acquired during measurement

        Returns
        -------
        tuple
            (t, complex_traces, freqs, yfft_db, colors, legend):
            time axis in ns, the raw complex traces, the FFT frequency
            axis (Hz for the standard path), log-scaled FFT magnitudes,
            per-trace colors and legend labels
        """
        amps = data[self._parameter_names[0]]
        complex_traces = data["data"]
        # 'self._current_iteration_idx' is > 0 if this function is called
        # from 'self._plot' due to the check of "data" key presence in
        # 'self.data'. If "data" key is present, then
        available_data_n = self._iter_idx_ready[-1]
        yfft_db = None  # dB of the np.abs(yfft)
        freqs = None  # fft frequencies
        if self._is_custom_fft:
            # transform only at the user-supplied frequency list
            dt = 1/data["sample_rate"]  # in seconds
            yfft_db = self.log_func(
                np.abs(
                    self.custom_fourier(
                        complex_traces,#[:available_data_n + 1],
                        dt,
                        self._custom_fft_freqs
                    )
                ) / np.sqrt(complex_traces.shape[-1])
            )
            freqs = self._custom_fft_freqs
        else:
            # standard FFT, normalized by sqrt(N), zero frequency centered
            yfft_db = self.log_func(
                np.abs(
                    np.fft.fftshift(
                        np.fft.fft(
                            complex_traces,#[:available_data_n+1],
                            axis=-1
                        ),
                        axes=-1
                    ) / np.sqrt(complex_traces.shape[-1])
                )
            )
            # 'sample_rate' in seconds so 'freqs' in Hz
            # NOTE(review): 'fp' is presumably scipy.fftpack imported at
            # module level -- confirm against the file header
            freqs = fp.fftshift(
                fp.fftfreq(complex_traces.shape[-1], d=1 / data["sample_rate"])
            )
        yfft_db[np.isneginf(yfft_db)] = 0
        # exclude singularity in logarithmic scale
        # that arises due to trace's dc offset equals zero
        dc_freq_idx = np.argmin(np.abs(freqs))
        for i in range(len(yfft_db)):
            yfft_db[i, dc_freq_idx] = np.mean(yfft_db[i])
        N = len(amps)
        colors = plt.cm.viridis_r(np.linspace(0, 1, N))
        legend = [f"{amps[i]:.2f}" for i in range(available_data_n+1)]
        t = np.linspace(
            0,
            complex_traces.shape[-1] / data["sample_rate"],
            complex_traces.shape[-1],
            endpoint=False
        ) * 1e9  # ns
        return (t, complex_traces, freqs,
                yfft_db, colors, legend)
| vdrhtc/Measurement-automation | lib2/simultaneousReflectionTransmission.py | Python | gpl-3.0 | 20,018 |
from req import ApiRequestHandler
from req import Service
import tornado
class ApiLoginHandler(ApiRequestHandler):
    """POST /login: authenticate a user and return a session token."""
    @tornado.gen.coroutine
    def post(self):
        # pull the credentials from the request arguments
        meta = self.get_args(['username', 'password'])
        err, token = yield from Service.User.login(self, meta)
        if err:
            self.render(403, err)
        else:
            self.render(200, token)
| kevchentw/nctu_hackathon | backend/api/login.py | Python | mit | 380 |
from enigma import getPrevAsciiCode
from Tools.NumericalTextInput import NumericalTextInput
from Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists
from Components.Harddisk import harddiskmanager
from copy import copy as copy_copy
from os import path as os_path
from time import localtime, strftime
# ConfigElement, the base class of all ConfigElements.
# it stores:
# value the current value, usefully encoded.
# usually a property which retrieves _value,
# and maybe does some reformatting
# _value the value as it's going to be saved in the configfile,
# though still in non-string form.
# this is the object which is actually worked on.
# default the initial value. If _value is equal to default,
# it will not be stored in the config file
# saved_value is a text representation of _value, stored in the config file
#
# and has (at least) the following methods:
# save() stores _value into saved_value,
# (or stores 'None' if it should not be stored)
# load() loads _value from saved_value, or loads
# the default if saved_value is 'None' (default)
# or invalid.
#
class ConfigElement(object):
    """Base class of all configuration elements (see header comment above
    for the value/_value/default/saved_value contract)."""
    def __init__(self):
        self.saved_value = None          # string form stored in the config file, or None
        self.save_forced = False         # store even when value == default
        self.last_value = None           # value at last select/deselect, for changedFinal
        self.save_disabled = False
        self.__notifiers = None          # lazily created lists of callbacks
        self.__notifiers_final = None
        self.enabled = True
        self.callNotifiersOnSaveAndCancel = False
    def getNotifiers(self):
        # lazily create the list so unnotified elements stay cheap
        if self.__notifiers is None:
            self.__notifiers = [ ]
        return self.__notifiers
    def setNotifiers(self, val):
        self.__notifiers = val
    notifiers = property(getNotifiers, setNotifiers)
    def getNotifiersFinal(self):
        if self.__notifiers_final is None:
            self.__notifiers_final = [ ]
        return self.__notifiers_final
    def setNotifiersFinal(self, val):
        self.__notifiers_final = val
    notifiers_final = property(getNotifiersFinal, setNotifiersFinal)
    # you need to override this to do input validation
    def setValue(self, value):
        self._value = value
        self.changed()
    def getValue(self):
        return self._value
    value = property(getValue, setValue)
    # you need to override this if self.value is not a string
    def fromstring(self, value):
        return value
    # you can overide this for fancy default handling
    def load(self):
        sv = self.saved_value
        if sv is None:
            self.value = self.default
        else:
            self.value = self.fromstring(sv)
    def tostring(self, value):
        return str(value)
    # you need to override this if str(self.value) doesn't work
    def save(self):
        # default values are not written to the config file unless forced
        if self.save_disabled or (self.value == self.default and not self.save_forced):
            self.saved_value = None
        else:
            self.saved_value = self.tostring(self.value)
        if self.callNotifiersOnSaveAndCancel:
            self.changed()
    def cancel(self):
        # revert to the last loaded/saved state
        self.load()
        if self.callNotifiersOnSaveAndCancel:
            self.changed()
    def isChanged(self):
        sv = self.saved_value
        if sv is None and self.value == self.default:
            return False
        return self.tostring(self.value) != sv
    def changed(self):
        # immediate-feedback notifiers, fired on every value change
        if self.__notifiers:
            for x in self.notifiers:
                x(self)
    def changedFinal(self):
        # deferred notifiers, fired only when the element is deselected
        if self.__notifiers_final:
            for x in self.notifiers_final:
                x(self)
    def addNotifier(self, notifier, initial_call = True, immediate_feedback = True):
        assert callable(notifier), "notifiers must be callable"
        if immediate_feedback:
            self.notifiers.append(notifier)
        else:
            self.notifiers_final.append(notifier)
        # CHECKME:
        # do we want to call the notifier
        # - at all when adding it? (yes, though optional)
        # - when the default is active? (yes)
        # - when no value *yet* has been set,
        #   because no config has ever been read (currently yes)
        #   (though that's not so easy to detect.
        #    the entry could just be new.)
        if initial_call:
            notifier(self)
    def disableSave(self):
        self.save_disabled = True
    def __call__(self, selected):
        return self.getMulti(selected)
    def onSelect(self, session):
        pass
    def onDeselect(self, session):
        # fire the deferred notifiers only if the value actually changed
        if not self.last_value == self.value:
            self.changedFinal()
            self.last_value = self.value
# Key codes understood by the ConfigElement handleKey() implementations.
KEY_LEFT = 0
KEY_RIGHT = 1
KEY_OK = 2
KEY_DELETE = 3
KEY_BACKSPACE = 4
KEY_HOME = 5
KEY_END = 6
KEY_TOGGLEOW = 7  # toggle overwrite/insert mode
KEY_ASCII = 8
KEY_TIMEOUT = 9
KEY_NUMBERS = range(12, 12+10)  # digit keys 0..9 map to codes 12..21
KEY_0 = 12
KEY_9 = 12+9
# Translate a digit key code (KEY_0..KEY_9) back to the digit 0-9.
def getKeyNumber(key):
    assert key in KEY_NUMBERS
    return key - KEY_0
class choicesList(object): # XXX: we might want a better name for this
    """Uniform id-view over a choices container that is either a list of
    ids / (id, description) tuples, or a dict mapping id -> description."""
    LIST_TYPE_LIST = 1
    LIST_TYPE_DICT = 2
    def __init__(self, choices, type = None):
        self.choices = choices
        if type is None:
            # autodetect the underlying container type
            if isinstance(choices, list):
                self.type = choicesList.LIST_TYPE_LIST
            elif isinstance(choices, dict):
                self.type = choicesList.LIST_TYPE_DICT
            else:
                assert False, "choices must be dict or list!"
        else:
            self.type = type
    def __list__(self):
        if self.type == choicesList.LIST_TYPE_LIST:
            # entries may be plain ids or (id, description) tuples
            ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
        else:
            ret = self.choices.keys()
        return ret or [""]
    def __iter__(self):
        if self.type == choicesList.LIST_TYPE_LIST:
            ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
        else:
            ret = self.choices
        return iter(ret or [""])
    def __len__(self):
        # never report 0: an empty container still yields the [""] choice
        return len(self.choices) or 1
    def __getitem__(self, index):
        if self.type == choicesList.LIST_TYPE_LIST:
            ret = self.choices[index]
            if isinstance(ret, tuple):
                ret = ret[0]
            return ret
        # NOTE: relies on Python 2 dict.keys() returning an indexable list
        return self.choices.keys()[index]
    def index(self, value):
        try:
            return self.__list__().index(value)
        except (ValueError, IndexError):
            # occurs e.g. when default is not in list
            return 0
    def __setitem__(self, index, value):
        # replace the id at 'index', keeping its description (if any)
        if self.type == choicesList.LIST_TYPE_LIST:
            orig = self.choices[index]
            if isinstance(orig, tuple):
                self.choices[index] = (value, orig[1])
            else:
                self.choices[index] = value
        else:
            key = self.choices.keys()[index]
            orig = self.choices[key]
            del self.choices[key]
            self.choices[value] = orig
    def default(self):
        # first id, or "" when there are no choices at all
        choices = self.choices
        if not choices:
            return ""
        if self.type is choicesList.LIST_TYPE_LIST:
            default = choices[0]
            if isinstance(default, tuple):
                default = default[0]
        else:
            default = choices.keys()[0]
        return default
class descriptionList(choicesList): # XXX: we might want a better name for this
    """Companion view to choicesList that exposes the *descriptions*;
    indexing is by choice id, not by position."""
    def __list__(self):
        if self.type == choicesList.LIST_TYPE_LIST:
            # tuples carry their description in slot 1; plain ids describe themselves
            ret = [not isinstance(x, tuple) and x or x[1] for x in self.choices]
        else:
            ret = self.choices.values()
        return ret or [""]
    def __iter__(self):
        return iter(self.__list__())
    def __getitem__(self, index):
        # 'index' is a choice id here
        if self.type == choicesList.LIST_TYPE_LIST:
            for x in self.choices:
                if isinstance(x, tuple):
                    if x[0] == index:
                        return str(x[1])
                elif x == index:
                    return str(x)
            return str(index) # Fallback!
        else:
            return str(self.choices.get(index, ""))
    def __setitem__(self, index, value):
        # set the description for choice id 'index'
        if self.type == choicesList.LIST_TYPE_LIST:
            i = self.index(index)
            orig = self.choices[i]
            if isinstance(orig, tuple):
                self.choices[i] = (orig[0], value)
            else:
                self.choices[i] = value
        else:
            self.choices[index] = value
#
# ConfigSelection is a "one of.."-type.
# it has the "choices", usually a list, which contains
# (id, desc)-tuples (or just only the ids, in case the id
# will be used as description)
#
# all ids MUST be plain strings.
#
class ConfigSelection(ConfigElement):
    """A "one of ..." element; see the comment block above for the
    choices/ids contract. Ids must be plain strings."""
    def __init__(self, choices, default = None):
        ConfigElement.__init__(self)
        self.choices = choicesList(choices)
        if default is None:
            default = self.choices.default()
        self._descr = None  # cached description of the current value
        self.default = self._value = self.last_value = default
    def setChoices(self, choices, default = None):
        # replace the choice list; fall back to default if the current
        # value is no longer available
        self.choices = choicesList(choices)
        if default is None:
            default = self.choices.default()
        self.default = default
        if self.value not in self.choices:
            self.value = default
    def setValue(self, value):
        # invalid values silently snap to the default
        if value in self.choices:
            self._value = value
        else:
            self._value = self.default
        self._descr = None
        self.changed()
    def tostring(self, val):
        return val
    def getValue(self):
        return self._value
    def setCurrentText(self, text):
        # rename the currently selected choice id (and its description)
        i = self.choices.index(self.value)
        self.choices[i] = text
        self._descr = self.description[text] = text
        self._value = text
    value = property(getValue, setValue)
    def getIndex(self):
        return self.choices.index(self.value)
    index = property(getIndex)
    # GUI
    def handleKey(self, key):
        nchoices = len(self.choices)
        if nchoices > 1:
            i = self.choices.index(self.value)
            if key == KEY_LEFT:
                # wrap around both ends
                self.value = self.choices[(i + nchoices - 1) % nchoices]
            elif key == KEY_RIGHT:
                self.value = self.choices[(i + 1) % nchoices]
            elif key == KEY_HOME:
                self.value = self.choices[0]
            elif key == KEY_END:
                self.value = self.choices[nchoices - 1]
    def selectNext(self):
        nchoices = len(self.choices)
        i = self.choices.index(self.value)
        self.value = self.choices[(i + 1) % nchoices]
    def getText(self):
        if self._descr is not None:
            return self._descr
        descr = self._descr = self.description[self.value]
        if descr:
            return _(descr)
        return descr
    def getMulti(self, selected):
        if self._descr is not None:
            descr = self._descr
        else:
            descr = self._descr = self.description[self.value]
        if descr:
            return ("text", _(descr))
        return ("text", descr)
    # HTML
    def getHTML(self, id):
        # render the choices as a radio-button group
        res = ""
        for v in self.choices:
            descr = self.description[v]
            if self.value == v:
                checked = 'checked="checked" '
            else:
                checked = ''
            res += '<input type="radio" name="' + id + '" ' + checked + 'value="' + v + '">' + descr + "</input></br>\n"
        return res;
    def unsafeAssign(self, value):
        # setValue does check if value is in choices. This is safe enough.
        self.value = value
    description = property(lambda self: descriptionList(self.choices.choices, self.choices.type))
# a binary decision.
#
# several customized versions exist for different
# descriptions.
#
boolean_descriptions = {False: _("false"), True: _("true")}
class ConfigBoolean(ConfigElement):
    """A binary decision; 'descriptions' maps False/True to display text."""
    def __init__(self, default = False, descriptions = boolean_descriptions):
        ConfigElement.__init__(self)
        self.descriptions = descriptions
        self.value = self.last_value = self.default = default
    def handleKey(self, key):
        if key in (KEY_LEFT, KEY_RIGHT):
            # both directions toggle
            self.value = not self.value
        elif key == KEY_HOME:
            self.value = False
        elif key == KEY_END:
            self.value = True
    def getText(self):
        descr = self.descriptions[self.value]
        if descr:
            return _(descr)
        return descr
    def getMulti(self, selected):
        descr = self.descriptions[self.value]
        if descr:
            return ("text", _(descr))
        return ("text", descr)
    def tostring(self, value):
        # persisted as the literal strings "true"/"false"
        if not value:
            return "false"
        else:
            return "true"
    def fromstring(self, val):
        if val == "true":
            return True
        else:
            return False
    def getHTML(self, id):
        if self.value:
            checked = ' checked="checked"'
        else:
            checked = ''
        return '<input type="checkbox" name="' + id + '" value="1" ' + checked + " />"
    # this is FLAWED. and must be fixed.
    def unsafeAssign(self, value):
        if value == "1":
            self.value = True
        else:
            self.value = False
    def onDeselect(self, session):
        if not self.last_value == self.value:
            self.changedFinal()
            self.last_value = self.value
yes_no_descriptions = {False: _("no"), True: _("yes")}
# ConfigBoolean rendered as "yes"/"no".
class ConfigYesNo(ConfigBoolean):
    def __init__(self, default = False):
        ConfigBoolean.__init__(self, default = default, descriptions = yes_no_descriptions)
on_off_descriptions = {False: _("off"), True: _("on")}
# ConfigBoolean rendered as "on"/"off".
class ConfigOnOff(ConfigBoolean):
    def __init__(self, default = False):
        ConfigBoolean.__init__(self, default = default, descriptions = on_off_descriptions)
enable_disable_descriptions = {False: _("disable"), True: _("enable")}
# ConfigBoolean rendered as "enable"/"disable".
class ConfigEnableDisable(ConfigBoolean):
    def __init__(self, default = False):
        ConfigBoolean.__init__(self, default = default, descriptions = enable_disable_descriptions)
class ConfigDateTime(ConfigElement):
    """A timestamp (seconds) stepped by 'increment' (default one day) and
    rendered with the given strftime 'formatstring'."""
    def __init__(self, default, formatstring, increment = 86400):
        ConfigElement.__init__(self)
        self.increment = increment
        self.formatstring = formatstring
        self.value = self.last_value = self.default = int(default)
    def handleKey(self, key):
        if key == KEY_LEFT:
            self.value = self.value - self.increment
        elif key == KEY_RIGHT:
            self.value = self.value + self.increment
        elif key == KEY_HOME or key == KEY_END:
            # both jump back to the default timestamp
            self.value = self.default
    def getText(self):
        return strftime(self.formatstring, localtime(self.value))
    def getMulti(self, selected):
        return ("text", strftime(self.formatstring, localtime(self.value)))
    def fromstring(self, val):
        return int(val)
# *THE* mighty config element class
#
# allows you to store/edit a sequence of values.
# can be used for IP-addresses, dates, plain integers, ...
# several helper exist to ease this up a bit.
#
class ConfigSequence(ConfigElement):
    """A list of bounded integers edited digit-by-digit (see the comment
    block above); basis for IPs, dates, clocks, PINs, plain integers."""
    def __init__(self, seperator, limits, default, censor_char = ""):
        ConfigElement.__init__(self)
        assert isinstance(limits, list) and len(limits[0]) == 2, "limits must be [(min, max),...]-tuple-list"
        assert censor_char == "" or len(censor_char) == 1, "censor char must be a single char (or \"\")"
        #assert isinstance(default, list), "default must be a list"
        #assert isinstance(default[0], int), "list must contain numbers"
        #assert len(default) == len(limits), "length must match"
        self.marked_pos = 0          # cursor position in the rendered digit string
        self.seperator = seperator
        self.limits = limits
        self.censor_char = censor_char
        self.last_value = self.default = default
        self.value = copy_copy(default)  # copy so edits never mutate the default
        self.endNotifier = None
    def validate(self):
        # clamp every block into its (min, max) range and keep the cursor
        # inside the digit string; fire end-notifiers when it runs off the end
        max_pos = 0
        num = 0
        for i in self._value:
            max_pos += len(str(self.limits[num][1]))
            if self._value[num] < self.limits[num][0]:
                self._value[num] = self.limits[num][0]
            if self._value[num] > self.limits[num][1]:
                self._value[num] = self.limits[num][1]
            num += 1
        if self.marked_pos >= max_pos:
            if self.endNotifier:
                for x in self.endNotifier:
                    x(self)
            self.marked_pos = max_pos - 1
        if self.marked_pos < 0:
            self.marked_pos = 0
    def validatePos(self):
        # cursor-only clamping (used for plain navigation keys)
        if self.marked_pos < 0:
            self.marked_pos = 0
        total_len = sum([len(str(x[1])) for x in self.limits])
        if self.marked_pos >= total_len:
            self.marked_pos = total_len - 1
    def addEndNotifier(self, notifier):
        if self.endNotifier is None:
            self.endNotifier = []
        self.endNotifier.append(notifier)
    def handleKey(self, key):
        if key == KEY_LEFT:
            self.marked_pos -= 1
            self.validatePos()
        elif key == KEY_RIGHT:
            self.marked_pos += 1
            self.validatePos()
        elif key == KEY_HOME:
            self.marked_pos = 0
            self.validatePos()
        elif key == KEY_END:
            max_pos = 0
            num = 0
            for i in self._value:
                max_pos += len(str(self.limits[num][1]))
                num += 1
            self.marked_pos = max_pos - 1
            self.validatePos()
        elif key in KEY_NUMBERS or key == KEY_ASCII:
            if key == KEY_ASCII:
                code = getPrevAsciiCode()
                if code < 48 or code > 57:
                    return
                number = code - 48
            else:
                number = getKeyNumber(key)
            block_len = [len(str(x[1])) for x in self.limits]
            total_len = sum(block_len)
            pos = 0
            blocknumber = 0
            block_len_total = [0, ]
            # locate which block the cursor currently sits in
            for x in block_len:
                pos += block_len[blocknumber]
                block_len_total.append(pos)
                if pos - 1 >= self.marked_pos:
                    pass
                else:
                    blocknumber += 1
            # length of numberblock
            number_len = len(str(self.limits[blocknumber][1]))
            # position in the block
            posinblock = self.marked_pos - block_len_total[blocknumber]
            # replace the single decimal digit at the cursor
            oldvalue = self._value[blocknumber]
            olddec = oldvalue % 10 ** (number_len - posinblock) - (oldvalue % 10 ** (number_len - posinblock - 1))
            newvalue = oldvalue - olddec + (10 ** (number_len - posinblock - 1) * number)
            self._value[blocknumber] = newvalue
            self.marked_pos += 1
        self.validate()
        self.changed()
    def genText(self):
        # render the digit string and translate the cursor position to
        # account for separators
        value = ""
        mPos = self.marked_pos
        num = 0;
        for i in self._value:
            if value: #fixme no heading separator possible
                value += self.seperator
                if mPos >= len(value) - 1:
                    mPos += 1
            if self.censor_char == "":
                value += ("%0" + str(len(str(self.limits[num][1]))) + "d") % i
            else:
                value += (self.censor_char * len(str(self.limits[num][1])))
            num += 1
        return (value, mPos)
    def getText(self):
        (value, mPos) = self.genText()
        return value
    def getMulti(self, selected):
        (value, mPos) = self.genText()
        # only mark cursor when we are selected
        # (this code is heavily ink optimized!)
        if self.enabled:
            return ("mtext"[1-selected:], value, [mPos])
        else:
            return ("text", value)
    def tostring(self, val):
        return self.seperator.join([self.saveSingle(x) for x in val])
    def saveSingle(self, v):
        return str(v)
    def fromstring(self, value):
        return [int(x) for x in value.split(self.seperator)]
    def onDeselect(self, session):
        if self.last_value != self._value:
            self.changedFinal()
            self.last_value = copy_copy(self._value)
ip_limits = [(0,255),(0,255),(0,255),(0,255)]
class ConfigIP(ConfigSequence):
    """An IPv4 address edited block-wise (whole octet marked, not single
    digits); 'auto_jump' advances to the next octet on overflow."""
    def __init__(self, default, auto_jump = False):
        ConfigSequence.__init__(self, seperator = ".", limits = ip_limits, default = default)
        self.block_len = [len(str(x[1])) for x in self.limits]
        self.marked_block = 0    # index of the currently edited octet
        self.overwrite = True    # first digit typed replaces the octet
        self.auto_jump = auto_jump
    def handleKey(self, key):
        if key == KEY_LEFT:
            if self.marked_block > 0:
                self.marked_block -= 1
            self.overwrite = True
        elif key == KEY_RIGHT:
            if self.marked_block < len(self.limits)-1:
                self.marked_block += 1
            self.overwrite = True
        elif key == KEY_HOME:
            self.marked_block = 0
            self.overwrite = True
        elif key == KEY_END:
            self.marked_block = len(self.limits)-1
            self.overwrite = True
        elif key in KEY_NUMBERS or key == KEY_ASCII:
            if key == KEY_ASCII:
                code = getPrevAsciiCode()
                if code < 48 or code > 57:
                    return
                number = code - 48
            else:
                number = getKeyNumber(key)
            oldvalue = self._value[self.marked_block]
            if self.overwrite:
                self._value[self.marked_block] = number
                self.overwrite = False
            else:
                oldvalue *= 10
                newvalue = oldvalue + number
                if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
                    # digit doesn't fit: move on and re-dispatch it there
                    self.handleKey(KEY_RIGHT)
                    self.handleKey(key)
                    return
                else:
                    self._value[self.marked_block] = newvalue
            if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
                self.handleKey(KEY_RIGHT)
            self.validate()
            self.changed()
    def genText(self):
        # mark the whole current octet (mBlock is a range of positions)
        value = ""
        block_strlen = []
        for i in self._value:
            block_strlen.append(len(str(i)))
            if value:
                value += self.seperator
            value += str(i)
        leftPos = sum(block_strlen[:(self.marked_block)])+self.marked_block
        rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
        mBlock = range(leftPos, rightPos)
        return (value, mBlock)
    def getMulti(self, selected):
        (value, mBlock) = self.genText()
        if self.enabled:
            return ("mtext"[1-selected:], value, mBlock)
        else:
            return ("text", value)
    def getHTML(self, id):
        # we definitely don't want leading zeros
        return '.'.join(["%d" % d for d in self.value])
mac_limits = [(1,255),(1,255),(1,255),(1,255),(1,255),(1,255)]
# A MAC address: six colon-separated bytes (note: lower limit is 1, not 0).
class ConfigMAC(ConfigSequence):
    def __init__(self, default):
        ConfigSequence.__init__(self, seperator = ":", limits = mac_limits, default = default)
# A screen position/size quadruple; 'args' supplies the four upper bounds.
class ConfigPosition(ConfigSequence):
    def __init__(self, default, args):
        ConfigSequence.__init__(self, seperator = ",", limits = [(0,args[0]),(0,args[1]),(0,args[2]),(0,args[3])], default = default)
clock_limits = [(0,23),(0,59)]
class ConfigClock(ConfigSequence):
    """A time of day [hour, minute]; increment/decrement wrap around
    midnight."""
    def __init__(self, default):
        # 'default' is a unix timestamp; only its local hour:minute is used
        t = localtime(default)
        ConfigSequence.__init__(self, seperator = ":", limits = clock_limits, default = [t.tm_hour, t.tm_min])
    def increment(self):
        # Check if Minutes maxed out
        if self._value[1] == 59:
            # Increment Hour, reset Minutes
            if self._value[0] < 23:
                self._value[0] += 1
            else:
                self._value[0] = 0
            self._value[1] = 0
        else:
            # Increment Minutes
            self._value[1] += 1
        # Trigger change
        self.changed()
    def decrement(self):
        # Check if Minutes is minimum
        if self._value[1] == 0:
            # Decrement Hour, set Minutes to 59
            if self._value[0] > 0:
                self._value[0] -= 1
            else:
                self._value[0] = 23
            self._value[1] = 59
        else:
            # Decrement Minutes
            self._value[1] -= 1
        # Trigger change
        self.changed()
integer_limits = (0, 9999999999)
class ConfigInteger(ConfigSequence):
    """A single bounded integer, stored as a one-element sequence."""
    def __init__(self, default, limits = integer_limits):
        ConfigSequence.__init__(self, seperator = ":", limits = [limits], default = default)
    # you need to override this to do input validation
    def setValue(self, value):
        # wrap the scalar into the one-element list ConfigSequence expects
        self._value = [value]
        self.changed()
    def getValue(self):
        return self._value[0]
    value = property(getValue, setValue)
    def fromstring(self, value):
        return int(value)
    def tostring(self, value):
        return str(value)
class ConfigPIN(ConfigInteger):
    """A PIN code of 'len' digits, optionally rendered censored."""
    def __init__(self, default, len = 4, censor = ""):
        # NOTE: parameter 'len' shadows the builtin; kept for compatibility.
        assert isinstance(default, int), "ConfigPIN default must be an integer"
        if default == -1:
            # NOTE(review): -1 is mapped to the *string* "aaaa" as a
            # "no PIN set" sentinel even though the element is numeric --
            # looks dubious; confirm against callers before changing.
            default = "aaaa"
        ConfigSequence.__init__(self, seperator = ":", limits = [(0, (10**len)-1)], censor_char = censor, default = default)
        self.len = len
    def getLength(self):
        return self.len
class ConfigFloat(ConfigSequence):
    """Two integer blocks rendered as a float: value[0].value[1]."""
    def __init__(self, default, limits):
        ConfigSequence.__init__(self, seperator = ".", limits = limits, default = default)
    def getFloat(self):
        # fractional part is scaled by its upper limit + 1
        return float(self.value[1] / float(self.limits[1][1] + 1) + self.value[0])
    float = property(getFloat)
# an editable text...
class ConfigText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", fixed_size = True, visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = fixed_size
self.visible_width = visible_width
self.offset = 0
self.overwrite = fixed_size
self.help_window = None
self.value = self.last_value = self.default = default
def validateMarker(self):
textlen = len(self.text)
if self.fixed_size:
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
else:
if self.marked_pos > textlen:
self.marked_pos = textlen
if self.marked_pos < 0:
self.marked_pos = 0
if self.visible_width:
if self.marked_pos < self.offset:
self.offset = self.marked_pos
if self.marked_pos >= self.offset + self.visible_width:
if self.marked_pos == textlen:
self.offset = self.marked_pos - self.visible_width
else:
self.offset = self.marked_pos - self.visible_width + 1
if self.offset > 0 and self.offset + self.visible_width > textlen:
self.offset = max(0, len - self.visible_width)
def insertChar(self, ch, pos, owr):
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def deleteChar(self, pos):
if not self.fixed_size:
self.text = self.text[0:pos] + self.text[pos + 1:]
elif self.overwrite:
self.text = self.text[0:pos] + " " + self.text[pos + 1:]
else:
self.text = self.text[0:pos] + self.text[pos + 1:] + " "
def deleteAllChars(self):
if self.fixed_size:
self.text = " " * len(self.text)
else:
self.text = ""
self.marked_pos = 0
def handleKey(self, key):
# this will no change anything on the value itself
# so we can handle it here in gui element
if key == KEY_DELETE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.marked_pos)
if self.fixed_size and self.overwrite:
self.marked_pos += 1
elif key == KEY_BACKSPACE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
elif self.marked_pos > 0:
self.deleteChar(self.marked_pos-1)
if not self.fixed_size and self.offset > 0:
self.offset -= 1
self.marked_pos -= 1
elif key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
self.marked_pos += 1
elif key == KEY_HOME:
self.timeout()
self.allmarked = False
self.marked_pos = 0
elif key == KEY_END:
self.timeout()
self.allmarked = False
self.marked_pos = len(self.text)
elif key == KEY_TOGGLEOW:
self.timeout()
self.overwrite = not self.overwrite
elif key == KEY_ASCII:
self.timeout()
newChar = unichr(getPrevAsciiCode())
if not self.useableChars or newChar in self.useableChars:
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return ("mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark)
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return ("mtext"[1-selected:], self.text.encode("utf-8")+" ", mark)
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPassword(ConfigText):
    """Text element whose display is censored except while being edited."""
    def __init__(self, default = "", fixed_size = False, visible_width = False, censor = "*"):
        ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
        self.censor_char = censor
        self.hidden = True
    def getMulti(self, selected):
        mtext, text, mark = ConfigText.getMulti(self, selected)
        if self.hidden:
            # render one censor char per displayed character
            text = len(text) * self.censor_char
        return (mtext, text, mark)
    def onSelect(self, session):
        ConfigText.onSelect(self, session)
        self.hidden = False  # reveal while editing
    def onDeselect(self, session):
        ConfigText.onDeselect(self, session)
        self.hidden = True
# lets the user select between [min, min+stepwidth, min+(stepwidth*2)..., maxval] with maxval <= max depending
# on the stepwidth
# min, max, stepwidth, default are int values
# wraparound: pressing RIGHT key at max value brings you to min value and vice versa if set to True
class ConfigSelectionNumber(ConfigSelection):
    """Selection over min..max in steps of stepwidth (ids are strings);
    see the comment above for the 'wraparound' semantics."""
    def __init__(self, min, max, stepwidth, default = None, wraparound = False):
        self.wraparound = wraparound
        if default is None:
            default = min
        default = str(default)
        choices = []
        step = min
        while step <= max:
            choices.append(str(step))
            step += stepwidth
        ConfigSelection.__init__(self, choices, default)
    def getValue(self):
        return int(ConfigSelection.getValue(self))
    def setValue(self, val):
        ConfigSelection.setValue(self, str(val))
    def handleKey(self, key):
        if not self.wraparound:
            # block stepping past either end when wraparound is disabled
            if key == KEY_RIGHT:
                if len(self.choices) == (self.choices.index(self.value) + 1):
                    return
            if key == KEY_LEFT:
                if self.choices.index(self.value) == 0:
                    return
        ConfigSelection.handleKey(self, key)
class ConfigNumber(ConfigText):
    """Free-length integer edited as text; leading zeros are stripped."""
    def __init__(self, default = 0):
        ConfigText.__init__(self, str(default), fixed_size = False)
    def getValue(self):
        return int(self.text)
    def setValue(self, val):
        self.text = str(val)
    value = property(getValue, setValue)
    _value = property(getValue, setValue)
    def isChanged(self):
        sv = self.saved_value
        strv = self.tostring(self.value)
        if sv is None and strv == self.default:
            return False
        return strv != sv
    def conform(self):
        # strip leading zeros while keeping the cursor anchored relative
        # to the end of the text
        pos = len(self.text) - self.marked_pos
        self.text = self.text.lstrip("0")
        if self.text == "":
            self.text = "0"
        if pos > len(self.text):
            self.marked_pos = 0
        else:
            self.marked_pos = len(self.text) - pos
    def handleKey(self, key):
        if key in KEY_NUMBERS or key == KEY_ASCII:
            if key == KEY_ASCII:
                ascii = getPrevAsciiCode()
                if not (48 <= ascii <= 57):
                    return
            else:
                ascii = getKeyNumber(key) + 48
            newChar = unichr(ascii)
            if self.allmarked:
                self.deleteAllChars()
                self.allmarked = False
            self.insertChar(newChar, self.marked_pos, False)
            self.marked_pos += 1
        else:
            ConfigText.handleKey(self, key)
        self.conform()
    def onSelect(self, session):
        self.allmarked = (self.value != "")
    def onDeselect(self, session):
        self.marked_pos = 0
        self.offset = 0
        if not self.last_value == self.value:
            self.changedFinal()
            self.last_value = self.value
class ConfigSearchText(ConfigText):
    """A ConfigText whose numeric-text input is re-initialised in search
    mode (cell-phone style input without commit timeout)."""
    def __init__(self, default = "", fixed_size = False, visible_width = False):
        ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
        # Re-run the NumericalTextInput setup with search semantics.
        NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False, search = True)
class ConfigDirectory(ConfigText):
    """A text element holding a directory path; the path is picked via an
    external browser, so direct key editing is disabled."""
    def __init__(self, default="", visible_width=60):
        ConfigText.__init__(self, default, fixed_size = True, visible_width = visible_width)

    def handleKey(self, key):
        # Intentionally a no-op: paths are not edited character by character.
        pass

    def getValue(self):
        # Empty text means "no directory chosen".
        if self.text == "":
            return None
        else:
            return ConfigText.getValue(self)

    def setValue(self, val):
        if val is None:  # identity test instead of '== None' (PEP 8)
            val = ""
        ConfigText.setValue(self, val)

    def getMulti(self, selected):
        if self.text == "":
            # No path yet: show a placeholder label with no marked range.
            return ("mtext"[1-selected:], _("List of storage devices"), range(0))
        else:
            return ConfigText.getMulti(self, selected)

    def onSelect(self, session):
        self.allmarked = (self.value != "")
# a slider.
class ConfigSlider(ConfigElement):
    """An integer bounded to [min, max], adjusted in `increment` steps."""
    def __init__(self, default = 0, increment = 1, limits = (0, 100)):
        ConfigElement.__init__(self)
        self.value = self.last_value = self.default = default
        self.min = limits[0]
        self.max = limits[1]
        self.increment = increment

    def checkValues(self):
        # Clamp the value into [min, max].
        if self.value < self.min:
            self.value = self.min
        elif self.value > self.max:
            self.value = self.max

    def handleKey(self, key):
        if key == KEY_LEFT:
            self.value -= self.increment
        elif key == KEY_RIGHT:
            self.value += self.increment
        elif key == KEY_HOME:
            self.value = self.min
        elif key == KEY_END:
            self.value = self.max
        else:
            # Not a key we handle; leave the value untouched.
            return
        self.checkValues()

    def getText(self):
        return "%d / %d" % (self.value, self.max)

    def getMulti(self, selected):
        self.checkValues()
        return ("slider", self.value, self.max)

    def fromstring(self, value):
        return int(value)
# a satlist. in fact, it's a ConfigSelection.
class ConfigSatlist(ConfigSelection):
    """Selection over satellites given as (orbpos, description, flags)."""
    def __init__(self, list, default = None):
        if default is not None:
            default = str(default)
        satellite_choices = [(str(orbpos), desc) for (orbpos, desc, flags) in list]
        ConfigSelection.__init__(self, choices = satellite_choices, default = default)

    def getOrbitalPosition(self):
        """Return the selected orbital position as int, or None if unset."""
        if self.value == "":
            return None
        return int(self.value)

    orbital_position = property(getOrbitalPosition)
class ConfigSet(ConfigElement):
    """A multi-select element whose value is a sorted list of chosen items."""
    def __init__(self, choices, default=None):
        ConfigElement.__init__(self)
        assert isinstance(choices, list), "ConfigSet choices must be a list!"
        choices.sort()
        self.choices = choicesList(choices, choicesList.LIST_TYPE_LIST)
        # BUGFIX: the old 'default=[]' shared one mutable list between all
        # instances and sorted the caller's list in place; copy it instead.
        default = sorted(default) if default else []
        self.pos = -1  # -1 = no choice highlighted
        self.last_value = self.default = default
        self.value = default[:]

    def toggleChoice(self, choice):
        # Add or remove `choice`, keeping the value list sorted.
        value = self.value
        if choice in value:
            value.remove(choice)
        else:
            value.append(choice)
            value.sort()
        self.changed()

    def handleKey(self, key):
        if key in KEY_NUMBERS + [KEY_DELETE, KEY_BACKSPACE]:
            if self.pos != -1:
                self.toggleChoice(self.choices[self.pos])
        elif key == KEY_LEFT:
            if self.pos < 0:
                self.pos = len(self.choices)-1
            else:
                self.pos -= 1
        elif key == KEY_RIGHT:
            if self.pos >= len(self.choices)-1:
                self.pos = -1
            else:
                self.pos += 1
        elif key in (KEY_HOME, KEY_END):
            self.pos = -1

    def genString(self, lst):
        # Space-separated descriptions of the given choices.
        res = ""
        for x in lst:
            res += self.description[x]+" "
        return res

    def getText(self):
        return self.genString(self.value)

    def getMulti(self, selected):
        if not selected or self.pos == -1:
            return ("text", self.genString(self.value))
        else:
            # Render with the highlighted choice marked; parentheses mean
            # "not currently a member of the set".
            tmp = self.value[:]
            ch = self.choices[self.pos]
            mem = ch in self.value
            if not mem:
                tmp.append(ch)
                tmp.sort()
            ind = tmp.index(ch)
            val1 = self.genString(tmp[:ind])
            val2 = " "+self.genString(tmp[ind+1:])
            if mem:
                chstr = " "+self.description[ch]+" "
            else:
                chstr = "("+self.description[ch]+")"
            len_val1 = len(val1)
            return ("mtext", val1+chstr+val2, range(len_val1, len_val1 + len(chstr)))

    def onDeselect(self, session):
        self.pos = -1
        if not self.last_value == self.value:
            self.changedFinal()
            self.last_value = self.value[:]

    def tostring(self, value):
        return str(value)

    def fromstring(self, val):
        # SECURITY: eval() on settings-file content; acceptable only because
        # the file is local and trusted. Consider ast.literal_eval.
        return eval(val)

    description = property(lambda self: descriptionList(self.choices.choices, choicesList.LIST_TYPE_LIST))
class ConfigLocations(ConfigElement):
    """A config element holding a list of filesystem locations.

    Each internal record is [path, mountpoint, currently_mounted,
    user_selected]; the public value is the list of selected paths whose
    media are currently available.
    """
    def __init__(self, default=None, visible_width=False):
        ConfigElement.__init__(self)
        self.visible_width = visible_width
        self.pos = -1
        # BUGFIX: 'default=[]' was a shared mutable default; copy instead.
        self.default = default if default is not None else []
        self.locations = []
        self.mountpoints = []
        self.value = self.default[:]

    def setValue(self, value):
        # Merge the newly selected paths into the location records, dropping
        # records for paths that were deselected.
        locations = self.locations
        loc = [x[0] for x in locations if x[3]]
        add = [x for x in value if not x in loc]
        diff = add + [x for x in loc if not x in value]
        locations = [x for x in locations if not x[0] in diff] + [[x, self.getMountpoint(x), True, True] for x in add]
        locations.sort(key = lambda x: x[0])
        self.locations = locations
        self.changed()

    def getValue(self):
        # Refresh mount state, then report the selected & available paths.
        self.checkChangedMountpoints()
        locations = self.locations
        for x in locations:
            x[3] = x[2]
        return [x[0] for x in locations if x[3]]

    value = property(getValue, setValue)

    def tostring(self, value):
        return str(value)

    def fromstring(self, val):
        # SECURITY: eval() of settings-file content; safe only for a local,
        # trusted file. Consider ast.literal_eval.
        return eval(val)

    def load(self):
        sv = self.saved_value
        if sv is None:
            tmp = self.default
        else:
            tmp = self.fromstring(sv)
        locations = [[x, None, False, False] for x in tmp]
        self.refreshMountpoints()
        for x in locations:
            if fileExists(x[0]):
                x[1] = self.getMountpoint(x[0])
                x[2] = True
        self.locations = locations

    def save(self):
        locations = self.locations
        if self.save_disabled or not locations:
            self.saved_value = None
        else:
            self.saved_value = self.tostring([x[0] for x in locations])

    def isChanged(self):
        sv = self.saved_value
        locations = self.locations
        # BUGFIX: this used the undefined name 'val' (NameError at runtime);
        # the saved value 'sv' was meant.
        if sv is None and not locations:
            return False
        return self.tostring([x[0] for x in locations]) != sv

    def addedMount(self, mp):
        for x in self.locations:
            if x[1] == mp:
                x[2] = True
            elif x[1] == None and fileExists(x[0]):
                x[1] = self.getMountpoint(x[0])
                x[2] = True

    def removedMount(self, mp):
        for x in self.locations:
            if x[1] == mp:
                x[2] = False

    def refreshMountpoints(self):
        # Longest mountpoints first so getMountpoint matches the deepest mount.
        self.mountpoints = [p.mountpoint for p in harddiskmanager.getMountedPartitions() if p.mountpoint != "/"]
        self.mountpoints.sort(key = lambda x: -len(x))

    def checkChangedMountpoints(self):
        oldmounts = self.mountpoints
        self.refreshMountpoints()
        newmounts = self.mountpoints
        if oldmounts == newmounts:
            return
        for x in oldmounts:
            if not x in newmounts:
                self.removedMount(x)
        for x in newmounts:
            if not x in oldmounts:
                self.addedMount(x)

    def getMountpoint(self, file):
        file = os_path.realpath(file)+"/"
        for m in self.mountpoints:
            if file.startswith(m):
                return m
        return None

    def handleKey(self, key):
        if key == KEY_LEFT:
            self.pos -= 1
            if self.pos < -1:
                self.pos = len(self.value)-1
        elif key == KEY_RIGHT:
            self.pos += 1
            if self.pos >= len(self.value):
                self.pos = -1
        elif key in (KEY_HOME, KEY_END):
            self.pos = -1

    def getText(self):
        return " ".join(self.value)

    def getMulti(self, selected):
        if not selected:
            valstr = " ".join(self.value)
            if self.visible_width and len(valstr) > self.visible_width:
                return ("text", valstr[0:self.visible_width])
            else:
                return ("text", valstr)
        else:
            # Highlight the entry at self.pos, scrolling so it stays visible.
            i = 0
            valstr = ""
            ind1 = 0
            ind2 = 0
            for val in self.value:
                if i == self.pos:
                    ind1 = len(valstr)
                valstr += str(val)+" "
                if i == self.pos:
                    ind2 = len(valstr)
                i += 1
            if self.visible_width and len(valstr) > self.visible_width:
                if ind1+1 < self.visible_width/2:
                    off = 0
                else:
                    off = min(ind1+1-self.visible_width/2, len(valstr)-self.visible_width)
                return ("mtext", valstr[off:off+self.visible_width], range(ind1-off,ind2-off))
            else:
                return ("mtext", valstr, range(ind1,ind2))

    def onDeselect(self, session):
        self.pos = -1
# nothing.
class ConfigNothing(ConfigSelection):
    # A placeholder element with a single empty choice, for entries that
    # only display text and hold no real value.
    def __init__(self):
        ConfigSelection.__init__(self, choices = [("","")])
# until here, 'saved_value' always had to be a *string*.
# now, in ConfigSubsection, and only there, saved_value
# is a dict, essentially forming a tree.
#
# config.foo.bar=True
# config.foobar=False
#
# turns into:
# config.saved_value == {"foo": {"bar": "True"}, "foobar": "False"}
#
class ConfigSubsectionContent(object):
    # Plain attribute container used by ConfigSubsection to sidestep its own
    # __setattr__ override; it holds 'items' and 'stored_values'.
    pass
# We keep a backup of the loaded configuration data in self.stored_values so
# that saved values can be applied instantly when a new element is appended,
# making non-default values available right away.
# A list, for example:
# config.dipswitches = ConfigSubList()
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
class ConfigSubList(list, object):
    """A list of config elements whose saved state is keyed by list index."""
    def __init__(self):
        list.__init__(self)
        self.stored_values = {}

    def save(self):
        for element in self:
            element.save()

    def load(self):
        for element in self:
            element.load()

    def getSavedValue(self):
        # Collect each element's saved value under its stringified index.
        saved = { }
        for index, element in enumerate(self):
            sv = element.saved_value
            if sv is not None:
                saved[str(index)] = sv
        return saved

    def setSavedValue(self, values):
        self.stored_values = dict(values)
        for (key, sv) in self.stored_values.items():
            if int(key) < len(self):
                self[int(key)].saved_value = sv

    saved_value = property(getSavedValue, setSavedValue)

    def append(self, item):
        # Apply any stored value for this slot as soon as it is appended.
        index = str(len(self))
        list.append(self, item)
        if index in self.stored_values:
            item.saved_value = self.stored_values[index]
            item.load()

    def dict(self):
        return dict((str(index), element) for index, element in enumerate(self))
# Same as ConfigSubList, just as a dictionary. Care must be taken that the
# 'key' has a proper str() method, because it will be used in the config file.
class ConfigSubDict(dict, object):
    """A dict of config elements whose saved state is keyed by str(key)."""
    def __init__(self):
        dict.__init__(self)
        self.stored_values = {}

    def save(self):
        for element in self.values():
            element.save()

    def load(self):
        for element in self.values():
            element.load()

    def getSavedValue(self):
        saved = {}
        for (key, element) in self.items():
            sv = element.saved_value
            if sv is not None:
                saved[str(key)] = sv
        return saved

    def setSavedValue(self, values):
        self.stored_values = dict(values)
        for (key, element) in self.items():
            if str(key) in self.stored_values:
                element.saved_value = self.stored_values[str(key)]

    saved_value = property(getSavedValue, setSavedValue)

    def __setitem__(self, key, item):
        # Apply any stored value for this key as soon as it is inserted.
        dict.__setitem__(self, key, item)
        if str(key) in self.stored_values:
            item.saved_value = self.stored_values[str(key)]
            item.load()

    def dict(self):
        return self
# Like the classes above, just with a more "native"
# syntax.
#
# some evil stuff must be done to allow instant
# loading of added elements. this is why this class
# is so complex.
#
# we need the 'content' because we overwrite
# __setattr__.
# If you don't understand this, try adding
# __setattr__ to a usual exisiting class and you will.
class ConfigSubsection(object):
    """A named subtree of config elements with attribute-style access."""
    def __init__(self):
        # Bypass our own __setattr__ while bootstrapping the container.
        self.__dict__["content"] = ConfigSubsectionContent()
        self.content.items = { }
        self.content.stored_values = { }

    def __setattr__(self, name, value):
        if name == "saved_value":
            return self.setSavedValue(value)
        assert isinstance(value, (ConfigSubsection, ConfigElement, ConfigSubList, ConfigSubDict)), "ConfigSubsections can only store ConfigSubsections, ConfigSubLists, ConfigSubDicts or ConfigElements"
        content = self.content
        content.items[name] = value
        # If a value for this name was loaded earlier, apply it immediately.
        x = content.stored_values.get(name, None)
        if x is not None:
            #print "ok, now we have a new item,", name, "and have the following value for it:", x
            value.saved_value = x
            value.load()

    def __getattr__(self, name):
        # Unknown attributes resolve to the stored config elements.
        return self.content.items[name]

    def getSavedValue(self):
        # Merge current element state into the stored-values dict.
        res = self.content.stored_values
        for (key, val) in self.content.items.items():
            sv = val.saved_value
            if sv is not None:
                res[key] = sv
            elif key in res:
                del res[key]
        return res

    def setSavedValue(self, values):
        values = dict(values)
        self.content.stored_values = values
        for (key, val) in self.content.items.items():
            value = values.get(key, None)
            if value is not None:
                val.saved_value = value

    saved_value = property(getSavedValue, setSavedValue)

    def save(self):
        for x in self.content.items.values():
            x.save()

    def load(self):
        for x in self.content.items.values():
            x.load()

    def dict(self):
        return self.content.items
# the root config object, which also can "pickle" (=serialize)
# down the whole config tree.
#
# we try to keep non-existing config entries, to apply them whenever
# a new config entry is added to a subsection
# also, non-existing config entries will be saved, so they won't be
# lost when a config entry disappears.
class Config(ConfigSubsection):
    """Root of the config tree; serializes to/from 'name=value' lines."""
    def __init__(self):
        ConfigSubsection.__init__(self)

    def pickle_this(self, prefix, topickle, result):
        # Recursively flatten the nested saved-value dict into result,
        # one 'dotted.name=value' line per leaf.
        for (key, val) in topickle.items():
            name = '.'.join((prefix, key))
            if isinstance(val, dict):
                self.pickle_this(name, val, result)
            elif isinstance(val, tuple):
                result += [name, '=', val[0], '\n']
            else:
                result += [name, '=', val, '\n']

    def pickle(self):
        result = []
        self.pickle_this("config", self.saved_value, result)
        return ''.join(result)

    def unpickle(self, lines, base_file=True):
        """Parse 'config.x.y=value' lines into the saved-value tree."""
        tree = { }
        configbase = tree.setdefault("config", {})
        for l in lines:
            # Skip blanks and '#' comments.
            if not l or l[0] == '#':
                continue
            result = l.split('=', 1)
            if len(result) != 2:
                continue
            (name, val) = result
            val = val.strip()
            names = name.split('.')
            base = configbase
            for n in names[1:-1]:
                base = base.setdefault(n, {})
            base[names[-1]] = val
            if not base_file: # not the initial config file..
                #update config.x.y.value when exist
                # NOTE(review): eval() of a name taken from the file — only
                # acceptable because the settings file is local and trusted.
                try:
                    configEntry = eval(name)
                    if configEntry is not None:
                        configEntry.value = val
                except (SyntaxError, KeyError):
                    pass
        # we inherit from ConfigSubsection, so ...
        #object.__setattr__(self, "saved_value", tree["config"])
        if "config" in tree:
            self.setSavedValue(tree["config"])

    def saveToFile(self, filename):
        # Write to a temp name, fsync, then atomically rename into place.
        text = self.pickle()
        try:
            import os
            f = open(filename + ".writing", "w")
            f.write(text)
            f.flush()
            os.fsync(f.fileno())
            f.close()
            os.rename(filename + ".writing", filename)
        except IOError:
            print "Config: Couldn't write %s" % filename

    def loadFromFile(self, filename, base_file=True):
        f = open(filename, "r")
        self.unpickle(f.readlines(), base_file)
        f.close()
# The process-wide singleton configuration tree.
config = Config()
config.misc = ConfigSubsection()
class ConfigFile:
CONFIG_FILE = resolveFilename(SCOPE_CONFIG, "settings")
def load(self):
try:
config.loadFromFile(self.CONFIG_FILE, True)
except IOError, e:
print "unable to load config (%s), assuming defaults..." % str(e)
def save(self):
# config.save()
config.saveToFile(self.CONFIG_FILE)
def __resolveValue(self, pickles, cmap):
key = pickles[0]
if cmap.has_key(key):
if len(pickles) > 1:
return self.__resolveValue(pickles[1:], cmap[key].dict())
else:
return str(cmap[key].value)
return None
def getResolvedKey(self, key):
names = key.split('.')
if len(names) > 1:
if names[0] == "config":
ret=self.__resolveValue(names[1:], config.content.items)
if ret and len(ret):
return ret
print "getResolvedKey", key, "failed !! (Typo??)"
return ""
def NoSave(element):
    """Mark *element* as not-to-be-persisted and return it (fluent helper)."""
    element.disableSave()
    return element
# Module-level settings-file handler; the config tree is loaded on import.
configfile = ConfigFile()
configfile.load()
def getConfigListEntry(*args):
    """Return the arguments as a (description, element, ...) tuple for a
    ConfigList; at least a description and a config element are required."""
    assert len(args) > 1, "getConfigListEntry needs a minimum of two arguments (descr, configElement)"
    return args
def updateConfigElement(element, newelement):
    """Carry the current value of `element` over to `newelement`, then
    return `newelement`."""
    newelement.value = element.value
    return newelement
#def _(x):
# return x
#
#config.bla = ConfigSubsection()
#config.bla.test = ConfigYesNo()
#config.nim = ConfigSubList()
#config.nim.append(ConfigSubsection())
#config.nim[0].bla = ConfigYesNo()
#config.nim.append(ConfigSubsection())
#config.nim[1].bla = ConfigYesNo()
#config.nim[1].blub = ConfigYesNo()
#config.arg = ConfigSubDict()
#config.arg["Hello"] = ConfigYesNo()
#
#config.arg["Hello"].handleKey(KEY_RIGHT)
#config.arg["Hello"].handleKey(KEY_RIGHT)
#
##config.saved_value
#
##configfile.save()
#config.save()
#print config.pickle()
# Per-block limits of an HDMI-CEC physical address: four values of 0..15.
cec_limits = [(0,15),(0,15),(0,15),(0,15)]
class ConfigCECAddress(ConfigSequence):
    """A four-block numeric sequence ("a.b.c.d") for entering a CEC address."""
    def __init__(self, default, auto_jump = False):
        ConfigSequence.__init__(self, seperator = ".", limits = cec_limits, default = default)
        # Maximum digit count of each block, derived from its upper limit.
        self.block_len = [len(str(x[1])) for x in self.limits]
        self.marked_block = 0
        # When True the next digit replaces the block instead of appending.
        self.overwrite = True
        self.auto_jump = auto_jump

    def handleKey(self, key):
        if key == KEY_LEFT:
            if self.marked_block > 0:
                self.marked_block -= 1
            self.overwrite = True
        elif key == KEY_RIGHT:
            if self.marked_block < len(self.limits)-1:
                self.marked_block += 1
            self.overwrite = True
        elif key == KEY_HOME:
            self.marked_block = 0
            self.overwrite = True
        elif key == KEY_END:
            self.marked_block = len(self.limits)-1
            self.overwrite = True
        elif key in KEY_NUMBERS or key == KEY_ASCII:
            if key == KEY_ASCII:
                code = getPrevAsciiCode()
                if code < 48 or code > 57:
                    return  # ignore non-digit ASCII input
                number = code - 48
            else:
                number = getKeyNumber(key)
            oldvalue = self._value[self.marked_block]
            if self.overwrite:
                self._value[self.marked_block] = number
                self.overwrite = False
            else:
                oldvalue *= 10
                newvalue = oldvalue + number
                if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
                    # Digit would overflow this block: advance and retry the
                    # same digit in the next block.
                    self.handleKey(KEY_RIGHT)
                    self.handleKey(key)
                    return
                else:
                    self._value[self.marked_block] = newvalue
            if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
                # Block is full; move on automatically.
                self.handleKey(KEY_RIGHT)
            self.validate()
            self.changed()

    def genText(self):
        # Build "a.b.c.d" plus the character range of the marked block so the
        # renderer can highlight it.
        value = ""
        block_strlen = []
        for i in self._value:
            block_strlen.append(len(str(i)))
            if value:
                value += self.seperator
            value += str(i)
        leftPos = sum(block_strlen[:(self.marked_block)])+self.marked_block
        rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
        mBlock = range(leftPos, rightPos)
        return (value, mBlock)

    def getMulti(self, selected):
        (value, mBlock) = self.genText()
        if self.enabled:
            return ("mtext"[1-selected:], value, mBlock)
        else:
            return ("text", value)

    def getHTML(self, id):
        # we definitely don't want leading zeros
        return '.'.join(["%d" % d for d in self.value])
| project-magpie/enigma2-openpli | lib/python/Components/config.py | Python | gpl-2.0 | 49,143 |
# -*- coding:utf-8 -*-
## src/common/dbus_support.py
##
## Copyright (C) 2005 Andrew Sayman <lorien420 AT myrealbox.com>
## Dimitur Kirov <dkirov AT gmail.com>
## Copyright (C) 2005-2006 Nikos Kouremenos <kourem AT gmail.com>
## Copyright (C) 2005-2007 Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2006 Jean-Marie Traissard <jim AT lapin.org>
## Stefan Bethge <stefan AT lanpartei.de>
## Copyright (C) 2008 Jonathan Schleifer <js-gajim AT webkeks.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import os, sys
from common import gajim
from common import exceptions
_GAJIM_ERROR_IFACE = 'org.gajim.dbus.Error'
try:
if sys.platform == 'darwin':
try:
import osx.dbus
osx.dbus.load(True)
except ImportError:
pass
import dbus
import dbus.service
import dbus.glib
supported = True # does user have D-Bus bindings?
except ImportError:
supported = False
if not os.name == 'nt': # only say that to non Windows users
print _('D-Bus python bindings are missing in this computer')
print _('D-Bus capabilities of Gajim cannot be used')
class SystemBus:
    '''A Singleton for the DBus SystemBus'''
    def __init__(self):
        self.system_bus = None

    def SystemBus(self):
        '''Return the bus handle, raising when D-Bus is unusable.'''
        if not supported:
            raise exceptions.DbusNotSupported
        if not self.present():
            raise exceptions.SystemBusNotPresent
        return self.system_bus

    def bus(self):
        # Convenience alias for SystemBus().
        return self.SystemBus()

    def present(self):
        '''True when the system bus is reachable; connects lazily.'''
        if not supported:
            return False
        if self.system_bus is None:
            try:
                self.system_bus = dbus.SystemBus()
            except dbus.DBusException:
                self.system_bus = None
                return False
            if self.system_bus is None:
                return False
            # Don't exit Gajim when dbus is stopped
            self.system_bus.set_exit_on_disconnect(False)
        return True

system_bus = SystemBus()
class SessionBus:
    '''A Singleton for the D-Bus SessionBus'''
    def __init__(self):
        self.session_bus = None

    def SessionBus(self):
        '''Return the bus handle, raising when D-Bus is unusable.'''
        if not supported:
            raise exceptions.DbusNotSupported
        if not self.present():
            raise exceptions.SessionBusNotPresent
        return self.session_bus

    def bus(self):
        # Convenience alias for SessionBus().
        return self.SessionBus()

    def present(self):
        '''True when the session bus is reachable; connects lazily.'''
        if not supported:
            return False
        if self.session_bus is None:
            try:
                self.session_bus = dbus.SessionBus()
            except dbus.DBusException:
                self.session_bus = None
                return False
            if self.session_bus is None:
                return False
        return True

session_bus = SessionBus()
def get_interface(interface, path):
    '''Returns an interface on the current SessionBus. If the interface isn\'t
    running, it tries to start it first.'''
    if not supported:
        return None
    if session_bus.present():
        bus = session_bus.SessionBus()
    else:
        return None
    try:
        # Ask the bus daemon which services are currently running.
        obj = bus.get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
        dbus_iface = dbus.Interface(obj, 'org.freedesktop.DBus')
        running_services = dbus_iface.ListNames()
        started = True
        if interface not in running_services:
            # try to start the service
            if dbus_iface.StartServiceByName(interface, dbus.UInt32(0)) == 1:
                started = True
            else:
                started = False
        if not started:
            return None
        obj = bus.get_object(interface, path)
        return dbus.Interface(obj, interface)
    except Exception, e:
        # Any D-Bus failure is logged and reported as "no interface".
        gajim.log.debug(str(e))
        return None
def get_notifications_interface():
    '''Return the org.freedesktop.Notifications interface on the session bus,
    or None when D-Bus is unavailable.'''
    return get_interface(
        'org.freedesktop.Notifications', '/org/freedesktop/Notifications')
if supported:
    class MissingArgument(dbus.DBusException):
        '''Raised when a required argument was not provided.'''
        _dbus_error_name = _GAJIM_ERROR_IFACE + '.MissingArgument'

    class InvalidArgument(dbus.DBusException):
        '''Raised when one of the provided arguments is invalid.'''
        _dbus_error_name = _GAJIM_ERROR_IFACE + '.InvalidArgument'
# vim: se ts=3:
| kevin-teddy/gajim | src/common/dbus_support.py | Python | gpl-3.0 | 4,314 |
from django.contrib import admin
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from offensivecontent.models import OffensiveContent, OffensiveContentData
try:
from offensivecontent import registry
except ImportError:
from registration import registry
def _do_action(modeladmin, request, queryset, method):
    """Invoke `method` on the registered controller for each row's target
    object; returns the number of rows processed.

    `modeladmin` and `request` are accepted for admin-action signature
    compatibility but are not used here.
    """
    for oc in queryset:
        controller = registry.get_controller_for_model(
            oc.content_type.model_class())
        getattr(controller, method)(oc.content_type.get_object_for_this_type(pk=oc.object_id))
    return len(queryset)
def _get_message(rows_updated, app_text, app_text_plural, result_text):
if rows_updated == 1:
message_bit = "1 %s was" % app_text
else:
message_bit = "%s %s were" % (rows_updated, app_text_plural)
return "%s sucessfully %s" % (message_bit, result_text)
class OffensiveContentDataInline(admin.TabularInline):
    # Inline table of the individual user reports under an OffensiveContent
    # entry; the user FK is shown as a raw id to avoid a huge dropdown.
    model = OffensiveContentData
    raw_id_fields = ('user',)
class OffensiveContentAdmin(admin.ModelAdmin):
    """Admin for flagged content, with bulk actions to enable/disable the
    content (or its author) and to mark it safe/unsafe."""
    list_display = ('get_content_text', 'is_safe', 'number_of_submitters', 'latest', 'moderator_actions')
    inlines = [OffensiveContentDataInline,]
    actions = ['disable_content', 'enable_content', 'disable_user',
        'enable_user', 'mark_safe', 'mark_unsafe']
    date_hierarchy = "latest"
    list_filter = ["is_safe","content_type"]
    actions_on_bottom = True

    def get_content_text(self, obj):
        # Comments get a "name: text" rendering; other content falls back to
        # its default str().
        if isinstance(obj.content_object, Comment):
            return "%s: %s" % (obj.content_object.name, obj.content_object.comment)
        return obj.content_object
    get_content_text.short_description = "Content Text"

    def number_of_submitters(self, obj):
        # Count of individual user reports attached to this entry.
        return str(OffensiveContentData.objects.filter(offensive_content__pk=obj.pk).count())
    number_of_submitters.short_description = "Num. Of Marks"

    def is_content_enabled(self, obj):
        controller = registry.get_controller_for_model(obj.content_type.model_class())
        return controller.is_content_enabled(obj.content_type.get_object_for_this_type(pk=obj.object_id))
    is_content_enabled.short_description = "Is Content Enabled"

    def is_content_user_enabled(self, obj):
        controller = registry.get_controller_for_model(obj.content_type.model_class())
        return controller.is_content_user_enabled(obj.content_type.get_object_for_this_type(pk=obj.object_id))
    is_content_user_enabled.short_description = "Is Content User Enabled"

    # Bulk actions: each delegates to the registered per-model controller via
    # _do_action and reports the row count with message_user.
    def disable_content(self, request, queryset):
        rows_updated = _do_action(self, request, queryset, 'disable_content')
        self.message_user(
            request, _get_message(
                rows_updated, 'content', 'contents', 'disabled'))
    disable_content.short_description = "Disable selected content."

    def enable_content(self, request, queryset):
        rows_updated = _do_action(self, request, queryset, 'enable_content')
        self.message_user(
            request, _get_message(
                rows_updated, 'content', 'contents', 'enabled'))
    enable_content.short_description = "Enable selected content."

    def disable_user(self, request, queryset):
        rows_updated = _do_action(self, request, queryset, 'disable_user')
        self.message_user(
            request, _get_message(
                rows_updated, 'content user', 'content users', 'disabled'))
    disable_user.short_description = "Disable selected content user."

    def enable_user(self, request, queryset):
        rows_updated = _do_action(self, request, queryset, 'enable_user')
        self.message_user(
            request, _get_message(
                rows_updated, 'content user', 'content users', 'enabled'))
    enable_user.short_description = "Enable selected content user."

    def mark_safe(self, request, queryset):
        # Marking safe also re-enables the underlying content.
        rows_updated = queryset.update(is_safe=True)
        _do_action(self, request, queryset, 'enable_content')
        self.message_user(
            request, _get_message(
                rows_updated, 'content', 'contents', 'marked safe'))
    mark_safe.short_description = "Mark selected content safe."

    def mark_unsafe(self, request, queryset):
        # Marking unsafe also disables the underlying content.
        rows_updated = queryset.update(is_safe=False)
        _do_action(self, request, queryset, 'disable_content')
        self.message_user(
            request, _get_message(
                rows_updated, 'content', 'contents', 'marked unsafe'))
    mark_unsafe.short_description = "Mark selected content unsafe."
class OffensiveContentDataAdmin(admin.ModelAdmin):
    """Admin for the individual per-user offensive-content reports."""
    list_display = ('content_label', 'user', 'comment', 'pub_date',)
    raw_id_fields = ('user',)

    def content_label(self, obj):
        # "content (content type)" label for the changelist.
        return "%s (%s)" % (str(obj.offensive_content), str(obj.offensive_content.content_type))
    content_label.short_description = "Content"
admin.site.register(OffensiveContent, OffensiveContentAdmin)
admin.site.register(OffensiveContentData, OffensiveContentDataAdmin) | callowayproject/django-offensivecontent | offensivecontent/admin.py | Python | apache-2.0 | 5,082 |
import csv
import boto3
import io
import tempfile
import logging
from django.conf import settings
from .models import (
HouseholdServiceTotal,
HouseholdBillTotal,
DataSetFile,
BudgetPhase,
HouseholdService,
FinancialYear,
HouseholdClass,
)
from scorecard.models import Geography
from django.db import transaction
from django.db import IntegrityError
log = logging.getLogger("household.upload")
def amazon_s3(file_name):
    """
    Download ``media/<file_name>`` from the configured S3 bucket into a
    NamedTemporaryFile and return it (the caller is responsible for
    closing it).
    """
    client = boto3.client(
        "s3",
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
    )
    tmp = tempfile.NamedTemporaryFile()
    client.download_fileobj(
        settings.AWS_STORAGE_BUCKET_NAME, "media/" + file_name, tmp
    )
    return tmp
def import_bill_data(id):
    """
    Look up the uploaded DataSetFile and dispatch it to the importer
    matching its ``file_type``; raises for unknown types.
    """
    csv_obj = DataSetFile.objects.get(id=id)
    if csv_obj.file_type == "Service":
        household_service_total(csv_obj)
        log.info("Found correct csv Service bill, moving to Service Total")
    elif csv_obj.file_type == "Bill":
        # Use the module logger instead of the stray debug print() that was
        # duplicating this message on stdout.
        log.info("Found correct bill, moving to Bill Totals")
        household_bill_total(csv_obj)
    else:
        log.error("Unknown household csv type")
        raise Exception("csv type unknown")
def household_service_total(csv_obj):
    """
    Import per-service household totals from the uploaded CSV, creating one
    HouseholdServiceTotal row per CSV line; duplicates (same phase/year) are
    logged and skipped.
    """
    log.info("Working on service totals")
    csv_file = amazon_s3(csv_obj.csv_file.name)
    csv_file.seek(0)
    with open(csv_file.name, "r") as new_file:
        reader = csv.DictReader(new_file)
        for row in reader:
            # Resolve each foreign key from the human-readable CSV columns.
            geography = Geography.objects.get(geo_code=row["Geography"])
            financial_year = FinancialYear.objects.get(
                budget_year=row["Financial Year"]
            )
            budget_phase = BudgetPhase.objects.get(name=row["Budget Phase"])
            household_class = HouseholdClass.objects.get(name=row["Class"])
            service = HouseholdService.objects.get(name=row["Service Name"])
            total = row["Total"] if row["Total"] else None
            try:
                HouseholdServiceTotal.objects.create(
                    geography=geography,
                    financial_year=financial_year,
                    budget_phase=budget_phase,
                    household_class=household_class,
                    version=csv_obj.version,
                    service=service,
                    total=total,
                )
            except IntegrityError:
                # warning() instead of the deprecated Logger.warn alias.
                log.warning(
                    "Service total for budget phase and financial year already exists"
                )
    csv_file.close()
    log.info("Completed working on service totals")
def household_bill_total(csv_obj):
    """
    Import total household bill figures from the uploaded CSV, creating one
    HouseholdBillTotal row per CSV line; duplicates (same phase/year) are
    logged and skipped.
    """
    log.info("Working on total bill totals")
    csv_file = amazon_s3(csv_obj.csv_file.name)
    csv_file.seek(0)
    with open(csv_file.name, "r") as new_file:
        reader = csv.DictReader(new_file)
        # BUGFIX: removed leftover debug print("Working on {}") whose format
        # placeholder was never filled in.
        for row in reader:
            geography = Geography.objects.get(geo_code=row["Geography"])
            financial_year = FinancialYear.objects.get(
                budget_year=row["Financial Year"]
            )
            budget_phase = BudgetPhase.objects.get(name=row["Budget Phase"])
            household_class = HouseholdClass.objects.get(name=row["Class"])
            percent = row["Percent Increase"] if row["Percent Increase"] else None
            total = row["Total"] if row["Total"] else None
            try:
                HouseholdBillTotal.objects.create(
                    geography=geography,
                    financial_year=financial_year,
                    budget_phase=budget_phase,
                    household_class=household_class,
                    version=csv_obj.version,
                    percent=percent,
                    total=total,
                )
            except IntegrityError:
                # warning() instead of the deprecated Logger.warn alias.
                log.warning(
                    "Bill total for budget phase and financial year already exists"
                )
    csv_file.close()
    log.info("Completed working on bill totals")
| Code4SA/municipal-data | household/upload.py | Python | mit | 4,221 |
#!/usr/bin/python2
'''
Based on http://jeremyblythe.blogspot.co.uk code
2015-05-29: changed to use official Python wrapper from Google to avoid deprecated ClientLogin (stef@nstrahl.de)
== Installation of PyDrive and OAuth2 ==
> apt-get install python-pip
> pip install PyDrive
== Step 1: register pydrive itself ==
Go to https://code.google.com/apis/console (you need to be logged into Google)
Create your own project (e.g. 'pydrive').
On "APIs & auth -> APIs" menu enable Drive API
On "APIs & auth -> Credentials" menu, create OAuth2.0 Client ID.
Select Application type to be a "Installed application" and type "other"
Download JSON file
rename to client_secrets.json and copy in same path as this python script
== Step 2: obtain OAuth2.0 authorization key for our Google Account ==
> python
>>> from pydrive.auth import GoogleAuth
>>> from pydrive.drive import GoogleDrive
>>> gauth = GoogleAuth()
>>> gauth.CommandLineAuth()
Visit the webpage as instructed and copy & paste the verification key
>>> gauth.SaveCredentialsFile("pydrive_auth.txt")
Motion Uploader - uploads pictures & videos to Google Drive
'''
import smtplib
from datetime import datetime
import os.path
import sys
import ConfigParser
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
class MotionUploader:
    def __init__(self, config_file_path):
        """Read uploader settings from the INI file at config_file_path and
        connect to Google Drive."""
        # Load config
        config = ConfigParser.ConfigParser()
        config.read(config_file_path)
        # GMail account credentials
        self.username = config.get('gmail', 'user')
        self.password = config.get('gmail', 'password')
        self.from_name = config.get('gmail', 'name')
        self.sender = config.get('gmail', 'sender')
        # Recipient email address (could be same as from_addr);
        # ';'-separated lists in the config file.
        self.recipient = config.get('gmail', 'recipient').split(';')
        self.snapshotrecipient = config.get('gmail', 'snapshotrecipient').split(';')
        # Subject line for email
        self.subject = config.get('gmail', 'subject')
        # First line of email message
        self.message = config.get('gmail', 'message')
        # Folder (or collection) in Docs where you want the pictures & videos to go
        self.folder = config.get('docs', 'folder')
        # Options
        self.delete_after_upload = config.getboolean('options', 'delete-after-upload')
        self.send_email = config.getboolean('options', 'send-email')
        self._create_gdata_client()
def _create_gdata_client(self):
"""Create a pydrive oonnection."""
self.gauth = GoogleAuth()
self.gauth.LoadCredentialsFile("pydrive_auth.txt")
if self.gauth.credentials is None:
self.gauth.CommandLineAuth() # Authenticate if they're not there
elif self.gauth.access_token_expired:
self.gauth.Refresh() # Refresh them if expired
else:
self.gauth.Authorize() # Initialize the saved creds
self.gauth.SaveCredentialsFile("pydrive_auth.txt") # Save the current credentials to a file
self.drive = GoogleDrive(self.gauth)# Initialize the saved creds
def _get_folder_resource(self):
"""Find and return the resource whose title matches the given folder."""
return self.drive.ListFile({'q': "title='{}' and mimeType contains 'application/vnd.google-apps.folder' and trashed=false".format(self.folder)}).GetList()[0]
def _send_email(self,msg):
'''Send an email using the GMail account.'''
senddate=datetime.strftime(datetime.now(), '%Y-%m-%d')
m="Date: %s\r\nFrom: %s <%s>\r\nTo: %s\r\nSubject: %s\r\nX-Mailer: My-Mail\r\n\r\n" % (senddate, self.from_name, self.sender, ", ".join(self.recipient), self.subject)
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(self.username, self.password)
server.sendmail(self.sender, self.recipient, m+msg)
server.quit()
def media_upload(self, file_path, folder_resource):
'''Upload the file and return the doc'''
print('Uploading image')
doc = self.drive.CreateFile({'title':os.path.basename(file_path), 'parents':[{u'id': folder_resource['id']}]})
doc.SetContentFile(file_path)
doc.Upload()
return doc
def upload_file(self, file_path):
"""Upload a picture / video to the specified folder. Then optionally send an email and optionally delete the local file."""
folder_resource = self._get_folder_resource()
if not folder_resource:
raise Exception('Could not find the %s folder' % self.folder)
doc = self.media_upload(file_path, folder_resource)
if self.send_email:
if file_path.split('.')[-2][-8:] == 'snapshot':
self.recipient = self.recipient + self.snapshotrecipient
thumbnail_link = doc['thumbnailLink'] # unused at the moment
media_link = doc['alternateLink']
# Send an email with thumbnail and link
msg = self.message
msg += '\n\n' + media_link
self._send_email(msg)
if self.delete_after_upload:
os.remove(file_path)
if __name__ == '__main__':
    try:
        # Expect exactly two positional arguments: config file and media file.
        if len(sys.argv) < 3:
            exit('uploads pictures / videos to Google Drive\n Usage: uploader.py {config-file-path} {media-file-path}')
        config_path, media_file = sys.argv[1], sys.argv[2]
        if not os.path.exists(config_path):
            exit('Config file does not exist [%s]' % config_path)
        if not os.path.exists(media_file):
            exit('Picture / Video file does not exist [%s]' % media_file)
        uploader = MotionUploader(config_path)
        uploader.upload_file(media_file)
    except Exception as e:
        exit('Error: [%s]' % e)
| strahl/gdrive_uploader | uploader.py | Python | gpl-2.0 | 5,852 |
import logging
from datetime import datetime
from google.appengine.api.taskqueue import Task
from gcp_census.bigquery.bigquery_table_metadata import BigQueryTableMetadata
from gcp_census.bigquery.bigquery_table_streamer import BigQueryTableStreamer
from gcp_census.tasks import Tasks
class BigQueryTask(object):
    """Creates and schedules App Engine taskqueue tasks that walk the
    BigQuery hierarchy (projects -> datasets -> tables -> partitions)
    and stream table metadata."""

    def __init__(self, big_query):
        # big_query: client wrapper exposing the list_*/get_table calls used below.
        self.big_query = big_query
        self.table_streamer = BigQueryTableStreamer(big_query)

    def schedule_task_for_each_project(self):
        """Enqueue one dataset-listing task per visible project."""
        tasks = self.create_project_tasks(self.big_query.list_project_ids())
        Tasks.schedule(queue_name='bigquery-list', tasks=tasks)

    def schedule_task_for_each_dataset(self, project_id):
        """Enqueue one table-listing task per dataset in *project_id*."""
        tasks = self.create_dataset_tasks(project_id, self.big_query.
                                          list_dataset_ids(project_id))
        Tasks.schedule(queue_name='bigquery-list', tasks=tasks)

    def schedule_task_for_each_table(self, project_id, dataset_id,
                                     page_token=None):
        """Enqueue one task per table on this listing page, plus a follow-up
        task for the next page when the listing is paginated."""
        list_response = self.big_query.list_tables(project_id, dataset_id,
                                                   page_token=page_token)
        if 'tables' in list_response:
            table_id_list = [table['tableReference']['tableId']
                             for table in list_response['tables']]
            tasks = self.create_table_tasks(project_id, dataset_id,
                                            table_id_list)
            Tasks.schedule(queue_name='bigquery-tables', tasks=tasks)
        else:
            logging.info("Dataset %s.%s is empty", project_id, dataset_id)
            # NOTE(review): returning here skips the nextPageToken check;
            # this assumes a page without 'tables' never carries a token —
            # confirm against the BigQuery tables.list API.
            return
        if 'nextPageToken' in list_response:
            url = '/bigQuery/project/%s/dataset/%s?pageToken=%s' % (
                project_id, dataset_id, list_response['nextPageToken'])
            # Task name embeds the token and date to de-duplicate retries.
            task_name = '%s-%s-%s-%s' % (project_id, dataset_id,
                                         list_response['nextPageToken'],
                                         datetime.utcnow().strftime("%Y%m%d"))
            next_task = Task(
                method='GET',
                url=url,
                name=task_name)
            Tasks.schedule(queue_name='bigquery-list', tasks=[next_task])
        else:
            logging.info("There is no more tables in this dataset")

    def schedule_task_for_each_partition(self, project_id, dataset_id, table_id,
                                         partitions):
        """Enqueue one metadata task per partition of a partitioned table."""
        tasks = self.create_partition_tasks(project_id, dataset_id, table_id,
                                            partitions)
        Tasks.schedule(queue_name='bigquery-partitions', tasks=tasks)

    def stream_table_metadata(self, project_id, dataset_id, table_id):
        """Fetch a table, fan out per-partition tasks for daily-partitioned
        tables, then stream the table's metadata."""
        table = self.big_query.get_table(project_id, dataset_id, table_id)
        if table:
            table_metadata = BigQueryTableMetadata(table)
            partitions = []
            if table_metadata.is_daily_partitioned():
                partitions = self.big_query. \
                    list_table_partitions(project_id, dataset_id, table_id)
                self.schedule_task_for_each_partition(project_id,
                                                      dataset_id,
                                                      table_id,
                                                      partitions)
            self.table_streamer.stream_metadata(table_metadata, partitions)

    def create_partition_tasks(self, project_id, dataset_id, table_id,
                               partitions):
        """Return table tasks addressing each 'table$partition' decorator id."""
        table_id_list = ["{}${}".format(table_id, partition['partitionId'])
                         for partition in partitions]
        return self.create_table_tasks(project_id, dataset_id, table_id_list)

    @staticmethod
    def create_project_tasks(project_id_list):
        # Lazily yields GET tasks hitting the per-project listing handler.
        for project_id in project_id_list:
            yield Task(method='GET',
                       url='/bigQuery/project/%s' % project_id)

    @staticmethod
    def create_dataset_tasks(project_id, dataset_id_list):
        # Lazily yields GET tasks hitting the per-dataset listing handler.
        for dataset_id in dataset_id_list:
            yield Task(method='GET',
                       url='/bigQuery/project/%s/dataset/%s'
                           % (project_id, dataset_id))

    @staticmethod
    def create_table_tasks(project_id, dataset_id, table_id_list):
        # Lazily yields GET tasks hitting the per-table metadata handler.
        for table_id in table_id_list:
            yield Task(method='GET',
                       url='/bigQuery/project/%s/dataset/%s/table/%s'
                           % (project_id, dataset_id, table_id))
| ocadotechnology/gcp-census | gcp_census/bigquery/bigquery_task.py | Python | apache-2.0 | 4,536 |
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# imports
import shlex
import string
# slam data class
class slamData():
    """
    SLAM data container

    Attributes
    ---------
    landmarkX: array
        Array of landmarks x position
    landmarkY: array
        Array of landmarks y position
    poseX: array
        Array of poses x position
    poseY: array
        Array of poses y position
    poseA: array
        Array of poses orientation (angle)
    """
    def __init__(self, dataFilename):
        self.landmarkX = []
        self.landmarkY = []
        self.poseX = []
        self.poseY = []
        self.poseA = []
        self.getDataFromFile(dataFilename)

    def getDataFromFile(self, dataFilename):
        """
        Fills object arrays with SLAM data given by file

        Parameters
        ----------
        dataFilename: string
            filename with SLAM data in g2o format
        """
        # 'with' guarantees the file is closed even if parsing raises
        # (the previous explicit close() was skipped on error).
        with open(dataFilename, 'r') as f:
            for line in f:
                # split string
                lineWords = shlex.split(line)
                if not lineWords:
                    # robustness: skip blank lines (previously an IndexError)
                    continue
                tag = lineWords[0]
                # BUG FIX: string.find() is Python-2-only; the 'in' operator
                # gives the same substring-containment semantics portably.
                if "VERTEX_SE2" in tag:
                    # robot pose record: VERTEX_SE2 <id> <x> <y> <theta>
                    self.poseX.append(float(lineWords[2]))
                    self.poseY.append(float(lineWords[3]))
                    self.poseA.append(float(lineWords[4]))
                elif "VERTEX_XY" in tag:
                    # landmark record: VERTEX_XY <id> <x> <y>
                    self.landmarkX.append(float(lineWords[2]))
                    self.landmarkY.append(float(lineWords[3]))
| francocurotto/GraphSLAM | src/python-helpers/commons/slamData.py | Python | gpl-3.0 | 2,205 |
# vim: set et ts=4 sw=4 fdm=marker
"""
MIT License
Copyright (c) 2016 Jesse Hogan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# TODO Write Tests
from accounts import *
from entities import *
from logs import *
from pprint import pprint
import os
import yaml
class configfile(entity):
    """Singleton wrapper around the EPIPHANY_YAML configuration file.

    Exposes the parsed 'accounts' and 'logs' collections plus raw key
    access via subscription (cfg['key']).
    """
    _instance = None

    def __init__(self):
        self.isloaded = False
        try:
            # Path comes from the environment; absence simply leaves the
            # object unloaded until .file is assigned explicitly.
            self.file = os.environ['EPIPHANY_YAML']
        except KeyError:
            pass

    @classmethod
    def getinstance(cls):
        """Return the process-wide singleton, creating it on first use."""
        if cls._instance is None:
            cls._instance = configfile()
        return cls._instance

    @property
    def file(self):
        return self._file

    @file.setter
    def file(self, v):
        # Assigning a new path immediately (re)loads the file.
        self._file = v
        self.load()

    @property
    def accounts(self):
        return self._accounts

    @accounts.setter
    def accounts(self, v):
        self._accounts = v

    @property
    def logs(self):
        return self._logs

    @logs.setter
    def logs(self, v):
        # BUG FIX: previously assigned to self._log, so values set through
        # this property were never seen by the getter (which reads _logs).
        self._logs = v

    def clear(self):
        """Reset the collections and mark the config as not loaded."""
        self._accounts = accounts()
        self._logs = logs()
        self.isloaded = False

    def load(self):
        """Parse self.file and populate the accounts and logs collections.

        Exceptions from opening/parsing propagate to the caller; isloaded
        stays False in that case (same semantics as the previous
        try/except-raise/else structure).
        """
        self.clear()
        with open(self.file, 'r') as stream:
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors for tagged nodes; prefer yaml.safe_load
            # if this file can ever come from an untrusted source.
            self._cfg = yaml.load(stream)
        for acct in self['accounts']:
            self.accounts += account.create(acct)
        for d in self['logs']:
            l = log.create(d)
            self.logs += l
        self.isloaded = True

    def __getitem__(self, key):
        return self._cfg[key]

    @property
    def inproduction(self):
        """True when the config's 'environment' denotes a production deploy."""
        try:
            env = self['environment'].lower()
        except KeyError:
            raise Exception('No environment value set in config file.')
        return env in ['prd', 'production', 'live']
| jhogan/commonpy | configfile.py | Python | mit | 2,882 |
#from flask import render_template
#from flask.ext.mail import Message
from tinysmtp import Connection, Message
from decorators import async
#from almlogic import almlogic #import this to get config vars
#almlogic.config.from_object('config')
#from almlogic import models
@async
def send_async_email(msg):
    """Deliver *msg* on a background thread over Gmail SMTP.

    SECURITY: the host, port, account name and password are hard-coded
    below and committed to source control — rotate these credentials and
    move them into configuration.
    NOTE: the project-local 'async' decorator name shadows a Python 3.7+
    keyword; this module is Python 2 code.
    """
    # send email with tinysmtp. TODO: Use constants from config.py instead here.
    with Connection(hostname='smtp.googlemail.com',port=465, username='calzone.test', password='aeiouaeiou', ssl=False, tls=True) as conn:
        conn.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Build a Message with both plain-text and HTML bodies and hand it
    off for asynchronous delivery."""
    mail = Message(subject=subject, sender=sender, recipients=recipients)
    mail.body = text_body
    mail.html = html_body
    send_async_email(mail)
def alarm_notification(ZoneName):
    """Email an alarm notice for *ZoneName* to the notification recipients."""
    # pull users from DB (TODO: This is untested)
    # EmailUsers = models.Email.query.all()
    # MainAdmin = models.User.query.filter_by(role = '1').first()
    # NOTE(review): the recipient list is hard-coded pending the DB lookup above.
    send_email(subject = "Zone: " + ZoneName + " Alarming Now!",
        sender = "Calzone", # display name used as sender
        recipients = ['evanmj@gmail.com'], # EmailUsers, once the query works
        text_body = "Zone: " + ZoneName + " Alarming Now!", # plain text users get this
        html_body = "Zone: " + ZoneName + " Alarming Now!") # html users get this
#hack to make fake context requests to render templates
#def create_email(TemplateName,ArgName):
# with almlogic.test_request_context('/send_email'):
# return render_template(TemplateName, ZoneName=ArgName)
| evanmj/calzone | almlogic/emails.py | Python | bsd-3-clause | 1,575 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class res_partner(osv.osv):
    """res.partner extended with an automatically assigned sequence number."""
    _inherit = 'res.partner'

    _columns = {
        # Human-readable partner number drawn from the 'res.partner' ir.sequence.
        'partner_sequence':fields.char('Number'),
    }

    def create(self, cr, uid, vals , context=None):
        """Assign the next 'res.partner' sequence value, then create the record."""
        # Fetch the next number; this overwrites any caller-supplied value.
        code = self.pool.get('ir.sequence').get(cr, uid, 'res.partner')
        vals['partner_sequence'] = code
        return super(res_partner, self).create(cr, uid, vals, context=context)
| alanljj/tkobr-addons | tko_partner_sequence/partner.py | Python | agpl-3.0 | 1,524 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class HomeWork02Remove(unittest.TestCase):
    """Selenium regression test: log into php4dvd, remove the only movie,
    verify the list is empty, then log out."""

    def setUp(self):
        # native_events disabled for cross-platform click stability.
        self.driver = webdriver.Firefox(capabilities={'native_events': False}, )
        self.driver.implicitly_wait(2)
        self.base_url = "http://localhost/"
        self.verificationErrors = []
        # When True, the next alert encountered is accepted (OK); else dismissed.
        self.accept_next_alert = True

    def test_home_work02_01_remove(self):
        """Remove a movie, confirming both the removal and logout alerts."""
        driver = self.driver
        driver.get(self.base_url + "/php4dvd/")
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys("admin")
        driver.find_element_by_name("password").clear()
        driver.find_element_by_name("password").send_keys("admin")
        driver.find_element_by_name("submit").click()
        # Open the first (cover-less) movie tile, then click its Remove icon.
        driver.find_element_by_css_selector(".movie_box > div.movie_cover > div.nocover").click()
        driver.find_element_by_css_selector("img[alt=\"Remove\"]").click()#find_element_by_xpath("..").click()
        self.assertRegexpMatches(self.close_alert_and_get_its_text(), r"^Are you sure you want to remove this[\s\S]$")
        driver.find_element_by_css_selector("h1").click()
        # After removal there should be no movie tiles left.
        self.assertEquals(len(driver.find_elements_by_css_selector("div.nocover")), 0)
        driver.find_element_by_link_text("Log out").click()
        self.assertRegexpMatches(self.close_alert_and_get_its_text(), r"^Are you sure you want to log out[\s\S]$")

    def is_element_present(self, how, what):
        """True if an element is found by the (how, what) locator pair."""
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e: return False
        return True

    def is_alert_present(self):
        """True if a JavaScript alert is currently displayed."""
        # NOTE(review): switch_to_alert() is the deprecated pre-Selenium-3
        # spelling of switch_to.alert; kept for the selenium version this
        # suite was written against.
        try: self.driver.switch_to_alert()
        except NoAlertPresentException as e: return False
        return True

    def close_alert_and_get_its_text(self):
        """Accept or dismiss the current alert (per accept_next_alert) and
        return its text; the flag is always reset to True afterwards."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        # Fail the test if any soft verification errors were recorded.
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| anache/ana-se-python-17 | selenium-py-training-chernova/php4dvd/2016_05_08_HW_02/test_hw_02_02_remove.py | Python | apache-2.0 | 2,547 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mojom
import mojom_pack
import mojom_test
import sys
EXPECT_EQ = mojom_test.EXPECT_EQ
EXPECT_TRUE = mojom_test.EXPECT_TRUE
RunTest = mojom_test.RunTest
def TestOrdinalOrder():
    """Fields with explicit ordinals pack in ordinal, not declaration, order."""
    errors = 0
    struct = mojom.Struct('test')
    struct.AddField('testfield1', mojom.INT32, 2)
    struct.AddField('testfield2', mojom.INT32, 1)
    ps = mojom_pack.PackedStruct(struct)
    errors += EXPECT_EQ(2, len(ps.packed_fields))
    # Ordinal 1 must come before ordinal 2 regardless of declaration order.
    errors += EXPECT_EQ('testfield2', ps.packed_fields[0].field.name)
    errors += EXPECT_EQ('testfield1', ps.packed_fields[1].field.name)
    return errors


def TestZeroFields():
    """Packing an empty struct yields no packed fields."""
    errors = 0
    struct = mojom.Struct('test')
    ps = mojom_pack.PackedStruct(struct)
    errors += EXPECT_EQ(0, len(ps.packed_fields))
    return errors


def TestOneField():
    """A single field packs into exactly one packed field."""
    errors = 0
    struct = mojom.Struct('test')
    struct.AddField('testfield1', mojom.INT8)
    ps = mojom_pack.PackedStruct(struct)
    errors += EXPECT_EQ(1, len(ps.packed_fields))
    return errors
def TestSequence(kinds, fields, offsets):
    """Pack a struct whose fields have the given kinds and verify the result.

    |kinds| is a sequence of mojom.Kinds that specify the fields that are to
    be created.
    |fields| is the expected order of the resulting fields, with the integer
    "1" first.
    |offsets| is the expected order of offsets, with the integer "0" first.
    Returns the number of failed expectations.
    """
    errors = 0
    struct = mojom.Struct('test')
    index = 1
    for kind in kinds:
        struct.AddField("%d" % index, kind)
        index += 1
    ps = mojom_pack.PackedStruct(struct)
    num_fields = len(ps.packed_fields)
    errors += EXPECT_EQ(len(kinds), num_fields)
    for i in xrange(num_fields):
        # BUG FIX: the results of these per-field checks were previously
        # discarded, so name/offset mismatches could never fail a test.
        errors += EXPECT_EQ("%d" % fields[i], ps.packed_fields[i].field.name)
        errors += EXPECT_EQ(offsets[i], ps.packed_fields[i].offset)
    return errors
def TestPaddingPackedInOrder():
    """Two byte-sized fields declared first pack contiguously before the int32."""
    return TestSequence(
        (mojom.INT8, mojom.UINT8, mojom.INT32),
        (1, 2, 3),
        (0, 1, 4))


def TestPaddingPackedOutOfOrder():
    """A trailing uint8 backfills the padding byte after the leading int8."""
    return TestSequence(
        (mojom.INT8, mojom.INT32, mojom.UINT8),
        (1, 3, 2),
        (0, 1, 4))


def TestPaddingPackedOverflow():
    """Byte fields fill padding until alignment forces them into new slots."""
    kinds = (mojom.INT8, mojom.INT32, mojom.INT16, mojom.INT8, mojom.INT8)
    # 2 bytes should be packed together first, followed by short, then by int.
    fields = (1, 4, 3, 2, 5)
    offsets = (0, 1, 2, 4, 8)
    return TestSequence(kinds, fields, offsets)
def TestAllTypes():
    """Every kind packs at its expected position and aligned offset.

    Cleanups: the unused local 'struct'/'array' assignments were removed,
    and the expectation tuples — which carried 17 entries for 16 kinds —
    were trimmed of the dead trailing (17, 80) pair; TestSequence only
    checks len(kinds) entries, so behavior is unchanged.
    """
    return TestSequence(
        (mojom.BOOL, mojom.INT8, mojom.STRING, mojom.UINT8,
         mojom.INT16, mojom.DOUBLE, mojom.UINT16,
         mojom.INT32, mojom.UINT32, mojom.INT64,
         mojom.FLOAT, mojom.STRING, mojom.HANDLE,
         mojom.UINT64, mojom.Struct('test'), mojom.Array()),
        (1, 2, 4, 5, 7, 3, 6, 8, 9, 10, 11, 13, 12, 14, 15, 16),
        (0, 1, 2, 4, 6, 8, 16, 24, 28, 32, 40, 44, 48, 56, 64, 72))
def TestPaddingPackedOutOfOrderByOrdinal():
    """Explicit ordinals let a later byte-sized field backfill padding."""
    errors = 0
    struct = mojom.Struct('test')
    struct.AddField('testfield1', mojom.INT8)
    struct.AddField('testfield3', mojom.UINT8, 3)
    struct.AddField('testfield2', mojom.INT32, 2)
    ps = mojom_pack.PackedStruct(struct)
    errors += EXPECT_EQ(3, len(ps.packed_fields))
    # Second byte should be packed in behind first, altering order.
    errors += EXPECT_EQ('testfield1', ps.packed_fields[0].field.name)
    errors += EXPECT_EQ('testfield3', ps.packed_fields[1].field.name)
    errors += EXPECT_EQ('testfield2', ps.packed_fields[2].field.name)
    # Second byte should be packed with first.
    errors += EXPECT_EQ(0, ps.packed_fields[0].offset)
    errors += EXPECT_EQ(1, ps.packed_fields[1].offset)
    errors += EXPECT_EQ(4, ps.packed_fields[2].offset)
    return errors
def TestBools():
    """Bools pack 8-to-a-byte in declaration order; overflow starts a new byte."""
    errors = 0
    struct = mojom.Struct('test')
    struct.AddField('bit0', mojom.BOOL)
    struct.AddField('bit1', mojom.BOOL)
    struct.AddField('int', mojom.INT32)
    struct.AddField('bit2', mojom.BOOL)
    struct.AddField('bit3', mojom.BOOL)
    struct.AddField('bit4', mojom.BOOL)
    struct.AddField('bit5', mojom.BOOL)
    struct.AddField('bit6', mojom.BOOL)
    struct.AddField('bit7', mojom.BOOL)
    struct.AddField('bit8', mojom.BOOL)
    ps = mojom_pack.PackedStruct(struct)
    errors += EXPECT_EQ(10, len(ps.packed_fields))
    # First 8 bits packed together into byte 0, one bit position each.
    for i in xrange(8):
        pf = ps.packed_fields[i]
        errors += EXPECT_EQ(0, pf.offset)
        errors += EXPECT_EQ("bit%d" % i, pf.field.name)
        errors += EXPECT_EQ(i, pf.bit)
    # Ninth bit goes into second byte.
    errors += EXPECT_EQ("bit8", ps.packed_fields[8].field.name)
    errors += EXPECT_EQ(1, ps.packed_fields[8].offset)
    errors += EXPECT_EQ(0, ps.packed_fields[8].bit)
    # int comes last, at the next 4-byte-aligned offset.
    errors += EXPECT_EQ("int", ps.packed_fields[9].field.name)
    errors += EXPECT_EQ(4, ps.packed_fields[9].offset)
    return errors
def Main(args):
    """Run the pack test suite; return the total failed-expectation count."""
    # NOTE(review): TestOrdinalOrder is not part of this suite (matches the
    # original selection) — confirm whether that omission is intentional.
    suite = (
        TestZeroFields,
        TestOneField,
        TestPaddingPackedInOrder,
        TestPaddingPackedOutOfOrder,
        TestPaddingPackedOverflow,
        TestAllTypes,
        TestPaddingPackedOutOfOrderByOrdinal,
        TestBools,
    )
    failures = 0
    for case in suite:
        failures += RunTest(case)
    return failures


if __name__ == '__main__':
    sys.exit(Main(sys.argv[1:]))
| ChromiumWebApps/chromium | mojo/public/bindings/pylib/generate/mojom_pack_tests.py | Python | bsd-3-clause | 5,268 |
import os
import time
import glob
import importlib
import sys
isVerbose = False
thisFile = "all_python"  # module name used in the diagnostics below

# Resolve this file's directory. workDir stays fixed; thisDir is reused
# as a scratch variable throughout.
workDir = os.path.dirname(os.path.realpath(__file__))
thisDir = os.path.dirname(os.path.realpath(__file__))
# Make the sandbox's sibling pLib importable.
sys.path.extend([os.path.dirname(workDir) + "/pLib"])
print ".. importing files from this SANDBOX directory \n{}".format(thisDir)
# Resolve the shared xBed/pLib (path relative to the current directory)
# by chdir-ing into it, then return to the working directory.
thisDir = "../../../xBed/pLib"
os.chdir(thisDir)
thisDir = os.getcwd()
sys.path.extend([thisDir])
print ".. importing files from xBed/pLib directory \n{}".format(thisDir)
os.chdir(workDir)
print ".. returning to directory \n{}".format(workDir)
print time.strftime("%a %b %d %H:%M:%S %Z %Y")
from config import *
#sys.path.extend(["./pLib","../../../xBed/pLib"])
# Locate <sandbox>/pLib/<sandbox>.info_variables.txt and register it in
# all_info (presumably supplied by 'from config import *' — confirm).
thisDir = workDir
sandboxPath = os.path.dirname(thisDir)
sandboxName = os.path.basename(sandboxPath)
infoVariablesFile = sandboxName + ".info_variables.txt"
infoVariablesFile = "/".join([sandboxPath,"pLib",infoVariablesFile])
if os.path.isfile(infoVariablesFile):
    all_info["sandboxName"] = sandboxName
    all_info["sandboxPath"] = sandboxPath
    all_info["infoVariablesFile"] = infoVariablesFile
    print ".. registered infoVariablesFile as {}".format(infoVariablesFile)
else:
    # Error
    print "\n".join([
        "",
        "ERROR from {}".format(thisFile),
        ".. missing the infoVariablesFile as {}".format(infoVariablesFile)
        ])
import P
import util
import core
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import model_base
from neutron.db import models_v2
"""New mapping tables."""
class OFCId(object):
    """Resource ID on OpenFlow Controller."""
    # ID assigned by the OFC; unique across all mapped resources of a type.
    ofc_id = sa.Column(sa.String(255), unique=True, nullable=False)


class NeutronId(object):
    """Logical ID on Neutron."""
    # UUID of the Neutron-side resource; primary key of each mapping row.
    neutron_id = sa.Column(sa.String(36), primary_key=True)


class OFCTenantMapping(model_base.BASEV2, NeutronId, OFCId):
    """Represents a Tenant on OpenFlow Network/Controller."""


class OFCNetworkMapping(model_base.BASEV2, NeutronId, OFCId):
    """Represents a Network on OpenFlow Network/Controller."""


class OFCPortMapping(model_base.BASEV2, NeutronId, OFCId):
    """Represents a Port on OpenFlow Network/Controller."""


class OFCRouterMapping(model_base.BASEV2, NeutronId, OFCId):
    """Represents a router on OpenFlow Network/Controller."""


class OFCFilterMapping(model_base.BASEV2, NeutronId, OFCId):
    """Represents a Filter on OpenFlow Network/Controller."""


class PortInfo(model_base.BASEV2):
    """Represents a Virtual Interface."""
    # Mirrors ports.id; the row disappears with its port (ON DELETE CASCADE).
    id = sa.Column(sa.String(36),
                   sa.ForeignKey('ports.id', ondelete="CASCADE"),
                   primary_key=True)
    datapath_id = sa.Column(sa.String(36), nullable=False)  # OpenFlow switch DPID
    port_no = sa.Column(sa.Integer, nullable=False)         # switch port number
    vlan_id = sa.Column(sa.Integer, nullable=False)
    mac = sa.Column(sa.String(32), nullable=False)
    # Backref lets Port expose .portinfo; joined-loaded, deleted in cascade.
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("portinfo",
                            lazy='joined', uselist=False,
                            cascade='delete'))
| sajuptpm/neutron-ipam | neutron/plugins/nec/db/models.py | Python | apache-2.0 | 2,354 |
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import now, timedelta
from sa_api_v2.models import DataSnapshotRequest
import logging
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that prunes day-old bulk data snapshots."""
    help = 'Clear any bulk data snapshots (and requests) older than a day.'

    def handle(self, *args, **options):
        """Delete DataSnapshotRequests older than 24h; snapshots cascade."""
        log.info('Clearing bulk data snapshots older than a day')
        one_day_ago = now() - timedelta(days=1)
        # Deleting the requests cascades to their snapshots.
        stale_requests = DataSnapshotRequest.objects.filter(
            requested_at__lt=one_day_ago)
        stale_requests.delete()
| codeforsanjose/MobilityMapApi | src/sa_api_v2/management/commands/clearsnapshots.py | Python | gpl-3.0 | 603 |
#!/usr/bin/env python
import datetime
import unittest
import inspect
from nose.plugins.attrib import attr
from pyon.util.int_test import IonIntegrationTestCase
from pyon.public import RT, PRED, OT, log, LCE, LCS, AS
from pyon.public import IonObject
from pyon.util.ion_time import IonTime
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.services.sa.iinstrument_management_service import InstrumentManagementServiceClient
from interface.services.coi.iorg_management_service import OrgManagementServiceClient
from interface.services.sa.iobservatory_management_service import ObservatoryManagementServiceClient
from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient
from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from ion.services.dm.utility.granule import RecordDictionaryTool
from ion.services.dm.utility.test.parameter_helper import ParameterHelper
from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient
from ion.services.dm.test.test_dm_end_2_end import DatasetMonitor
from traceback import extract_stack, format_list
import time
import numpy as np
import functools
STAGE_LOAD_ORGS = 1
STAGE_LOAD_PARAMS = 2
STAGE_LOAD_AGENTS = 3
STAGE_LOAD_ASSETS = 4
sep_bar = '----------------------------------------------------------------------'
def assertion_wrapper(func):
    """Decorator: call *func* and return True on success; on AssertionError,
    log the failure with a caller-stack excerpt and return False."""
    @functools.wraps(func)
    def _checked(*args, **kwargs):
        # Capture the call site up front so the log shows where the
        # assertion helper was invoked from.
        caller_frames = extract_stack(limit=4)
        try:
            func(*args, **kwargs)
        except AssertionError as e:
            log.error('\n%s\n%s\n%s', sep_bar,
                      ''.join(format_list(caller_frames[:-1])), e.message)
            return False
        return True
    return _checked
@attr('INT', group='sa')
class TestObservatoryManagementFullIntegration(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
self.RR = ResourceRegistryServiceClient()
self.RR2 = EnhancedResourceRegistryClient(self.RR)
self.OMS = ObservatoryManagementServiceClient()
self.org_management_service = OrgManagementServiceClient()
self.IMS = InstrumentManagementServiceClient()
self.dpclient = DataProductManagementServiceClient()
self.pubsubcli = PubsubManagementServiceClient()
self.damsclient = DataAcquisitionManagementServiceClient()
self.dataset_management = DatasetManagementServiceClient()
self.data_retriever = DataRetrieverServiceClient()
self.data_product_management = DataProductManagementServiceClient()
self._load_stage = 0
self._resources = {}
def preload_ooi(self, stage=STAGE_LOAD_ASSETS):
# Preloads OOI up to a given stage
if self._load_stage >= stage:
return
if self._load_stage < STAGE_LOAD_ORGS:
log.info("--------------------------------------------------------------------------------------------------------")
log.info("Preloading stage: %s (OOIR2 Orgs, users, roles)", STAGE_LOAD_ORGS)
# load_OOIR2_scenario
self.container.spawn_process("Loader", "ion.processes.bootstrap.ion_loader", "IONLoader", config=dict(
op="load",
scenario="OOIR2",
path="master",
))
self._load_stage = STAGE_LOAD_ORGS
if self._load_stage < STAGE_LOAD_PARAMS:
log.info("--------------------------------------------------------------------------------------------------------")
log.info("Preloading stage: %s (BASE params, streamdefs)", STAGE_LOAD_PARAMS)
# load_parameter_scenarios
self.container.spawn_process("Loader", "ion.processes.bootstrap.ion_loader", "IONLoader", config=dict(
op="load",
scenario="BETA",
path="master",
categories="ParameterFunctions,ParameterDefs,ParameterDictionary,StreamDefinition",
clearcols="owner_id,org_ids",
assets="res/preload/r2_ioc/ooi_assets",
parseooi="True",
))
self._load_stage = STAGE_LOAD_PARAMS
if self._load_stage < STAGE_LOAD_AGENTS:
log.info("--------------------------------------------------------------------------------------------------------")
log.info("Preloading stage: %s (OOIR2_I agents, model links)", STAGE_LOAD_AGENTS)
# load_OOIR2_agents
self.container.spawn_process("Loader", "ion.processes.bootstrap.ion_loader", "IONLoader", config=dict(
op="load",
scenario="OOIR2_I",
path="master",
))
self._load_stage = STAGE_LOAD_AGENTS
if self._load_stage < STAGE_LOAD_ASSETS:
log.info("--------------------------------------------------------------------------------------------------------")
log.info("Preloading stage: %s (OOI assets linked to params, agents)", STAGE_LOAD_ASSETS)
# load_ooi_assets
self.container.spawn_process("Loader", "ion.processes.bootstrap.ion_loader", "IONLoader", config=dict(
op="load",
loadooi="True",
path="master",
assets="res/preload/r2_ioc/ooi_assets",
bulk="True",
debug="True",
ooiuntil="9/1/2013",
ooiparams="True",
#excludecategories: DataProduct,DataProductLink,Deployment,Workflow,WorkflowDefinition
))
self._load_stage = STAGE_LOAD_ASSETS
# 'DataProduct,DataProductLink,WorkflowDefinition,ExternalDataProvider,ExternalDatasetModel,ExternalDataset,ExternalDatasetAgent,ExternalDatasetAgentInstance',
@unittest.skip('Work in progress')
def test_observatory(self):
self._load_stage = 0
self._resources = {}
passing = True
self.assertTrue(True)
# LOAD STEP 1
self.preload_ooi(stage=STAGE_LOAD_ORGS)
passing &= self.orguserrole_assertions()
# LOAD STEP 2
self.preload_ooi(stage=STAGE_LOAD_PARAMS)
passing &= self.parameter_assertions()
# LOAD STEP 3
self.preload_ooi(stage=STAGE_LOAD_AGENTS)
passing &= self.agent_assertions()
# LOAD STEP 4
self.preload_ooi(stage=STAGE_LOAD_ASSETS)
# Check OOI preloaded resources to see if they match needs for this test and for correctness
passing &= self.sites_assertions()
passing &= self.device_assertions()
passing &= self.deployment_assertions()
# Extensive tests on select RSN nodes
passing &= self.rsn_node_checks()
# Extensive tests on select RSN instruments
passing &= self.check_rsn_instrument()
passing &= self.check_rsn_instrument_data_product()
# Extensive tests on a glider
#passing &= self.check_glider()
# Extensive tests on a CG assembly
#passing &= self.check_cg_assembly()
# Add a new instrument agent
# Add a new instrument agent instance
# Check DataProducts
# Check Provenance
IonIntegrationTestCase.assertTrue(self, passing)
# -------------------------------------------------------------------------
def orguserrole_assertions(self):
passing = True
passing &= self._check_marine_facility("MF_CGSN")
passing &= self._check_marine_facility("MF_RSN")
passing &= self._check_marine_facility("MF_EA")
return passing
def _check_marine_facility(self, preload_id):
    """Verify one preloaded marine-facility Org: lcstate, members and roles.

    Caches the resolved resource id in self._resources under `preload_id`.
    """
    ok = True
    log.debug("Checking marine facility %s and associations", preload_id)
    facility = self.retrieve_ooi_asset(preload_id)
    facility_id = facility._id
    self._resources[preload_id] = facility_id
    ok &= self.assertEquals(facility.lcstate, LCS.DEPLOYED)

    # The facility Org must have at least 3 members...
    members, _ = self.RR.find_objects(subject=facility_id, predicate=PRED.hasMembership, id_only=True)
    ok &= self.assertTrue(len(members) >= 3)

    # ...and at least 5 roles, including the three standard operator/manager roles.
    roles, _ = self.RR.find_objects(subject=facility_id, predicate=PRED.hasRole, id_only=False)
    ok &= self.assertTrue(len(roles) >= 5)
    for role_name in ("ORG_MANAGER", "OBSERVATORY_OPERATOR", "INSTRUMENT_OPERATOR"):
        ok &= self._check_role_assignments(roles, role_name)
    return ok
def _check_role_assignments(self, role_list, role_name):
    """Verify the named role exists in role_list and has >= 1 subject assigned.

    Args:
        role_list: UserRole resource objects to search (by governance_name).
        role_name: governance_name of the role to verify.

    Returns:
        True if the role was found (or recorded as missing by the finder)
        and, when found, has at least one assigned subject.
    """
    passing = True
    role_obj = self._find_resource_in_list(role_list, "governance_name", role_name)
    if role_obj:
        # BUG FIX: find_subjects() returns a (resources, associations) tuple
        # (see its other call sites in this class). The previous code took
        # len() of that 2-tuple, which is always 2, so the >= 1 assertion
        # could never fail. Unpack the resource list before counting.
        res_list, _ = self.RR.find_subjects(predicate=PRED.hasRole, object=role_obj._id, id_only=True)
        passing &= self.assertTrue(len(res_list) >= 1)
    return passing
def parameter_assertions(self):
    """Spot-check preloaded parameter contexts, dictionaries and stream defs."""
    passing = True
    pctx_list, _ = self.RR.find_resources_ext(restype=RT.ParameterContext)
    passing &= self.assertTrue(len(pctx_list) >= 10)

    pdict_list, _ = self.RR.find_resources_ext(restype=RT.ParameterDictionary)
    passing &= self.assertTrue(len(pdict_list) >= 10)

    sdef_list, _ = self.RR.find_resources_ext(restype=RT.StreamDefinition)
    passing &= self.assertTrue(len(sdef_list) >= 10)

    # Verify that a PDict has the appropriate QC parameters defined
    pdicts, _ = self.RR.find_resources_ext(restype=RT.ParameterDictionary, alt_id_ns='PRE', alt_id='DICT110')
    passing &= self.assertTrue(len(pdicts)==1)
    if not pdicts:
        return passing

    pdict = pdicts[0]

    # According to the latest SAF, density should NOT have trend
    parameters, _ = self.RR.find_objects(pdict, PRED.hasParameterContext)
    names = [i.name for i in parameters if i.name.startswith('density')]
    passing &= self.assertTrue('density_trndtst_qc' not in names)
    return passing
def agent_assertions(self):
    """Agent resource checks; no assertions implemented yet, so always passes."""
    # TODO: More tests?
    return True
def sites_assertions(self):
    """Verify preloaded Observatory and PlatformSite resources are DEPLOYED."""
    passing = True
    observatory_list, _ = self.RR.find_resources_ext(restype=RT.Observatory)
    passing &= self.assertTrue(len(observatory_list) >= 40)
    for obs in observatory_list:
        passing &= self.assertEquals(obs.lcstate, LCS.DEPLOYED)

    platform_site_list, _ = self.RR.find_resources(RT.PlatformSite, id_only=False)
    log.debug('platform sites: %s', [ps.name for ps in platform_site_list])
    passing &= self.assertTrue(len(platform_site_list) >= 30)
    return passing
def device_assertions(self):
    """Verify preloaded device and agent resources.

    Devices are expected to still be PLANNED after preload, while platform
    and instrument agents are expected to be DEPLOYED.
    """
    passing = True
    platform_device_list, _ = self.RR.find_resources(RT.PlatformDevice, id_only=False)
    passing &= self.assertTrue(len(platform_device_list) >= 30)
    for pdev in platform_device_list:
        log.debug('platform device: %s', pdev.name)
        passing &= self.assertEquals(pdev.lcstate, LCS.PLANNED)

    platform_agent_list, _ = self.RR.find_resources(RT.PlatformAgent, id_only=False)
    passing &= self.assertTrue(len(platform_agent_list) >= 2)
    for pagent in platform_agent_list:
        log.debug('platform agent: %s', pagent.name)
        passing &= self.assertEquals(pagent.lcstate, LCS.DEPLOYED)

    instrument_agent_list, _ = self.RR.find_resources(RT.InstrumentAgent, id_only=False)
    passing &= self.assertTrue(len(instrument_agent_list) >= 3)
    for iagent in instrument_agent_list:
        log.debug('instrument agent: %s', iagent.name)
        passing &= self.assertEquals(iagent.lcstate, LCS.DEPLOYED)

        # Every instrument agent must reference at least one model.
        model_list, _ = self.RR.find_objects(subject=iagent._id, predicate=PRED.hasModel, id_only=True)
        passing &= self.assertTrue(len(model_list) >= 1, "IA %s" % iagent.name)
    return passing
def deployment_assertions(self):
    """Verify that preloaded Deployment resources exist and are DEPLOYED."""
    ok = True
    deployments, _ = self.RR.find_resources(RT.Deployment, id_only=False)
    ok &= self.assertTrue(len(deployments) >= 30)
    for deployment in deployments:
        log.debug('deployment: %s', deployment.name)
        ok &= self.assertEquals(deployment.lcstate, LCS.DEPLOYED)
    return ok
def rsn_node_checks(self):
    """
    Walk an RSN node's device lifecycle and activate its deployment.

    Current preload creates:
    - PlatformDevice in PLANNED
    - PlatformSite in DEPLOYED
    - Deployment in DEPLOYED
    - Deployment is NOT activated
    """
    passing = True

    dp_obj = self.retrieve_ooi_asset("CE04OSHY-PN01C_DEP")
    passing &= self.assertEquals(dp_obj.lcstate, LCS.DEPLOYED)
    passing &= self.assertEquals(dp_obj.availability, AS.AVAILABLE)
    log.debug('test_observatory retrieve CE04OSHY-PN01C_DEP deployment: %s', dp_obj)

    # Check existing RSN node CE04OSHY-LV01C Deployment (PLANNED lcstate)
    CE04OSHY_LV01C_deployment = self.retrieve_ooi_asset('CE04OSHY-LV01C_DEP')
    passing &= self.assertEquals(CE04OSHY_LV01C_deployment.lcstate, LCS.DEPLOYED)
    passing &= self.assertEquals(CE04OSHY_LV01C_deployment.availability, AS.AVAILABLE)

    #self.dump_deployment(CE04OSHY_LV01C_deployment._id)
    log.debug('test_observatory retrieve RSN node CE04OSHY-LV01C Deployment: %s', CE04OSHY_LV01C_deployment)

    # Walk the platform device PLANNED -> DEVELOPED -> INTEGRATED -> DEPLOYED.
    CE04OSHY_LV01C_device = self.retrieve_ooi_asset('CE04OSHY-LV01C_PD')

    # Set CE04OSHY-LV01C device to DEVELOPED state
    passing &= self.transition_lcs_then_verify(resource_id=CE04OSHY_LV01C_device._id, new_lcs_state=LCE.DEVELOP, verify=LCS.DEVELOPED)

    # Set CE04OSHY-LV01C device to INTEGRATED state
    passing &= self.transition_lcs_then_verify(resource_id=CE04OSHY_LV01C_device._id, new_lcs_state=LCE.INTEGRATE, verify=LCS.INTEGRATED)

    # Set CE04OSHY-LV01C device to DEPLOYED state
    passing &= self.transition_lcs_then_verify(resource_id=CE04OSHY_LV01C_device._id, new_lcs_state=LCE.DEPLOY, verify=LCS.DEPLOYED)

    # Set CE04OSHY-LV01C Deployment to DEPLOYED state
    # NOTE: Deployments are created in DEPLOYED state, currently
    #self.transition_lcs_then_verify(resource_id=CE04OSHY_LV01C_deployment._id, new_lcs_state=LCE.DEPLOY, verify='DEPLOYED')

    # Activate Deployment for CE04OSHY-LV01C
    self.OMS.activate_deployment(CE04OSHY_LV01C_deployment._id)
    log.debug('--------- activate_deployment CE04OSHY_LV01C_deployment -------------- ')
    self.dump_deployment(CE04OSHY_LV01C_deployment._id)
    passing &= self.validate_deployment_activated(CE04OSHY_LV01C_deployment._id)

    # (optional) Start CE04OSHY-LV01C platform agent with simulator

    # NOTE: DataProduct is generated in DEPLOYED state
    # # Set DataProduct for CE04OSHY-LV01C platform to DEPLOYED state
    # output_data_product_ids, assns =self.RR.find_objects(subject=CE04OSHY_LV01C_device._id, predicate=PRED.hasOutputProduct, id_only=True)
    # if output_data_product_ids:
    #     #self.assertEquals(len(child_devs), 3)
    #     for output_data_product_id in output_data_product_ids:
    #         log.debug('DataProduct for CE04OSHY-LV01C platform: %s', output_data_product_id)
    #         self.transition_lcs_then_verify(resource_id=output_data_product_id, new_lcs_state=LCE.DEPLOY, verify='DEPLOYED')

    # Check events for CE04OSHY-LV01C platform

    # Check existing CE04OSBP-LJ01C Deployment (PLANNED lcstate)
    # dp_list, _  = self.RR.find_resources_ext(alt_id_ns="PRE", alt_id="CE04OSBP-LJ01C_DEP")
    # self.assertEquals(len(dp_list), 1)
    # CE04OSHY_LV01C_deployment = dp_list[0]
    # self.assertEquals(CE04OSHY_LV01C_deployment.lcstate, 'PLANNED')
    # log.debug('test_observatory retrieve RSN node CE04OSBP-LJ01C Deployment: %s', CE04OSHY_LV01C_deployment)

    # Set CE04OSBP-LJ01C Deployment to DEPLOYED state

    # Update description and other attributes for CE04OSBP-LJ01C device resource

    # Create attachment (JPG image) for CE04OSBP-LJ01C device resource

    # Activate Deployment for CE04OSBP-LJ01C

    # (optional) Add/register CE04OSBP-LJ01C platform agent to parent agent

    # (optional) Start CE04OSBP-LJ01C platform agent
    return passing
def check_rsn_instrument(self):
    """
    Check existing RSN instrument CE04OSBP-LJ01C-06-CTDBPO108 Deployment (PLANNED lcstate)
    Current preload creates:
    - InstrumentDevice in PLANNED
    - InstrumentSite in DEPLOYED
    - Deployment in DEPLOYED
    - Deployment is activated

    Walks the instrument device through its lifecycle, then swaps in a
    substitute deployment for the same site and verifies deactivation.
    """
    passing = True
    CE04OSBP_LJ01C_06_CTDBPO108_deploy = self.retrieve_ooi_asset('CE04OSBP-LJ01C-06-CTDBPO108_DEP')
    self.dump_deployment(CE04OSBP_LJ01C_06_CTDBPO108_deploy._id)
    #passing &= self.assertEquals(CE04OSBP_LJ01C_06_CTDBPO108_deploy.lcstate, 'PLANNED')

    # Set CE04OSBP-LJ01C-06-CTDBPO108 device to DEVELOPED state
    CE04OSBP_LJ01C_06_CTDBPO108_device = self.retrieve_ooi_asset('CE04OSBP-LJ01C-06-CTDBPO108_ID')
    passing &= self.transition_lcs_then_verify(resource_id=CE04OSBP_LJ01C_06_CTDBPO108_device._id, new_lcs_state=LCE.DEVELOP, verify='DEVELOPED')

    # Set CE04OSBP-LJ01C-06-CTDBPO108 device to INTEGRATED state
    passing &= self.transition_lcs_then_verify(resource_id=CE04OSBP_LJ01C_06_CTDBPO108_device._id, new_lcs_state=LCE.INTEGRATE, verify='INTEGRATED')

    # Set CE04OSBP-LJ01C-06-CTDBPO108 device to DEPLOYED state
    passing &= self.transition_lcs_then_verify(resource_id=CE04OSBP_LJ01C_06_CTDBPO108_device._id, new_lcs_state=LCE.DEPLOY, verify='DEPLOYED')

    # Set CE04OSBP-LJ01C-06-CTDBPO108 Deployment to DEPLOYED state
    #self.transition_lcs_then_verify(resource_id=CE04OSBP_LJ01C_06_CTDBPO108_deploy._id, new_lcs_state=LCE.DEPLOY, verify='DEPLOYED')

    # Activate Deployment for CE04OSBP-LJ01C-06-CTDBPO108 instrument
    log.debug('--------- activate_deployment CE04OSBP-LJ01C-06-CTDBPO108 deployment -------------- ')
    self.OMS.activate_deployment(CE04OSBP_LJ01C_06_CTDBPO108_deploy._id)
    passing &= self.validate_deployment_activated(CE04OSBP_LJ01C_06_CTDBPO108_deploy._id)

    # (optional) Add/register CE04OSBP-LJ01C-06-CTDBPO108 instrument agent to parent agent

    # (optional) Start CE04OSBP-LJ01C-06-CTDBPO108 instrument agent with simulator

    # Set all DataProducts for CE04OSBP-LJ01C-06-CTDBPO108 to DEPLOYED state

    # (optional) Create a substitute Deployment for site CE04OSBP-LJ01C-06-CTDBPO108 with a comparable device
    CE04OSBP_LJ01C_06_CTDBPO108_isite = self.retrieve_ooi_asset('CE04OSBP-LJ01C-06-CTDBPO108')

    ## create device here: retrieve CTD Mooring on Mooring Riser 001 - similiar?
    GP03FLMB_RI001_10_CTDMOG999_ID_idevice = self.retrieve_ooi_asset('GP03FLMB-RI001-10-CTDMOG999_ID')

    deploy_id_2 = self.create_basic_deployment(name='CE04OSBP-LJ01C-06-CTDBPO108_DEP2', description='substitute Deployment for site CE04OSBP-LJ01C-06-CTDBPO108 with a comparable device')
    self.OMS.assign_device_to_deployment(instrument_device_id=GP03FLMB_RI001_10_CTDMOG999_ID_idevice._id, deployment_id=deploy_id_2)
    self.OMS.assign_site_to_deployment(instrument_site_id=CE04OSBP_LJ01C_06_CTDBPO108_isite._id, deployment_id=deploy_id_2)
    self.dump_deployment(deploy_id_2)

    # (optional) Activate this second deployment - check first deployment is deactivated
    self.OMS.deactivate_deployment(CE04OSBP_LJ01C_06_CTDBPO108_deploy._id)
    passing &= self.validate_deployment_deactivated(CE04OSBP_LJ01C_06_CTDBPO108_deploy._id)

    # log.debug('Activate deployment deploy_id_2')
    # self.get_deployment_ids(deploy_id_2)
    # self.dump_deployment(deploy_id_2, "deploy_id_2")
    # self.OMS.activate_deployment(deploy_id_2)
    # passing &= self.validate_deployment_deactivated(CE04OSBP_LJ01C_06_CTDBPO108_deploy._id)
    #
    # # (optional) Set first CE04OSBP-LJ01C-06-CTDBPO108 Deployment to INTEGRATED state
    # passing &= self.transition_lcs_then_verify(resource_id=CE04OSBP_LJ01C_06_CTDBPO108_deploy._id, new_lcs_state=LCE.INTEGRATE, verify='INTEGRATED')
    #
    # # Set first CE04OSBP-LJ01C-06-CTDBPO108 device to INTEGRATED state
    # passing &= self.transition_lcs_then_verify(resource_id=CE04OSBP_LJ01C_06_CTDBPO108_device._id, new_lcs_state=LCE.INTEGRATE, verify='INTEGRATED')
    #
    #
    # # (optional) Create a third Deployment for site CE04OSBP-LJ01C-06-CTDBPO108 with a same device from first deployment
    # deploy_id_3 = self.create_basic_deployment(name='CE04OSBP-LJ01C-06-CTDBPO108_DEP3', description='substitute Deployment for site CE04OSBP-LJ01C-06-CTDBPO108 with same device as first')
    # self.IMS.deploy_instrument_device(instrument_device_id=GP03FLMB_RI001_10_CTDMOG999_ID_idevice._id, deployment_id=deploy_id_3)
    # self.OMS.deploy_instrument_site(instrument_site_id=CE04OSBP_LJ01C_06_CTDBPO108_isite._id, deployment_id=deploy_id_3)
    # self.dump_deployment(deploy_id_3)
    #
    #
    # # Set first CE04OSBP-LJ01C-06-CTDBPO108 device to DEPLOYED state
    # passing &= self.transition_lcs_then_verify(resource_id=CE04OSBP_LJ01C_06_CTDBPO108_device._id, new_lcs_state=LCE.DEPLOY, verify='DEPLOYED')
    #
    # # (optional) Activate this third deployment - check second deployment is deactivated
    # log.debug('Activate deployment deploy_id_3')
    # self.dump_deployment(deploy_id_3)
    # self.OMS.activate_deployment(deploy_id_3)
    # #todo: check second deployment is deactivated
    return passing
def check_data_product_reference(self, reference_designator, output=None):
    """Find the parsed DataProduct for a reference designator, activate its
    persistence, and append (data_product_id, stream_def_id, dataset_id) to
    `output`.

    Args:
        reference_designator: OOI reference designator, e.g.
            'RS03AXBS-MJ03A-06-PRESTA301'.
        output: optional list that receives the ids tuple on success.
            (Was previously a mutable default argument, which silently shared
            one list across all calls; now created fresh when omitted.)

    Returns:
        True if every existence assertion passed.
    """
    if output is None:
        output = []
    passing = True
    data_product_ids, _ = self.RR.find_resources_ext(alt_id_ns='PRE', alt_id='%s_DPI1' % reference_designator, id_only=True) # Assuming DPI1 is parsed
    passing &= self.assertEquals(len(data_product_ids), 1)
    if not data_product_ids:
        return passing

    # Let's go ahead and activate it
    data_product_id = data_product_ids[0]
    self.dpclient.activate_data_product_persistence(data_product_id)
    self.addCleanup(self.dpclient.suspend_data_product_persistence, data_product_id)

    dataset_ids, _ = self.RR.find_objects(data_product_id, PRED.hasDataset, id_only=True)
    passing &= self.assertEquals(len(dataset_ids), 1)
    if not dataset_ids:
        return passing
    dataset_id = dataset_ids[0]

    stream_def_ids, _ = self.RR.find_objects(data_product_id, PRED.hasStreamDefinition, id_only=True)
    # BUG FIX: previously re-checked len(dataset_ids) here (copy-paste);
    # check the stream-definition list that was just fetched.
    passing &= self.assertEquals(len(stream_def_ids), 1)
    if not stream_def_ids:
        return passing
    stream_def_id = stream_def_ids[0]
    output.append((data_product_id, stream_def_id, dataset_id))
    return passing
def check_tempsf_instrument_data_product(self, reference_designator):
    """Publish a sample TMPSF temperature granule and verify it round-trips
    through persistence and retrieval unchanged."""
    passing = True
    info_list = []
    passing &= self.check_data_product_reference(reference_designator, info_list)
    if not passing: return passing
    data_product_id, stream_def_id, dataset_id = info_list.pop()

    now = time.time()
    ntp_now = now + 2208988800  # Unix -> NTP epoch offset in seconds

    rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
    rdt['time'] = [ntp_now]
    rdt['temperature'] = [[ 25.3884, 26.9384, 24.3394, 23.3401, 22.9832,
        29.4434, 26.9873, 15.2883, 16.3374, 14.5883, 15.7253, 18.4383,
        15.3488, 17.2993, 10.2111, 11.5993, 10.9345, 9.4444, 9.9876,
        10.9834, 11.0098, 5.3456, 4.2994, 4.3009]]
    dataset_monitor = DatasetMonitor(dataset_id)
    self.addCleanup(dataset_monitor.stop)
    ParameterHelper.publish_rdt_to_data_product(data_product_id, rdt)
    passing &= self.assertTrue(dataset_monitor.wait())
    if not passing: return passing

    # Retrieve what was just persisted and compare with what was published.
    granule = self.data_retriever.retrieve(dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    passing &= self.assert_array_almost_equal(rdt['time'], [ntp_now])
    passing &= self.assert_array_almost_equal(rdt['temperature'], [[
        25.3884, 26.9384, 24.3394, 23.3401, 22.9832, 29.4434, 26.9873,
        15.2883, 16.3374, 14.5883, 15.7253, 18.4383, 15.3488, 17.2993,
        10.2111, 11.5993, 10.9345, 9.4444, 9.9876, 10.9834, 11.0098,
        5.3456, 4.2994, 4.3009]])
    return passing
def check_trhph_instrument_data_products(self, reference_designator):
    """Build a TRHPH sample granule from calibration constants and verify the
    derived vent-fluid parameters both before publication and after retrieval."""
    passing = True
    info_list = []
    passing &= self.check_data_product_reference(reference_designator, info_list)
    if not passing:
        return passing
    data_product_id, stream_def_id, dataset_id = info_list.pop()

    pdict = self.RR2.find_parameter_dictionary_of_stream_definition_using_has_parameter_dictionary(stream_def_id)
    passing &= self.assertEquals(pdict.name, 'trhph_sample')

    rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)

    # calibration constants
    a = 1.98e-9
    b = -2.45e-6
    c = 9.28e-4
    d = -0.0888
    e = 0.731

    V_s = 1.506
    V_c = 0.
    T = 11.8

    r1 = 0.906
    r2 = 4.095
    r3 = 4.095

    ORP_V = 1.806
    Cl = np.nan

    offset = 2008
    gain = 4.0
    # Normally this would be 50 per the DPS but the precision is %4.0f which truncates the values to the nearest 1...
    ORP = ((ORP_V * 1000.) - offset) / gain

    ntp_now = time.time() + 2208988800  # Unix -> NTP epoch offset in seconds

    rdt['cc_a'] = [a]
    rdt['cc_b'] = [b]
    rdt['cc_c'] = [c]
    rdt['cc_d'] = [d]
    rdt['cc_e'] = [e]
    rdt['ref_temp_volts'] = [V_s]
    rdt['resistivity_temp_volts'] = [V_c]
    rdt['eh_sensor'] = [ORP_V]
    rdt['resistivity_5'] = [r1]
    rdt['resistivity_x1'] = [r2]
    rdt['resistivity_x5'] = [r3]
    rdt['cc_offset'] = [offset]
    rdt['cc_gain'] = [gain]
    rdt['time'] = [ntp_now]

    # 'vent_fluid_temperaure' [sic]: spelling matches the preloaded parameter name.
    passing &= self.assert_array_almost_equal(rdt['vent_fluid_temperaure'], [T], 2)
    passing &= self.assert_array_almost_equal(rdt['vent_fluid_chloride_conc'], [Cl], 4)
    passing &= self.assert_array_almost_equal(rdt['vent_fluid_orp'], [ORP], 4)

    dataset_monitor = DatasetMonitor(dataset_id)
    self.addCleanup(dataset_monitor.stop)
    ParameterHelper.publish_rdt_to_data_product(data_product_id, rdt)
    passing &= self.assertTrue(dataset_monitor.wait())
    if not passing: return passing

    # Retrieve what was just persisted and re-check the derived parameters.
    granule = self.data_retriever.retrieve(dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    passing &= self.assert_array_almost_equal(rdt['vent_fluid_temperaure'], [T], 2)
    passing &= self.assert_array_almost_equal(rdt['vent_fluid_chloride_conc'], [Cl], 4)
    passing &= self.assert_array_almost_equal(rdt['vent_fluid_orp'], [ORP], 4)
    return passing
def check_vel3d_instrument_data_products(self, reference_designator):
    """Publish a sample VEL3D-B granule and verify the derived earth-referenced
    turbulent velocities both before publication and after retrieval.

    Args:
        reference_designator: e.g. 'RS01SLBS-MJ01A-12-VEL3DB101'.

    Returns:
        True if all checks passed.
    """
    passing = True
    info_list = []
    passing &= self.check_data_product_reference(reference_designator, info_list)
    if not passing:
        return passing
    data_product_id, stream_def_id, dataset_id = info_list.pop()

    pdict = self.RR2.find_parameter_dictionary_of_stream_definition_using_has_parameter_dictionary(stream_def_id)
    # Consistency fix: fold this check into `passing` like the sibling
    # check_* methods instead of discarding the wrapped assert's result.
    passing &= self.assertEquals(pdict.name, 'vel3d_b_sample')

    rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
    lat = 14.6846
    lon = -51.044

    # NTP timestamps (hourly spacing). NOTE: np.float was a deprecated alias
    # of the builtin float (== float64) and was removed in NumPy 1.24; use
    # np.float64 explicitly for the same dtype.
    ts = np.array([3319563600, 3319567200, 3319570800, 3319574400,
        3319578000, 3319581600, 3319585200, 3319588800, 3319592400,
        3319596000], dtype=np.float64)

    ve = np.array([ -3.2, 0.1, 0. , 2.3, -0.1, 5.6, 5.1, 5.8,
        8.8, 10.3])
    vn = np.array([ 18.2, 9.9, 12. , 6.6, 7.4, 3.4, -2.6, 0.2,
        -1.5, 4.1])
    vu = np.array([-1.1, -0.6, -1.4, -2, -1.7, -2, 1.3, -1.6, -1.1, -4.5])
    ve_expected = np.array([-0.085136, -0.028752, -0.036007, 0.002136,
        -0.023158, 0.043218, 0.056451, 0.054727, 0.088446, 0.085952])
    vn_expected = np.array([ 0.164012, 0.094738, 0.114471, 0.06986, 0.07029,
        0.049237, -0.009499, 0.019311, 0.012096, 0.070017])
    vu_expected = np.array([-0.011, -0.006, -0.014, -0.02, -0.017, -0.02,
        0.013, -0.016, -0.011, -0.045])

    rdt['time'] = ts
    rdt['lat'] = [lat] * 10
    rdt['lon'] = [lon] * 10
    rdt['turbulent_velocity_east'] = ve
    rdt['turbulent_velocity_north'] = vn
    rdt['turbulent_velocity_up'] = vu

    # Derived parameters computed on assignment by parameter functions.
    passing &= self.assert_array_almost_equal(rdt['eastward_turbulent_velocity'],
        ve_expected)
    passing &= self.assert_array_almost_equal(rdt['northward_turbulent_velocity'],
        vn_expected)
    passing &= self.assert_array_almost_equal(rdt['upward_turbulent_velocity'],
        vu_expected)

    dataset_monitor = DatasetMonitor(dataset_id)
    self.addCleanup(dataset_monitor.stop)
    ParameterHelper.publish_rdt_to_data_product(data_product_id, rdt)
    passing &= self.assertTrue(dataset_monitor.wait())
    if not passing: return passing

    # Retrieve what was just persisted and re-check the derived velocities.
    granule = self.data_retriever.retrieve(dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    passing &= self.assert_array_almost_equal(rdt['eastward_turbulent_velocity'],
        ve_expected)
    passing &= self.assert_array_almost_equal(rdt['northward_turbulent_velocity'],
        vn_expected)
    passing &= self.assert_array_almost_equal(rdt['upward_turbulent_velocity'],
        vu_expected)
    return passing
def check_presta_instrument_data_products(self, reference_designator):
    """Publish a sample PREST-A (SFLPRES) granule and verify the derived
    seafloor (tide) pressure before publication and after retrieval.

    Returns:
        True if all checks passed.
    """
    # Check the parsed data product make sure it's got everything it needs and can be published persisted etc.
    # Absolute Pressure (SFLPRES_L0) is what comes off the instrumnet, SFLPRES_L1 is a pfunc
    # Let's go ahead and publish some fake data!!!
    # According to https://alfresco.oceanobservatories.org/alfresco/d/d/workspace/SpacesStore/63e16865-9d9e-4b11-b0b3-d5658faa5080/1341-00230_Data_Product_Spec_SFLPRES_OOI.pdf
    # Appendix A. Example 1.
    # p_psia_tide = 14.8670
    # the tide should be 10.2504
    passing = True

    info_list = []
    passing &= self.check_data_product_reference(reference_designator, info_list)
    if not passing:
        return passing
    data_product_id, stream_def_id, dataset_id = info_list.pop()

    now = time.time()
    ntp_now = now + 2208988800.  # Unix -> NTP epoch offset in seconds

    rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
    rdt['time'] = [ntp_now]
    rdt['absolute_pressure'] = [14.8670]
    passing &= self.assert_array_almost_equal(rdt['seafloor_pressure'], [10.2504], 4)
    dataset_monitor = DatasetMonitor(dataset_id)
    self.addCleanup(dataset_monitor.stop)
    ParameterHelper.publish_rdt_to_data_product(data_product_id, rdt)
    # Consistency fix: fold the wait result into `passing` like the sibling
    # check_* methods instead of discarding the wrapped assert's result.
    passing &= self.assertTrue(dataset_monitor.wait()) # Bumped to 20 to keep buildbot happy
    if not passing: return passing

    granule = self.data_retriever.retrieve(dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    passing &= self.assert_array_almost_equal(rdt['time'], [ntp_now])
    passing &= self.assert_array_almost_equal(rdt['seafloor_pressure'], [10.2504], 4)
    passing &= self.assert_array_almost_equal(rdt['absolute_pressure'], [14.8670], 4)
    return passing
def check_rsn_instrument_data_product(self):
    """Check the PREST-A SFLPRES L0 data product's stream definition and
    parameters, then run the full per-instrument data-product checks."""
    passing = True

    # for RS03AXBS-MJ03A-06-PRESTA301 (PREST-A) there are a few listed data products
    # Parsed, Engineering
    # SFLPRES-0 SFLPRES-1
    # Check for the two data products and make sure they have the proper parameters
    # SFLPRES-0 should
    data_products, _ = self.RR.find_resources_ext(alt_id_ns='PRE', alt_id='RS03AXBS-MJ03A-06-PRESTA301_SFLPRES_L0_DPID', id_only=True)
    passing &=self.assertTrue(len(data_products)==1)
    if not data_products:
        return passing

    data_product_id = data_products[0]

    stream_defs, _ = self.RR.find_objects(data_product_id,PRED.hasStreamDefinition,id_only=False)
    passing &= self.assertTrue(len(stream_defs)==1)
    if not stream_defs:
        return passing

    # Assert that the stream definition has the correct reference designator
    stream_def = stream_defs[0]
    passing &= self.assertEquals(stream_def.stream_configuration['reference_designator'], 'RS03AXBS-MJ03A-06-PRESTA301')

    # Get the pdict and make sure that the parameters corresponding to the available fields
    # begin with the appropriate data product identifier
    pdict_ids, _ = self.RR.find_objects(stream_def, PRED.hasParameterDictionary, id_only=True)
    passing &= self.assertEquals(len(pdict_ids), 1)
    if not pdict_ids:
        return passing

    pdict_id = pdict_ids[0]

    pdict = DatasetManagementService.get_parameter_dictionary(pdict_id)
    available_params = [pdict.get_context(i) for i in pdict.keys() if i in stream_def.available_fields]
    for p in available_params:
        if p.name=='time': # Ignore the domain parameter
            continue
        passing &= self.assertTrue(p.ooi_short_name.startswith('SFLPRES'))

    # Full data-product checks for each RSN instrument of interest.
    passing &= self.check_presta_instrument_data_products('RS01SLBS-MJ01A-06-PRESTA101')
    passing &= self.check_vel3d_instrument_data_products( 'RS01SLBS-MJ01A-12-VEL3DB101')
    passing &= self.check_presta_instrument_data_products('RS03AXBS-MJ03A-06-PRESTA301')
    passing &= self.check_vel3d_instrument_data_products( 'RS03AXBS-MJ03A-12-VEL3DB301')
    passing &= self.check_tempsf_instrument_data_product( 'RS03ASHS-MJ03B-07-TMPSFA301')
    passing &= self.check_vel3d_instrument_data_products( 'RS03INT2-MJ03D-12-VEL3DB304')
    passing &= self.check_trhph_instrument_data_products( 'RS03INT1-MJ03C-10-TRHPHA301')

    self.data_product_management.activate_data_product_persistence(data_product_id)
    dataset_id = self.RR2.find_dataset_id_of_data_product_using_has_dataset(data_product_id)
    granule = self.data_retriever.retrieve(dataset_id)
    rdt = RecordDictionaryTool.load_from_granule(granule)
    self.assert_array_almost_equal(rdt['seafloor_pressure'], [10.2504], 4)
    self.assert_array_almost_equal(rdt['absolute_pressure'], [14.8670], 4)
    self.data_product_management.suspend_data_product_persistence(data_product_id) # Should do nothing and not raise anything
    return passing
def check_glider(self):
    """Check that glider GP05MOAS-GL001 assembly is defined by OOI preload
    (3 instruments), then exercise deployment creation and assignment."""
    passing = True
    GP05MOAS_GL001_device = self.retrieve_ooi_asset('GP05MOAS-GL001_PD')
    child_devs, assns =self.RR.find_objects(subject=GP05MOAS_GL001_device._id, predicate=PRED.hasDevice, id_only=True)
    passing &= self.assertEquals(len(child_devs), 3)

    # Set GP05MOAS-GL001 Deployment to DEPLOYED
    GP05MOAS_GL001_deploy = self.retrieve_ooi_asset('GP05MOAS-GL001_DEP')
    passing &= self.transition_lcs_then_verify(resource_id=GP05MOAS_GL001_deploy._id, new_lcs_state=LCE.DEPLOY, verify='DEPLOYED')

    # Activate Deployment for GP05MOAS-GL001
    #self.OMS.activate_deployment(GP05MOAS_GL001_deploy._id)

    # Deactivate Deployment for GP05MOAS-GL001
    #self.OMS.deactivate_deployment(GP05MOAS_GL001_deploy._id)

    # Create a new Deployment resource X without any assignment
    x_deploy_id = self.create_basic_deployment(name='X_Deployment', description='new Deployment resource X without any assignment')

    # Assign Deployment X to site GP05MOAS-GL001
    GP05MOAS_GL001_psite = self.retrieve_ooi_asset('GP05MOAS-GL001')
    self.OMS.assign_site_to_deployment(GP05MOAS_GL001_psite._id, x_deploy_id)

    # Assign Deployment X to first device for GP05MOAS-GL001
    GP05MOAS_GL001_device = self.retrieve_ooi_asset('GP05MOAS-GL001_PD')
    self.OMS.assign_device_to_deployment(GP05MOAS_GL001_device._id, x_deploy_id)

    # Set GP05MOAS-GL001 Deployment to PLANNED state
    #self.transition_lcs_then_verify(resource_id=x_deploy_id, new_lcs_state=LCE.PLAN, verify='PLANNED')
    # ??? already in planned

    # Set second GP05MOAS-GL001 Deployment to DEPLOYED
    passing &= self.transition_lcs_then_verify(resource_id=x_deploy_id, new_lcs_state=LCE.DEPLOY, verify='DEPLOYED')
    self.dump_deployment(x_deploy_id)

    # Activate second Deployment for GP05MOAS-GL001
    #self.OMS.activate_deployment(x_deploy_id)

    # Deactivate second Deployment for GP05MOAS-GL001
    #self.OMS.deactivate_deployment(x_deploy_id)
    return passing
def check_cg_assembly(self):
    """Placeholder for CG assembly lifecycle checks; not yet implemented.

    Planned steps:
    - Set several CE01ISSM-RI002-* instrument devices to DEVELOPED state
    - Assemble several CE01ISSM-RI002-* instruments to a CG CE01ISSM-RI002 component platform
    - Set several CE01ISSM-RI002-* instrument devices to INTEGRATED state
    - Assemble CE01ISSM-RI002 platform to CG CE01ISSM-LM001 station platform
    - Set CE01ISSM-RI002 component device to INTEGRATED state
    - Set CE01ISSM-LM001 station device to INTEGRATED state
    - Set CE01ISSM-LM001 station device to DEPLOYED state (children maybe too?)
    - Set CE01ISSM-LM001 Deployment to DEPLOYED
    - Activate CE01ISSM-LM001 platform assembly deployment
    - Deactivate CE01ISSM-LM001 platform assembly deployment
    - Set CE01ISSM-LM001 Deployment to INTEGRATED state
    - Set CE01ISSM-LM001 station device to INTEGRATED state
    - Set CE01ISSM-RI002 component device to INTEGRATED state
    - Disassemble CE01ISSM-RI002 platform from CG CE01ISSM-LM001 station platform
    - Disassemble all CE01ISSM-RI002-* instruments from a CG CE01ISSM-RI002 component platform
    - Retire instrument one for CE01ISSM-RI002-*
    - Retire device one for CE01ISSM-RI002
    - Retire device one for CE01ISSM-LM001
    """
    return True
# -------------------------------------------------------------------------
def retrieve_ooi_asset(self, alt_id='', namespace='PRE'):
    """Look up exactly one resource by alternate id; assert uniqueness."""
    matches, _ = self.RR.find_resources_ext(alt_id_ns=namespace, alt_id=alt_id)
    self.assertEquals(len(matches), 1)
    return matches[0]
def transition_lcs_then_verify(self, resource_id, new_lcs_state, verify):
    """Advance a resource's lifecycle state and verify the resulting lcstate.

    Args:
        resource_id: id of the resource to transition.
        new_lcs_state: lifecycle event to apply (e.g. LCE.DEPLOY).
        verify: expected lcstate after the transition.

    Returns:
        The result of the (wrapped, non-raising) assertEquals check.
    """
    # advance_lcs's return value was previously bound to an unused local; drop it.
    self.RR2.advance_lcs(resource_id, new_lcs_state)
    resource_obj = self.RR.read(resource_id)
    return self.assertEquals(resource_obj.lcstate, verify)
def create_basic_deployment(self, name='', description=''):
    """Create a Deployment resource with a fixed one-year planned temporal
    bounds (calendar year 2013) and a cabled-node deployment context."""
    start = IonTime(datetime.datetime(2013,1,1))
    end = IonTime(datetime.datetime(2014,1,1))
    temporal_bounds = IonObject(OT.TemporalBounds, name='planned', start_datetime=start.to_string(), end_datetime=end.to_string())
    deployment_obj = IonObject(RT.Deployment,
                               name=name,
                               description=description,
                               context=IonObject(OT.CabledNodeDeploymentContext),
                               constraint_list=[temporal_bounds])
    return self.OMS.create_deployment(deployment_obj)
def validate_deployment_activated(self, deployment_id=''):
    """True if the deployment's site and device are linked by hasDevice."""
    site_id, device_id = self.get_deployment_ids(deployment_id)
    links = self.RR.find_associations(subject=site_id, predicate=PRED.hasDevice, object=device_id)
    return self.assertEquals(len(links), 1)
def validate_deployment_deactivated(self, deployment_id=''):
    """True if no hasDevice association links the deployment's site and device."""
    site_id, device_id = self.get_deployment_ids(deployment_id)
    links = self.RR.find_associations(subject=site_id, predicate=PRED.hasDevice, object=device_id)
    return self.assertEquals(len(links), 0)
def dump_deployment(self, deployment_id='', name=""):
    """Log a summary and a full dump of a deployment plus every resource
    that references it via hasDeployment. Debug aid; no return value."""
    #site_id, device_id = self.get_deployment_ids(deployment_id)
    resource_list,_ = self.RR.find_subjects(predicate=PRED.hasDeployment, object=deployment_id, id_only=True)
    # Include the deployment itself in the dump.
    resource_list.append(deployment_id)
    resources = self.RR.read_mult(resource_list )
    log.debug('--------- dump_deployment %s summary---------------', name)
    for resource in resources:
        log.debug('%s: %s (%s)', resource._get_type(), resource.name, resource._id)

    log.debug('--------- dump_deployment %s full dump ---------------', name)
    for resource in resources:
        log.debug('resource: %s ', resource)
    log.debug('--------- dump_deployment %s end ---------------', name)

    #assocs = self.container.resource_registry.find_assoctiations(anyside=deployment_id)
    # assocs = Container.instance.resource_registry.find_assoctiations(anyside=deployment_id)
    # log.debug('--------- dump_deployment  ---------------')
    # for assoc in assocs:
    #     log.debug('SUBJECT: %s   PREDICATE: %s OBJET: %s', assoc.s, assoc.p, assoc.o)
    # log.debug('--------- dump_deployment  end  ---------------')
def get_deployment_ids(self, deployment_id=''):
    """Return (site_id, device_id) for the given deployment.

    Asserts that exactly one device (instrument or platform) and exactly
    one site (instrument or platform) reference it via hasDeployment.
    """
    devices = []
    sites = []
    idevice_list,_ = self.RR.find_subjects(RT.InstrumentDevice, PRED.hasDeployment, deployment_id, id_only=True)
    pdevice_list,_ = self.RR.find_subjects(RT.PlatformDevice, PRED.hasDeployment, deployment_id, id_only=True)
    devices = idevice_list + pdevice_list
    self.assertEquals(1, len(devices))
    isite_list,_ = self.RR.find_subjects(RT.InstrumentSite, PRED.hasDeployment, deployment_id, id_only=True)
    psite_list,_ = self.RR.find_subjects(RT.PlatformSite, PRED.hasDeployment, deployment_id, id_only=True)
    sites = isite_list + psite_list
    self.assertEquals(1, len(sites))
    return sites[0], devices[0]
def _find_resource_in_list(self, res_list, attr, attr_val, assert_found=True):
    """Return the first resource in res_list whose `attr` equals `attr_val`.

    Returns None if no match; when assert_found is True, also records an
    assertion failure for the miss.
    """
    match = next((res for res in res_list if getattr(res, attr, None) == attr_val), None)
    if match is None and assert_found:
        self.assertTrue(False, "Attribute %s value %s not found in list" % (attr, attr_val))
    return match
# -------------------------------------------------------------------------
def _get_caller(self):
    """Return 'filename:lineno' for the caller's caller (two stack frames up).

    NOTE: the fixed stack index makes this correct only when called exactly
    one level below the frame of interest.
    """
    s = inspect.stack()
    return "%s:%s" % (s[2][1], s[2][2])
@assertion_wrapper
def assert_array_almost_equal(self, *args, **kwargs):
    """Numpy almost-equal array comparison routed through assertion_wrapper.

    NOTE(review): assertion_wrapper is defined elsewhere; it appears to turn
    the assertion outcome into a boolean result used with `passing &=` — confirm.
    """
    np.testing.assert_array_almost_equal(*args, **kwargs)
@assertion_wrapper
def assertEquals(self, *args, **kwargs):
    """TestCase assertEquals routed through assertion_wrapper (non-fatal form)."""
    IonIntegrationTestCase.assertEquals(self, *args, **kwargs)
@assertion_wrapper
def assertTrue(self, *args, **kwargs):
    """TestCase assertTrue routed through assertion_wrapper (non-fatal form)."""
    IonIntegrationTestCase.assertTrue(self, *args, **kwargs)
| ooici/coi-services | ion/services/sa/observatory/test/test_observatory_full_integration.py | Python | bsd-2-clause | 43,907 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
    """Enum metaclass that resolves member lookups case-insensitively.

    Both subscription (``Color['red']``) and attribute (``Color.red``)
    access are normalized to the canonical upper-case member name.
    """

    def __getitem__(self, name):
        # Normalize subscription lookups to the upper-case member name.
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`.

        __getattr__ only fires after normal attribute lookup fails, so the
        `name` and `value` properties of members (which live in the class'
        __dict__) are unaffected.
        """
        key = name.upper()
        if key in cls._member_map_:
            return cls._member_map_[key]
        raise AttributeError(name)
class Access(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Indicates whether the traffic is allowed or denied."""

    ALLOW = "Allow"
    DENY = "Deny"
class ApplicationGatewayBackendHealthServerHealth(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Health of backend server.
"""
UNKNOWN = "Unknown"
UP = "Up"
DOWN = "Down"
PARTIAL = "Partial"
DRAINING = "Draining"
class ApplicationGatewayCookieBasedAffinity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Cookie based affinity.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ApplicationGatewayCustomErrorStatusCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status code of the application gateway customer error.
"""
HTTP_STATUS403 = "HttpStatus403"
HTTP_STATUS502 = "HttpStatus502"
class ApplicationGatewayFirewallMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Web application firewall mode.
"""
DETECTION = "Detection"
PREVENTION = "Prevention"
class ApplicationGatewayOperationalState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operational state of the application gateway resource.
"""
STOPPED = "Stopped"
STARTING = "Starting"
RUNNING = "Running"
STOPPING = "Stopping"
class ApplicationGatewayProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol used to communicate with the backend. Possible values are 'Http' and 'Https'.
"""
HTTP = "Http"
HTTPS = "Https"
class ApplicationGatewayRedirectType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Redirect type of an application gateway redirect configuration.
    """

    PERMANENT = "Permanent"
    FOUND = "Found"
    SEE_OTHER = "SeeOther"
    TEMPORARY = "Temporary"
class ApplicationGatewayRequestRoutingRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Rule type.
"""
BASIC = "Basic"
PATH_BASED_ROUTING = "PathBasedRouting"
class ApplicationGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of an application gateway SKU.
"""
STANDARD_SMALL = "Standard_Small"
STANDARD_MEDIUM = "Standard_Medium"
STANDARD_LARGE = "Standard_Large"
WAF_MEDIUM = "WAF_Medium"
WAF_LARGE = "WAF_Large"
STANDARD_V2 = "Standard_v2"
WAF_V2 = "WAF_v2"
class ApplicationGatewaySslCipherSuite(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Ssl cipher suites enums.
"""
TLS_ECDHE_RSA_WITH_AES256_CBC_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"
TLS_ECDHE_RSA_WITH_AES128_CBC_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
TLS_ECDHE_RSA_WITH_AES256_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
TLS_ECDHE_RSA_WITH_AES128_CBC_SHA = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"
TLS_DHE_RSA_WITH_AES256_GCM_SHA384 = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"
TLS_DHE_RSA_WITH_AES128_GCM_SHA256 = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
TLS_DHE_RSA_WITH_AES256_CBC_SHA = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"
TLS_DHE_RSA_WITH_AES128_CBC_SHA = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"
TLS_RSA_WITH_AES256_GCM_SHA384 = "TLS_RSA_WITH_AES_256_GCM_SHA384"
TLS_RSA_WITH_AES128_GCM_SHA256 = "TLS_RSA_WITH_AES_128_GCM_SHA256"
TLS_RSA_WITH_AES256_CBC_SHA256 = "TLS_RSA_WITH_AES_256_CBC_SHA256"
TLS_RSA_WITH_AES128_CBC_SHA256 = "TLS_RSA_WITH_AES_128_CBC_SHA256"
TLS_RSA_WITH_AES256_CBC_SHA = "TLS_RSA_WITH_AES_256_CBC_SHA"
TLS_RSA_WITH_AES128_CBC_SHA = "TLS_RSA_WITH_AES_128_CBC_SHA"
TLS_ECDHE_ECDSA_WITH_AES256_GCM_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
TLS_ECDHE_ECDSA_WITH_AES128_GCM_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
TLS_ECDHE_ECDSA_WITH_AES256_CBC_SHA384 = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"
TLS_ECDHE_ECDSA_WITH_AES128_CBC_SHA256 = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
TLS_ECDHE_ECDSA_WITH_AES256_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
TLS_ECDHE_ECDSA_WITH_AES128_CBC_SHA = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
TLS_DHE_DSS_WITH_AES256_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256"
TLS_DHE_DSS_WITH_AES128_CBC_SHA256 = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256"
TLS_DHE_DSS_WITH_AES256_CBC_SHA = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"
TLS_DHE_DSS_WITH_AES128_CBC_SHA = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"
TLS_RSA_WITH3_DES_EDE_CBC_SHA = "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
TLS_DHE_DSS_WITH3_DES_EDE_CBC_SHA = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA"
TLS_ECDHE_RSA_WITH_AES128_GCM_SHA256 = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
TLS_ECDHE_RSA_WITH_AES256_GCM_SHA384 = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
class ApplicationGatewaySslPolicyName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Ssl predefined policy name enums.
"""
APP_GW_SSL_POLICY20150501 = "AppGwSslPolicy20150501"
APP_GW_SSL_POLICY20170401 = "AppGwSslPolicy20170401"
APP_GW_SSL_POLICY20170401_S = "AppGwSslPolicy20170401S"
class ApplicationGatewaySslPolicyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of Ssl Policy.
    """

    PREDEFINED = "Predefined"
    CUSTOM = "Custom"
class ApplicationGatewaySslProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Ssl protocol enums.
"""
TL_SV1_0 = "TLSv1_0"
TL_SV1_1 = "TLSv1_1"
TL_SV1_2 = "TLSv1_2"
class ApplicationGatewayTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Tier of an application gateway.
"""
STANDARD = "Standard"
WAF = "WAF"
STANDARD_V2 = "Standard_v2"
WAF_V2 = "WAF_v2"
class AssociationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The association type of the child resource to the parent resource.
"""
ASSOCIATED = "Associated"
CONTAINS = "Contains"
class AuthenticationMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""VPN client Authentication Method. Possible values are: 'EAPTLS' and 'EAPMSCHAPv2'.
"""
EAPTLS = "EAPTLS"
EAPMSCHA_PV2 = "EAPMSCHAPv2"
class AuthorizationUseStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'.
"""
AVAILABLE = "Available"
IN_USE = "InUse"
class AzureFirewallApplicationRuleProtocolType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol type of a Application Rule resource
"""
HTTP = "Http"
HTTPS = "Https"
class AzureFirewallNatRCActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The action type of a NAT rule collection
"""
SNAT = "Snat"
DNAT = "Dnat"
class AzureFirewallNetworkRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol of a Network Rule resource
"""
TCP = "TCP"
UDP = "UDP"
ANY = "Any"
ICMP = "ICMP"
class AzureFirewallRCActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The action type of a rule collection
"""
ALLOW = "Allow"
DENY = "Deny"
ALERT = "Alert"
class AzureFirewallThreatIntelMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The operation mode for Threat Intel
"""
ALERT = "Alert"
DENY = "Deny"
OFF = "Off"
class BgpPeerState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The BGP peer state
"""
UNKNOWN = "Unknown"
STOPPED = "Stopped"
IDLE = "Idle"
CONNECTING = "Connecting"
CONNECTED = "Connected"
class CircuitConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Express Route Circuit Connection State. Possible values are: 'Connected' and 'Disconnected'.
"""
CONNECTED = "Connected"
CONNECTING = "Connecting"
DISCONNECTED = "Disconnected"
class ConnectionMonitorSourceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status of connection monitor source.
"""
UNKNOWN = "Unknown"
ACTIVE = "Active"
INACTIVE = "Inactive"
class ConnectionState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The connection state.
"""
REACHABLE = "Reachable"
UNREACHABLE = "Unreachable"
UNKNOWN = "Unknown"
class ConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The connection status.
"""
UNKNOWN = "Unknown"
CONNECTED = "Connected"
DISCONNECTED = "Disconnected"
DEGRADED = "Degraded"
class DdosCustomPolicyProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol for which the DDoS protection policy is being customized.
"""
TCP = "Tcp"
UDP = "Udp"
SYN = "Syn"
class DdosCustomPolicyTriggerSensitivityOverride(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The customized DDoS protection trigger rate sensitivity degrees. High: Trigger rate set with
most sensitivity w.r.t. normal traffic. Default: Trigger rate set with moderate sensitivity
w.r.t. normal traffic. Low: Trigger rate set with less sensitivity w.r.t. normal traffic.
Relaxed: Trigger rate set with least sensitivity w.r.t. normal traffic.
"""
RELAXED = "Relaxed"
LOW = "Low"
DEFAULT = "Default"
HIGH = "High"
class DdosSettingsProtectionCoverage(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The DDoS protection policy customizability of the public IP. Only standard coverage will have
the ability to be customized.
"""
BASIC = "Basic"
STANDARD = "Standard"
class DhGroup(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The DH Groups used in IKE Phase 1 for initial SA.
"""
NONE = "None"
DH_GROUP1 = "DHGroup1"
DH_GROUP2 = "DHGroup2"
DH_GROUP14 = "DHGroup14"
DH_GROUP2048 = "DHGroup2048"
ECP256 = "ECP256"
ECP384 = "ECP384"
DH_GROUP24 = "DHGroup24"
class Direction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The direction of the packet represented as a 5-tuple.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class EffectiveRouteSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Who created the route. Possible values are: 'Unknown', 'User', 'VirtualNetworkGateway', and
'Default'.
"""
UNKNOWN = "Unknown"
USER = "User"
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
DEFAULT = "Default"
class EffectiveRouteState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The value of effective route. Possible values are: 'Active' and 'Invalid'.
"""
ACTIVE = "Active"
INVALID = "Invalid"
class EffectiveSecurityRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The network protocol this rule applies to. Possible values are: 'Tcp', 'Udp', and 'All'.
"""
TCP = "Tcp"
UDP = "Udp"
ALL = "All"
class EvaluationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Connectivity analysis evaluation state.
"""
NOT_STARTED = "NotStarted"
IN_PROGRESS = "InProgress"
COMPLETED = "Completed"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured',
'Configuring', 'Configured', and 'ValidationNeeded'.
"""
NOT_CONFIGURED = "NotConfigured"
CONFIGURING = "Configuring"
CONFIGURED = "Configured"
VALIDATION_NEEDED = "ValidationNeeded"
class ExpressRouteCircuitPeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of peering. Possible values are: 'Disabled' and 'Enabled'
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class ExpressRouteCircuitSkuFamily(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'.
"""
UNLIMITED_DATA = "UnlimitedData"
METERED_DATA = "MeteredData"
class ExpressRouteCircuitSkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The tier of the SKU. Possible values are 'Standard', 'Premium' or 'Local'.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
BASIC = "Basic"
LOCAL = "Local"
class ExpressRouteLinkAdminState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Administrative state of the physical port
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ExpressRouteLinkConnectorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Physical fiber port type.
"""
LC = "LC"
SC = "SC"
class ExpressRoutePeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of peering. Possible values are: 'Disabled' and 'Enabled'
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class ExpressRoutePeeringType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and
'MicrosoftPeering'.
"""
AZURE_PUBLIC_PEERING = "AzurePublicPeering"
AZURE_PRIVATE_PEERING = "AzurePrivatePeering"
MICROSOFT_PEERING = "MicrosoftPeering"
class ExpressRoutePortsEncapsulation(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Encapsulation method on physical ports.
"""
DOT1_Q = "Dot1Q"
QIN_Q = "QinQ"
class FlowLogFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The file type of flow log.
"""
JSON = "JSON"
class HTTPMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""HTTP method.
"""
GET = "Get"
class HubVirtualNetworkConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the VirtualHub to vnet connection.
"""
UNKNOWN = "Unknown"
CONNECTING = "Connecting"
CONNECTED = "Connected"
NOT_CONNECTED = "NotConnected"
class IkeEncryption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The IKE encryption algorithm (IKE phase 2).
"""
DES = "DES"
DES3 = "DES3"
AES128 = "AES128"
AES192 = "AES192"
AES256 = "AES256"
GCMAES256 = "GCMAES256"
GCMAES128 = "GCMAES128"
class IkeIntegrity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The IKE integrity algorithm (IKE phase 2).
"""
MD5 = "MD5"
SHA1 = "SHA1"
SHA256 = "SHA256"
SHA384 = "SHA384"
GCMAES256 = "GCMAES256"
GCMAES128 = "GCMAES128"
class IPAllocationMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""PrivateIP allocation method.
"""
STATIC = "Static"
DYNAMIC = "Dynamic"
class IpFlowProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Protocol to be verified on.
"""
TCP = "TCP"
UDP = "UDP"
class IpsecEncryption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The IPSec encryption algorithm (IKE phase 1).
"""
NONE = "None"
DES = "DES"
DES3 = "DES3"
AES128 = "AES128"
AES192 = "AES192"
AES256 = "AES256"
GCMAES128 = "GCMAES128"
GCMAES192 = "GCMAES192"
GCMAES256 = "GCMAES256"
class IpsecIntegrity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The IPSec integrity algorithm (IKE phase 1).
"""
MD5 = "MD5"
SHA1 = "SHA1"
SHA256 = "SHA256"
GCMAES128 = "GCMAES128"
GCMAES192 = "GCMAES192"
GCMAES256 = "GCMAES256"
class IPVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Available from Api-Version 2016-03-30 onwards, it represents whether the specific
ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and
'IPv6'.
"""
I_PV4 = "IPv4"
I_PV6 = "IPv6"
class IssueType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of issue.
"""
UNKNOWN = "Unknown"
AGENT_STOPPED = "AgentStopped"
GUEST_FIREWALL = "GuestFirewall"
DNS_RESOLUTION = "DnsResolution"
SOCKET_BIND = "SocketBind"
NETWORK_SECURITY_RULE = "NetworkSecurityRule"
USER_DEFINED_ROUTE = "UserDefinedRoute"
PORT_THROTTLED = "PortThrottled"
PLATFORM = "Platform"
class LoadBalancerSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of a load balancer SKU.
"""
BASIC = "Basic"
STANDARD = "Standard"
class LoadDistribution(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and
'SourceIPProtocol'.
"""
DEFAULT = "Default"
SOURCE_IP = "SourceIP"
SOURCE_IP_PROTOCOL = "SourceIPProtocol"
class NetworkOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status of the Azure async operation. Possible values are: 'InProgress', 'Succeeded', and
'Failed'.
"""
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class NextHopType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Next hop type.
"""
INTERNET = "Internet"
VIRTUAL_APPLIANCE = "VirtualAppliance"
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
VNET_LOCAL = "VnetLocal"
HYPER_NET_GATEWAY = "HyperNetGateway"
NONE = "None"
class OfficeTrafficCategory(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The office traffic category.
"""
OPTIMIZE = "Optimize"
OPTIMIZE_AND_ALLOW = "OptimizeAndAllow"
ALL = "All"
NONE = "None"
class Origin(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The origin of the issue.
"""
LOCAL = "Local"
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class OutboundRulePropertiesFormatProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Protocol - TCP, UDP or All
"""
TCP = "Tcp"
UDP = "Udp"
ALL = "All"
class PcError(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Error states reported for a packet capture session.
    """

    INTERNAL_ERROR = "InternalError"
    AGENT_STOPPED = "AgentStopped"
    CAPTURE_FAILED = "CaptureFailed"
    LOCAL_FILE_FAILED = "LocalFileFailed"
    STORAGE_FAILED = "StorageFailed"
class PcProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Protocol to be filtered on.
"""
TCP = "TCP"
UDP = "UDP"
ANY = "Any"
class PcStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the packet capture session.
"""
NOT_STARTED = "NotStarted"
RUNNING = "Running"
STOPPED = "Stopped"
ERROR = "Error"
UNKNOWN = "Unknown"
class PfsGroup(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The Pfs Groups used in IKE Phase 2 for new child SA.
"""
NONE = "None"
PFS1 = "PFS1"
PFS2 = "PFS2"
PFS2048 = "PFS2048"
ECP256 = "ECP256"
ECP384 = "ECP384"
PFS24 = "PFS24"
PFS14 = "PFS14"
PFSMM = "PFSMM"
class ProbeProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol of the end point. Possible values are: 'Http', 'Tcp', or 'Https'. If 'Tcp' is
specified, a received ACK is required for the probe to be successful. If 'Http' or 'Https' is
specified, a 200 OK response from the specifies URI is required for the probe to be successful.
"""
HTTP = "Http"
TCP = "Tcp"
HTTPS = "Https"
class ProcessorArchitecture(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""VPN client Processor Architecture. Possible values are: 'AMD64' and 'X86'.
"""
AMD64 = "Amd64"
X86 = "X86"
class Protocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Network protocol.
"""
TCP = "Tcp"
HTTP = "Http"
HTTPS = "Https"
ICMP = "Icmp"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of the resource.
"""
SUCCEEDED = "Succeeded"
UPDATING = "Updating"
DELETING = "Deleting"
FAILED = "Failed"
class PublicIPAddressSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of a public IP address SKU.
"""
BASIC = "Basic"
STANDARD = "Standard"
class PublicIPPrefixSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of a public IP prefix SKU.
"""
STANDARD = "Standard"
class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes
both an implicitly created identity and a set of user assigned identities. The type 'None' will
remove any identities from the virtual machine.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class RouteFilterRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The rule type of the rule. Valid value is: 'Community'
"""
COMMUNITY = "Community"
class RouteNextHopType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of Azure hop the packet should be sent to. Possible values are:
'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'.
"""
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
VNET_LOCAL = "VnetLocal"
INTERNET = "Internet"
VIRTUAL_APPLIANCE = "VirtualAppliance"
NONE = "None"
class SecurityRuleAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Whether network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
ALLOW = "Allow"
DENY = "Deny"
class SecurityRuleDirection(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The direction of the rule. Possible values are: 'Inbound' and 'Outbound'.
    """

    INBOUND = "Inbound"
    OUTBOUND = "Outbound"
class SecurityRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
"""
TCP = "Tcp"
UDP = "Udp"
ASTERISK = "*"
class ServiceProviderProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The ServiceProviderProvisioningState state of the resource. Possible values are
'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
"""
NOT_PROVISIONED = "NotProvisioned"
PROVISIONING = "Provisioning"
PROVISIONED = "Provisioned"
DEPROVISIONING = "Deprovisioning"
class Severity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The severity of the issue.
"""
ERROR = "Error"
WARNING = "Warning"
class TransportProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The transport protocol for the endpoint. Possible values are 'Udp' or 'Tcp' or 'All'.
"""
UDP = "Udp"
TCP = "Tcp"
ALL = "All"
class TunnelConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the tunnel.
"""
UNKNOWN = "Unknown"
CONNECTING = "Connecting"
CONNECTED = "Connected"
NOT_CONNECTED = "NotConnected"
class UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enum describing the unit of measurement.
"""
COUNT = "Count"
class VerbosityLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Verbosity level. Accepted values are 'Normal', 'Minimum', 'Full'.
"""
NORMAL = "Normal"
MINIMUM = "Minimum"
FULL = "Full"
class VirtualNetworkGatewayConnectionProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gateway connection protocol. Possible values are: 'IKEv2', 'IKEv1'.
"""
IK_EV2 = "IKEv2"
IK_EV1 = "IKEv1"
class VirtualNetworkGatewayConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Virtual network Gateway connection status
"""
UNKNOWN = "Unknown"
CONNECTING = "Connecting"
CONNECTED = "Connected"
NOT_CONNECTED = "NotConnected"
class VirtualNetworkGatewayConnectionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Gateway connection type. Possible values are: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', and
    'VPNClient'.
    """

    I_PSEC = "IPsec"
    VNET2_VNET = "Vnet2Vnet"
    EXPRESS_ROUTE = "ExpressRoute"
    VPN_CLIENT = "VPNClient"
class VirtualNetworkGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gateway SKU name.
"""
BASIC = "Basic"
HIGH_PERFORMANCE = "HighPerformance"
STANDARD = "Standard"
ULTRA_PERFORMANCE = "UltraPerformance"
VPN_GW1 = "VpnGw1"
VPN_GW2 = "VpnGw2"
VPN_GW3 = "VpnGw3"
VPN_GW1_AZ = "VpnGw1AZ"
VPN_GW2_AZ = "VpnGw2AZ"
VPN_GW3_AZ = "VpnGw3AZ"
ER_GW1_AZ = "ErGw1AZ"
ER_GW2_AZ = "ErGw2AZ"
ER_GW3_AZ = "ErGw3AZ"
class VirtualNetworkGatewaySkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gateway SKU tier.
"""
BASIC = "Basic"
HIGH_PERFORMANCE = "HighPerformance"
STANDARD = "Standard"
ULTRA_PERFORMANCE = "UltraPerformance"
VPN_GW1 = "VpnGw1"
VPN_GW2 = "VpnGw2"
VPN_GW3 = "VpnGw3"
VPN_GW1_AZ = "VpnGw1AZ"
VPN_GW2_AZ = "VpnGw2AZ"
VPN_GW3_AZ = "VpnGw3AZ"
ER_GW1_AZ = "ErGw1AZ"
ER_GW2_AZ = "ErGw2AZ"
ER_GW3_AZ = "ErGw3AZ"
class VirtualNetworkGatewayType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'.
"""
VPN = "Vpn"
EXPRESS_ROUTE = "ExpressRoute"
class VirtualNetworkPeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and
'Disconnected'.
"""
INITIATED = "Initiated"
CONNECTED = "Connected"
DISCONNECTED = "Disconnected"
class VirtualWanSecurityProviderType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The virtual wan security provider type.
"""
EXTERNAL = "External"
NATIVE = "Native"
class VpnClientProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""VPN client protocol enabled for the virtual network gateway.
"""
IKE_V2 = "IkeV2"
SSTP = "SSTP"
OPEN_VPN = "OpenVPN"
class VpnConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the vpn connection.
"""
UNKNOWN = "Unknown"
CONNECTING = "Connecting"
CONNECTED = "Connected"
NOT_CONNECTED = "NotConnected"
class VpnGatewayTunnelingProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""VPN protocol enabled for the P2SVpnServerConfiguration.
"""
IKE_V2 = "IkeV2"
OPEN_VPN = "OpenVPN"
class VpnType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'.
"""
POLICY_BASED = "PolicyBased"
ROUTE_BASED = "RouteBased"
class WebApplicationFirewallAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of Actions
"""
ALLOW = "Allow"
BLOCK = "Block"
LOG = "Log"
class WebApplicationFirewallEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes if the policy is in enabled state or disabled state
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class WebApplicationFirewallMatchVariable(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Match Variable
"""
REMOTE_ADDR = "RemoteAddr"
REQUEST_METHOD = "RequestMethod"
QUERY_STRING = "QueryString"
POST_ARGS = "PostArgs"
REQUEST_URI = "RequestUri"
REQUEST_HEADERS = "RequestHeaders"
REQUEST_BODY = "RequestBody"
REQUEST_COOKIES = "RequestCookies"
class WebApplicationFirewallMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes if it is in detection mode or prevention mode at policy level
"""
PREVENTION = "Prevention"
DETECTION = "Detection"
class WebApplicationFirewallOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes operator to be matched
"""
IP_MATCH = "IPMatch"
EQUAL = "Equal"
CONTAINS = "Contains"
LESS_THAN = "LessThan"
GREATER_THAN = "GreaterThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
REGEX = "Regex"
class WebApplicationFirewallPolicyResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Resource status of the policy.
"""
CREATING = "Creating"
ENABLING = "Enabling"
ENABLED = "Enabled"
DISABLING = "Disabling"
DISABLED = "Disabled"
DELETING = "Deleting"
class WebApplicationFirewallRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes type of rule
"""
MATCH_RULE = "MatchRule"
INVALID = "Invalid"
class WebApplicationFirewallTransform(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes what transforms applied before matching
"""
LOWERCASE = "Lowercase"
TRIM = "Trim"
URL_DECODE = "UrlDecode"
URL_ENCODE = "UrlEncode"
REMOVE_NULLS = "RemoveNulls"
HTML_ENTITY_DECODE = "HtmlEntityDecode"
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/_network_management_client_enums.py | Python | mit | 30,224 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import getpass
import os, subprocess
import socket
import fcntl
import struct
NEWIPADRESS = ""
SMTPSERVER = ""
SMTPUSER = ""
SMTPPORT = "25"
SMTPPASSWD = ""
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface *ifname*.

    Uses the SIOCGIFADDR ioctl on a throw-away UDP socket (Linux only).
    *ifname* is truncated to 15 characters (IFNAMSIZ - 1).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        ifreq = fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15]))
        # Bytes 20-24 of the returned ifreq struct hold the in_addr.
        return socket.inet_ntoa(ifreq[20:24])
    finally:
        # BUG FIX: the original leaked this socket; always release the fd.
        s.close()
def fileSetup():
    """Render the configuration templates and install them.

    Reads the template files from the current directory, substitutes the
    placeholder tokens (NEWIPADRESS, SMTPSERVER, SMTPUSER, SMTPPORT,
    SMTPPASSWD, SMTPENABLE, SMTPTYPE, SMTPONOFF) with the values collected
    from the user, and overwrites the corresponding target files.

    Exits the process with status 99 if any template or target file is
    missing.
    """
    global NEWIPADRESS
    global SMTPSERVER
    global SMTPUSER
    global SMTPPORT
    global SMTPPASSWD
    SOURCEFILELIST = ['gerrit.config', 'secure.config', 'config_inc.php']
    TARGETFILELIST = ['/my_services/gerrit/etc/gerrit.config',
                      '/my_services/gerrit/etc/secure.config',
                      '/my_services/mantisbt/config/config_inc.php']
    CHANGEDFILELIST = []
    # Check if template files are available
    for fileName in SOURCEFILELIST:
        if not os.path.isfile(fileName):
            print(fileName + " template file is missing. Aborting...")
            # BUG FIX: the original called os.exit(99), which does not exist
            # and would raise AttributeError instead of exiting cleanly.
            sys.exit(99)
    # Check if target files are available
    for fileName in TARGETFILELIST:
        if not os.path.isfile(fileName):
            print(fileName + " target file is missing. Aborting...")
            sys.exit(99)
    # Read template files and replace info provided by user
    for source in SOURCEFILELIST:
        with open(source, 'r') as f:
            temp = f.read()
        temp = temp.replace('NEWIPADRESS', NEWIPADRESS)
        temp = temp.replace('SMTPSERVER', SMTPSERVER)
        temp = temp.replace('SMTPUSER', SMTPUSER)
        temp = temp.replace('SMTPPORT', SMTPPORT)
        temp = temp.replace('SMTPPASSWD', SMTPPASSWD)
        if SMTPSERVER == '':
            # No SMTP server given: disable mail notification entirely.
            temp = temp.replace('SMTPENABLE', 'false')
            temp = temp.replace('SMTPTYPE', 'None')
            temp = temp.replace('SMTPONOFF', 'OFF')
        else:
            temp = temp.replace('SMTPENABLE', 'true')
            temp = temp.replace('SMTPTYPE', 'SMTP')
            temp = temp.replace('SMTPONOFF', 'ON')
        CHANGEDFILELIST.append(temp)
    # Overwrite each target file with its rendered template.
    for target, content in zip(TARGETFILELIST, CHANGEDFILELIST):
        with open(target, 'w') as f:
            f.write(content)
def updateDefaultPage():
    """Unpack the bundled default home page into the Apache web root."""
    unzip_cmd = ['unzip', '-oq', 'default_page.zip', '-d', '/var/www/html/']
    subprocess.call(unzip_cmd)
def updateGerritHooks():
    """Pull the latest Gerrit hook scripts, running git as the gerrit user."""
    git_pull = [
        'sudo', '-u', 'gerrit', 'git',
        '--work-tree=/my_services/gerrit/hooks',
        '--git-dir=/my_services/gerrit/hooks/.git',
        'pull', '-q',
    ]
    subprocess.call(git_pull)
def serviceStop(name):
    """Stop the system service *name* via the ``service`` command.

    :param name: service name as known to the init system (e.g. 'gerrit').
    """
    # NOTE: the original declared an unused local ``pid``; removed.
    print("Stopping service: " + name)
    subprocess.call(['service', name, 'stop'])
def systemRestart():
    """Reboot the machine immediately."""
    reboot_args = ['shutdown', '-r', 'now']
    subprocess.call(reboot_args)
def daemonReload():
    """Ask systemd to re-read its unit files."""
    print("Reloading daemon configuration...")
    reload_cmd = ['systemctl', 'daemon-reload']
    subprocess.call(reload_cmd)
def main():
    """Interactive setup entry point.

    Prompts the operator for the box IP address and (optionally) SMTP
    credentials, stops the managed services, rewrites their configuration
    files from the templates, refreshes the default web page and Gerrit
    hooks, and finally reboots the machine.

    NOTE(review): uses Python 2 print statements and ``raw_input`` -- this
    script is Python-2-only.
    """
    global NEWIPADRESS
    global SMTPSERVER
    global SMTPUSER
    global SMTPPORT
    global SMTPPASSWD
    # Default to the address currently assigned to eth0.
    NEWIPADRESS = get_ip_address('eth0')
    # Request to user Raspberry Pi IP address
    print("\nCurrent Raspberry pi IP address is: " + NEWIPADRESS)
    temp = raw_input('Please enter a different IP address if necessary: ')
    if temp != "": NEWIPADRESS = temp
    print "\nAll services will be available under IP: " + NEWIPADRESS + "\n"
    print "IMPORTANT - Leave next question blank if you want to disable Mantis/Gerrit mail notification\n"
    # Request to user SMTP server data
    temp = raw_input('Please enter your SMTP (e-mail) server address: ')
    if temp != "":
        SMTPSERVER = temp
        temp = raw_input('Please enter your SMTP (e-mail) server port number (leave blank for default 25): ')
        if temp != "": SMTPPORT = temp
        temp = raw_input('Please enter your SMTP (e-mail) server username: ')
        if temp != "": SMTPUSER = temp
        # getpass keeps the password off the terminal echo.
        temp = getpass.getpass('Please enter your SMTP (e-mail) server password: ')
        if temp != "": SMTPPASSWD = temp
    # Services must be stopped before their config files are rewritten.
    print "\nStopping services..."
    serviceStop('gerrit')
    serviceStop('apache2')
    serviceStop('jenkins')
    print "\nSetting up configuration files..."
    fileSetup()
    # Force daemon configuration file reload
    daemonReload()
    # Update apache default home page
    updateDefaultPage()
    # Update gerrit hooks
    updateGerritHooks()
    print "\nRestarting system..."
    systemRestart()
    print "\n"
if __name__ == '__main__':
main()
| mmmarq/scm_box_setup | scm_tool_config.py | Python | gpl-3.0 | 4,354 |
"""The epsonworkforce component."""
| turbokongen/home-assistant | homeassistant/components/epsonworkforce/__init__.py | Python | apache-2.0 | 36 |
# vim: set encoding=utf-8
from unittest import TestCase
from lxml import etree
from regparser.notice import build, changes
from regparser.notice.amdparser import DesignateAmendment, Amendment
from regparser.test_utils.xml_builder import XMLBuilder
from regparser.tree.xml_parser.preprocessors import ParseAMDPARs
from regparser.tree.struct import Node
class NoticeBuildTest(TestCase):
    def test_build_notice(self):
        """build_notice should map the Federal Register API fields of a
        final rule onto the internal notice structure; requesting part
        9292 against a document that references parts 9191 and 9292
        yields a single notice.
        """
        # Raw metadata as returned by the Federal Register API.
        fr = {
            'abstract': 'sum sum sum',
            'action': 'actact',
            'agency_names': ['Agency 1', 'Agency 2'],
            'cfr_references': [{'title': 12, 'part': 9191},
                               {'title': 12, 'part': 9292}],
            'citation': 'citation citation',
            'comments_close_on': None,
            'dates': 'date info',
            'document_number': '7878-111',
            'effective_on': '1956-09-09',
            'end_page': 9999,
            'full_text_xml_url': None,
            'html_url': 'some url',
            'publication_date': '1955-12-10',
            'regulation_id_numbers': ['a231a-232q'],
            'start_page': 8888,
            'type': 'Rule',
            'volume': 66,
        }
        notices = build.build_notice('5', '9292', fr)
        self.assertEqual(1, len(notices))
        actual_notice = notices[0]
        # Sort list-valued fields so the comparison is order-independent.
        for key in ['agency_names', 'cfr_parts']:
            actual_notice[key] = sorted(actual_notice[key])
        self.assertEqual(actual_notice, {
            'abstract': 'sum sum sum',
            'action': 'actact',
            'agency_names': ['Agency 1', 'Agency 2'],
            'cfr_parts': ['9191', '9292'],
            'cfr_title': '5',
            'document_number': '7878-111',
            'effective_on': '1956-09-09',
            'fr_citation': 'citation citation',
            'fr_url': 'some url',
            'fr_volume': 66,
            'initial_effective_on': '1956-09-09',
            'meta': {
                'dates': 'date info',
                'end_page': 9999,
                'start_page': 8888,
                'type': 'Rule'
            },
            'publication_date': '1955-12-10',
            'regulation_id_numbers': ['a231a-232q'],
        })
def test_process_xml(self):
"""Integration test for xml processing"""
with XMLBuilder("ROOT") as ctx:
with ctx.SUPLINF():
with ctx.FURINF():
ctx.HD("CONTACT INFO:")
ctx.P("Extra contact info here")
with ctx.ADD():
ctx.P("Email: example@example.com")
ctx.P("Extra instructions")
ctx.HD("Supplementary Info", SOURCE="HED")
ctx.HD("V. Section-by-Section Analysis", SOURCE="HD1")
ctx.HD("8(q) Words", SOURCE="HD2")
ctx.P("Content")
ctx.HD("Section that follows", SOURCE="HD1")
ctx.P("Following Content")
notice = {'cfr_parts': ['9292'], 'meta': {'start_page': 100}}
self.assertEqual(build.process_xml(notice, ctx.xml), {
'cfr_parts': ['9292'],
'footnotes': {},
'meta': {'start_page': 100},
'addresses': {
'methods': [('Email', 'example@example.com')],
'instructions': ['Extra instructions']
},
'contact': 'Extra contact info here',
'section_by_section': [{
'title': '8(q) Words',
'paragraphs': ['Content'],
'children': [],
'footnote_refs': [],
'page': 100,
'labels': ['9292-8-q']
}],
})
def test_process_xml_missing_fields(self):
with XMLBuilder("ROOT") as ctx:
with ctx.SUPLINF():
ctx.HD("Supplementary Info", SOURCE="HED")
ctx.HD("V. Section-by-Section Analysis", SOURCE="HD1")
ctx.HD("8(q) Words", SOURCE="HD2")
ctx.P("Content")
ctx.HD("Section that follows", SOURCE="HD1")
ctx.P("Following Content")
notice = {'cfr_parts': ['9292'], 'meta': {'start_page': 210}}
self.assertEqual(build.process_xml(notice, ctx.xml), {
'cfr_parts': ['9292'],
'footnotes': {},
'meta': {'start_page': 210},
'section_by_section': [{
'title': '8(q) Words',
'paragraphs': ['Content'],
'children': [],
'footnote_refs': [],
'page': 210,
'labels': ['9292-8-q']
}],
})
def test_process_xml_fill_effective_date(self):
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Effective January 1, 2002")
notice = {'cfr_parts': ['902'], 'meta': {'start_page': 10},
'effective_on': '2002-02-02'}
notice = build.process_xml(notice, ctx.xml)
self.assertEqual('2002-02-02', notice['effective_on'])
notice = {'cfr_parts': ['902'], 'meta': {'start_page': 10}}
notice = build.process_xml(notice, ctx.xml)
# Uses the date found in the XML
self.assertEqual('2002-01-01', notice['effective_on'])
notice = {'cfr_parts': ['902'], 'meta': {'start_page': 10},
'effective_on': None}
notice = build.process_xml(notice, ctx.xml)
# Uses the date found in the XML
self.assertEqual('2002-01-01', notice['effective_on'])
def test_add_footnotes(self):
with XMLBuilder("ROOT") as ctx:
ctx.P("Some text")
ctx.child_from_string(
'<FTNT><P><SU>21</SU>Footnote text</P></FTNT>')
ctx.child_from_string(
'<FTNT><P><SU>43</SU>This has a<PRTPAGE P="2222" />break'
'</P></FTNT>')
ctx.child_from_string(
'<FTNT><P><SU>98</SU>This one has<E T="03">emph</E>tags</P>'
'</FTNT>')
notice = {}
build.add_footnotes(notice, ctx.xml)
self.assertEqual(notice, {'footnotes': {
'21': 'Footnote text',
'43': 'This has a break',
'98': 'This one has <em data-original="E-03">emph</em> tags'
}})
    def test_process_designate_subpart(self):
        """A DESIGNATE amendment moving 200.1(a)/(b) into 205 Subpart A should
        yield one change per paragraph, each carrying the destination label."""
        p_list = ['200-?-1-a', '200-?-1-b']
        destination = '205-Subpart:A'
        amended_label = DesignateAmendment('DESIGNATE', p_list, destination)
        subpart_changes = build.process_designate_subpart(amended_label)
        # The '?' markers in the source labels are stripped by processing.
        self.assertItemsEqual(['200-1-a', '200-1-b'], subpart_changes.keys())
        for p, change in subpart_changes.items():
            self.assertEqual(change['destination'], ['205', 'Subpart', 'A'])
            self.assertEqual(change['action'], 'DESIGNATE')
def test_process_amendments(self):
with XMLBuilder("REGTEXT", PART="105", TITLE="12") as ctx:
with ctx.SUBPART():
ctx.HD(u"Subpart A—General", SOURCE="HED")
ctx.AMDPAR(u"2. Designate §§ 105.1 through 105.3 as subpart A "
u"under the heading.")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, ctx.xml)
section_list = ['105-2', '105-3', '105-1']
self.assertItemsEqual(notice['changes'].keys(), section_list)
for l, c in notice['changes'].items():
change = c[0]
self.assertEqual(change['destination'], ['105', 'Subpart', 'A'])
self.assertEqual(change['action'], 'DESIGNATE')
def test_process_amendments_section(self):
with XMLBuilder("REGTEXT", PART="105", TITLE="12") as ctx:
ctx.AMDPAR(u"3. In § 105.1, revise paragraph (b) to read as "
u"follows:")
with ctx.SECTION():
ctx.SECTNO(u"§ 105.1")
ctx.SUBJECT("Purpose.")
ctx.STARS()
ctx.P("(b) This part carries out.")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, ctx.xml)
self.assertItemsEqual(notice['changes'].keys(), ['105-1-b'])
changes = notice['changes']['105-1-b'][0]
self.assertEqual(changes['action'], 'PUT')
self.assertTrue(changes['node']['text'].startswith(
u'(b) This part carries out.'))
def test_process_amendments_multiple_in_same_parent(self):
with XMLBuilder("REGTEXT", PART="105", TITLE="12") as ctx:
ctx.AMDPAR(u"1. In § 105.1, revise paragraph (b) to read as "
u"follows:")
ctx.AMDPAR("2. Also, revise paragraph (c):")
with ctx.SECTION():
ctx.SECTNO(u"§ 105.1")
ctx.SUBJECT("Purpose.")
ctx.STARS()
ctx.P("(b) This part carries out.")
ctx.P("(c) More stuff")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, ctx.xml)
self.assertItemsEqual(notice['changes'].keys(), ['105-1-b', '105-1-c'])
changes = notice['changes']['105-1-b'][0]
self.assertEqual(changes['action'], 'PUT')
self.assertEqual(changes['node']['text'].strip(),
u'(b) This part carries out.')
changes = notice['changes']['105-1-c'][0]
self.assertEqual(changes['action'], 'PUT')
self.assertTrue(changes['node']['text'].strip(),
u'(c) More stuff')
def test_process_amendments_restart_new_section(self):
with XMLBuilder("ROOT") as ctx:
with ctx.REGTEXT(PART="104", TITLE="12"):
ctx.AMDPAR("1. In Supplement I to Part 104, comment "
"22(a) is added")
ctx.P("Content")
with ctx.REGTEXT(PART="105", TITLE="12"):
ctx.AMDPAR(u"3. In § 105.1, revise paragraph (b) to read as "
u"follows:")
with ctx.SECTION():
ctx.SECTNO(u"§ 105.1")
ctx.SUBJECT("Purpose.")
ctx.STARS()
ctx.P("(b) This part carries out.")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, ctx.xml)
self.assertEqual(2, len(notice['amendments']))
c22a, b = notice['amendments']
self.assertEqual(c22a.action, 'POST')
self.assertEqual(b.action, 'PUT')
self.assertEqual(c22a.label, ['104', '22', 'a', 'Interp'])
self.assertEqual(b.label, ['105', '1', 'b'])
def test_process_amendments_no_nodes(self):
with XMLBuilder("ROOT") as ctx:
with ctx.REGTEXT(PART="104", TITLE="12"):
ctx.AMDPAR(u"1. In § 104.13, paragraph (b) is removed")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['104']}
build.process_amendments(notice, ctx.xml)
self.assertEqual(1, len(notice['amendments']))
delete = notice['amendments'][0]
self.assertEqual(delete.action, 'DELETE')
self.assertEqual(delete.label, ['104', '13', 'b'])
def test_process_amendments_markerless(self):
with XMLBuilder("REGTEXT", PART="105", TITLE="12") as ctx:
ctx.AMDPAR(u"1. Revise [label:105-11-p5] as blah")
with ctx.SECTION():
ctx.SECTNO(u"§ 105.11")
ctx.SUBJECT("Purpose.")
ctx.STARS()
ctx.P("Some text here")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, ctx.xml)
self.assertItemsEqual(notice['changes'].keys(), ['105-11-p5'])
changes = notice['changes']['105-11-p5'][0]
self.assertEqual(changes['action'], 'PUT')
def test_process_amendments_multiple_sections(self):
"""Regression test verifying multiple SECTIONs in the same REGTEXT"""
with XMLBuilder("REGTEXT", PART="111") as ctx:
ctx.AMDPAR(u"1. Modify § 111.22 by revising paragraph (b)")
with ctx.SECTION():
ctx.SECTNO(u"§ 111.22")
ctx.SUBJECT("Subject Here.")
ctx.STARS()
ctx.P("(b) Revised second paragraph")
ctx.AMDPAR(u"2. Modify § 111.33 by revising paragraph (c)")
with ctx.SECTION():
ctx.SECTNO(u"§ 111.33")
ctx.SUBJECT("Another Subject")
ctx.STARS()
ctx.P("(c) Revised third paragraph")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['111']}
build.process_amendments(notice, ctx.xml)
self.assertItemsEqual(notice['changes'].keys(),
['111-22-b', '111-33-c'])
def new_subpart_xml(self):
with XMLBuilder("RULE") as ctx:
with ctx.REGTEXT(PART="105", TITLE="12"):
ctx.AMDPAR(u"3. In § 105.1, revise paragraph (b) to read as"
u"follows:")
with ctx.SECTION():
ctx.SECTNO(u"§ 105.1")
ctx.SUBJECT("Purpose.")
ctx.STARS()
ctx.P("(b) This part carries out.")
with ctx.REGTEXT(PART="105", TITLE="12"):
ctx.AMDPAR("6. Add subpart B to read as follows:")
with ctx.CONTENTS():
with ctx.SUBPART():
ctx.SECHD("Sec.")
ctx.SECTNO("105.30")
ctx.SUBJECT("First In New Subpart.")
with ctx.SUBPART():
ctx.HD(u"Subpart B—Requirements", SOURCE="HED")
with ctx.SECTION():
ctx.SECTNO("105.30")
ctx.SUBJECT("First In New Subpart")
ctx.P("For purposes of this subpart, the follow "
"apply:")
ctx.P('(a) "Agent" means agent.')
ParseAMDPARs().transform(ctx.xml)
return ctx.xml
def test_process_new_subpart(self):
par = self.new_subpart_xml().xpath('//AMDPAR')[1]
amended_label = Amendment('POST', '105-Subpart:B')
notice = {'cfr_parts': ['105']}
subpart_changes = build.process_new_subpart(notice, amended_label, par)
new_nodes_added = ['105-Subpart-B', '105-30', '105-30-a']
self.assertItemsEqual(new_nodes_added, subpart_changes.keys())
for l, n in subpart_changes.items():
self.assertEqual(n['action'], 'POST')
self.assertEqual(
subpart_changes['105-Subpart-B']['node']['node_type'], 'subpart')
def test_process_amendments_subpart(self):
notice = {'cfr_parts': ['105']}
build.process_amendments(notice, self.new_subpart_xml())
self.assertTrue('105-Subpart-B' in notice['changes'].keys())
self.assertTrue('105-30-a' in notice['changes'].keys())
self.assertTrue('105-30' in notice['changes'].keys())
def test_process_amendments_mix_regs(self):
"""Some notices apply to multiple regs. For now, just ignore the
sections not associated with the reg we're focused on"""
with XMLBuilder("ROOT") as ctx:
with ctx.REGTEXT(PART="105", TITLE="12"):
ctx.AMDPAR(u"3. In § 105.1, revise paragraph (a) to read as "
u"follows:")
with ctx.SECTION():
ctx.SECTNO(u"§ 105.1")
ctx.SUBJECT("105Purpose.")
ctx.P("(a) 105Content")
with ctx.REGTEXT(PART="106", TITLE="12"):
ctx.AMDPAR(u"3. In § 106.3, revise paragraph (b) to read as "
u"follows:")
with ctx.SECTION():
ctx.SECTNO(u"§ 106.3")
ctx.SUBJECT("106Purpose.")
ctx.P("(b) Content")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['105', '106']}
build.process_amendments(notice, ctx.xml)
self.assertEqual(2, len(notice['changes']))
self.assertTrue('105-1-a' in notice['changes'])
self.assertTrue('106-3-b' in notice['changes'])
def test_process_amendments_context(self):
"""Context should carry over between REGTEXTs"""
with XMLBuilder("ROOT") as ctx:
with ctx.REGTEXT(TITLE="12"):
ctx.AMDPAR(u"3. In § 106.1, revise paragraph (a) to read as "
u"follows:")
with ctx.REGTEXT(TITLE="12"):
ctx.AMDPAR("3. Add appendix C")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['105', '106']}
build.process_amendments(notice, ctx.xml)
self.assertEqual(2, len(notice['amendments']))
amd1, amd2 = notice['amendments']
self.assertEqual(['106', '1', 'a'], amd1.label)
self.assertEqual(['106', 'C'], amd2.label)
def test_process_amendments_insert_in_order(self):
with XMLBuilder("ROOT") as ctx:
with ctx.REGTEXT(TITLE="10"):
ctx.AMDPAR('[insert-in-order] [label:123-45-p6]')
with ctx.SECTION():
ctx.SECTNO(u"§ 123.45")
ctx.SUBJECT("Some Subject.")
ctx.STARS()
ctx.P("This is the sixth paragraph")
ctx.STARS()
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['123']}
build.process_amendments(notice, ctx.xml)
self.assertEqual(1, len(notice['amendments']))
amendment = notice['amendments'][0]
self.assertEqual(['123', '45', 'p6'], amendment.label)
self.assertEqual('INSERT', amendment.action)
def test_introductory_text(self):
""" Sometimes notices change just the introductory text of a paragraph
(instead of changing the entire paragraph tree). """
with XMLBuilder("REGTEXT", PART="106", TITLE="12") as ctx:
ctx.AMDPAR(u"3. In § 106.2, revise the introductory text to read "
u"as follows:")
with ctx.SECTION():
ctx.SECTNO(u"§ 106.2")
ctx.SUBJECT(" Definitions ")
ctx.P(" Except as otherwise provided, the following apply. ")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['106']}
build.process_amendments(notice, ctx.xml)
self.assertEqual('[text]', notice['changes']['106-2'][0]['field'])
def test_multiple_changes(self):
""" A notice can have two modifications to a paragraph. """
with XMLBuilder("ROOT") as ctx:
with ctx.REGTEXT(PART="106", TITLE="12"):
ctx.AMDPAR(u"2. Designate §§ 106.1 through 106.3 as subpart "
u"A under the heading.")
with ctx.REGTEXT(PART="106", TITLE="12"):
ctx.AMDPAR(u"3. In § 106.2, revise the introductory text to "
u"read as follows:")
with ctx.SECTION():
ctx.SECTNO(u"§ 106.2")
ctx.SUBJECT(" Definitions ")
ctx.P(" Except as otherwise provided, the following "
"apply. ")
ParseAMDPARs().transform(ctx.xml)
notice = {'cfr_parts': ['106']}
build.process_amendments(notice, ctx.xml)
self.assertEqual(2, len(notice['changes']['106-2']))
    def test_create_xmlless_changes(self):
        """DELETE and MOVE amendments need no XML content: DELETE yields a
        bare action, MOVE additionally records the destination label parts."""
        labels_amended = [Amendment('DELETE', '200-2-a'),
                          Amendment('MOVE', '200-2-b', '200-2-c')]
        notice_changes = changes.NoticeChanges()
        build.create_xmlless_changes(labels_amended, notice_changes)
        delete = notice_changes.changes['200-2-a'][0]
        move = notice_changes.changes['200-2-b'][0]
        self.assertEqual({'action': 'DELETE'}, delete)
        self.assertEqual({'action': 'MOVE', 'destination': ['200', '2', 'c']},
                         move)
    def test_create_xml_changes_reserve(self):
        """A RESERVE amendment should emit a RESERVE change whose node keeps
        the '[Reserved]' text from the tree."""
        labels_amended = [Amendment('RESERVE', '200-2-a')]
        n2a = Node('[Reserved]', label=['200', '2', 'a'])
        n2 = Node('n2', label=['200', '2'], children=[n2a])
        root = Node('root', label=['200'], children=[n2])
        notice_changes = changes.NoticeChanges()
        build.create_xml_changes(labels_amended, root, notice_changes)
        reserve = notice_changes.changes['200-2-a'][0]
        self.assertEqual(reserve['action'], 'RESERVE')
        self.assertEqual(reserve['node']['text'], u'[Reserved]')
def test_create_xml_changes_stars(self):
labels_amended = [Amendment('PUT', '200-2-a')]
n2a1 = Node('(1) Content', label=['200', '2', 'a', '1'])
n2a2 = Node('(2) Content', label=['200', '2', 'a', '2'])
n2a = Node('(a) * * *', label=['200', '2', 'a'], children=[n2a1, n2a2])
n2 = Node('n2', label=['200', '2'], children=[n2a])
root = Node('root', label=['200'], children=[n2])
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
for label in ('200-2-a-1', '200-2-a-2'):
self.assertTrue(label in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes[label]))
change = notice_changes.changes[label][0]
self.assertEqual('PUT', change['action'])
self.assertFalse('field' in change)
self.assertTrue('200-2-a' in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes['200-2-a']))
change = notice_changes.changes['200-2-a'][0]
self.assertEqual('KEEP', change['action'])
self.assertFalse('field' in change)
def test_create_xml_changes_stars_hole(self):
labels_amended = [Amendment('PUT', '200-2-a')]
n2a1 = Node('(1) * * *', label=['200', '2', 'a', '1'])
n2a2 = Node('(2) a2a2a2', label=['200', '2', 'a', '2'])
n2a = Node('(a) aaa', label=['200', '2', 'a'], children=[n2a1, n2a2])
n2 = Node('n2', label=['200', '2'], children=[n2a])
root = Node('root', label=['200'], children=[n2])
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
for label in ('200-2-a', '200-2-a-2'):
self.assertTrue(label in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes[label]))
change = notice_changes.changes[label][0]
self.assertEqual('PUT', change['action'])
self.assertFalse('field' in change)
self.assertTrue('200-2-a-1' in notice_changes.changes)
self.assertEqual(1, len(notice_changes.changes['200-2-a-1']))
change = notice_changes.changes['200-2-a-1'][0]
self.assertEqual('KEEP', change['action'])
self.assertFalse('field' in change)
def test_create_xml_changes_child_stars(self):
labels_amended = [Amendment('PUT', '200-2-a')]
xml = etree.fromstring("<ROOT><P>(a) Content</P><STARS /></ROOT>")
n2a = Node('(a) Content', label=['200', '2', 'a'],
source_xml=xml.xpath('//P')[0])
n2b = Node('(b) Content', label=['200', '2', 'b'])
n2 = Node('n2', label=['200', '2'], children=[n2a, n2b])
root = Node('root', label=['200'], children=[n2])
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
self.assertTrue('200-2-a' in notice_changes.changes)
self.assertTrue(1, len(notice_changes.changes['200-2-a']))
change = notice_changes.changes['200-2-a'][0]
self.assertEqual('PUT', change['action'])
self.assertFalse('field' in change)
n2a.text = n2a.text + ":"
n2a.source_xml.text = n2a.source_xml.text + ":"
notice_changes = changes.NoticeChanges()
build.create_xml_changes(labels_amended, root, notice_changes)
self.assertTrue('200-2-a' in notice_changes.changes)
self.assertTrue(1, len(notice_changes.changes['200-2-a']))
change = notice_changes.changes['200-2-a'][0]
self.assertEqual('PUT', change['action'])
self.assertEqual('[text]', change.get('field'))
def test_split_doc_num(self):
doc_num = '2013-2222'
effective_date = '2014-10-11'
self.assertEqual(
'2013-2222_20141011',
build.split_doc_num(doc_num, effective_date))
    def test_set_document_numbers(self):
        """A lone notice keeps its document number; when multiple notices are
        present, each number gets its effective date appended."""
        notice = {'document_number': '111', 'effective_on': '2013-10-08'}
        notices = build.set_document_numbers([notice])
        self.assertEqual(notices[0]['document_number'], '111')
        second_notice = {'document_number': '222',
                         'effective_on': '2013-10-10'}
        notices = build.set_document_numbers([notice, second_notice])
        self.assertEqual(notices[0]['document_number'], '111_20131008')
        self.assertEqual(notices[1]['document_number'], '222_20131010')
    def test_fetch_cfr_parts(self):
        """Part numbers should be extracted from the preamble's CFR element,
        including the one after 'and'."""
        with XMLBuilder("RULE") as ctx:
            with ctx.PREAMB():
                ctx.CFR("12 CFR Parts 1002, 1024, and 1026")
        result = build.fetch_cfr_parts(ctx.xml)
        self.assertEqual(result, ['1002', '1024', '1026'])
| cmc333333/regulations-parser | tests/notice_build_tests.py | Python | cc0-1.0 | 25,573 |
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.tests.fixtures import ConfigFixture
from touchdown.tests.stubs.aws import ElasticIpStubber
from .aws import StubberTestCase
class TestElasticIpCreation(StubberTestCase):
    """Apply-goal behaviour for elastic IPs: allocate when absent, re-allocate
    when the locally recorded address no longer exists, no-op when it does."""
    def test_create_elastic_ip(self):
        # There is no local state. It should just make a new one.
        goal = self.create_goal("apply")
        config = self.fixtures.enter_context(ConfigFixture(goal, self.workspace))
        public_ip = config.add_string(name="network.nat-elastic-ip")
        elastic_ip = self.fixtures.enter_context(
            ElasticIpStubber(
                goal.get_service(
                    self.aws.add_elastic_ip(
                        name="test-elastic_ip", public_ip=public_ip
                    ),
                    "apply",
                )
            )
        )
        elastic_ip.add_allocate_address()
        goal.execute()
    def test_recreate_elastic_ip(self):
        # It should look up 8.8.4.4 (from the local state), find it no longer exists and allocate a new eip
        goal = self.create_goal("apply")
        config = self.fixtures.enter_context(ConfigFixture(goal, self.workspace))
        public_ip = config.add_string(name="network.nat-elastic-ip")
        goal.get_service(public_ip, "set").execute("8.8.4.4")
        elastic_ip = self.fixtures.enter_context(
            ElasticIpStubber(
                goal.get_service(
                    self.aws.add_elastic_ip(
                        name="test-elastic_ip", public_ip=public_ip
                    ),
                    "apply",
                )
            )
        )
        elastic_ip.add_describe_addresses_empty_response("8.8.4.4")
        elastic_ip.add_allocate_address()
        goal.execute()
    def test_create_elastic_ip_idempotent(self):
        # It should look up 8.8.8.8 and find it - there is nothing to do.
        goal = self.create_goal("apply")
        config = self.fixtures.enter_context(ConfigFixture(goal, self.workspace))
        public_ip = config.add_string(name="network.nat-elastic-ip")
        goal.get_service(public_ip, "set").execute("8.8.8.8")
        elastic_ip = self.fixtures.enter_context(
            ElasticIpStubber(
                goal.get_service(
                    self.aws.add_elastic_ip(
                        name="test-elastic_ip", public_ip=public_ip
                    ),
                    "apply",
                )
            )
        )
        elastic_ip.add_describe_addresses_one_response("8.8.8.8")
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(elastic_ip.resource)), 0)
class TestElasticIpDestroy(StubberTestCase):
    """Destroy-goal behaviour for elastic IPs: release when the recorded
    address exists, and do nothing when there is no state or no match."""
    def test_destroy_elastic_ip(self):
        # It should look up 8.8.8.8 (from local state) and find it - delete it
        goal = self.create_goal("destroy")
        config = self.fixtures.enter_context(ConfigFixture(goal, self.workspace))
        public_ip = config.add_string(name="network.nat-elastic-ip")
        goal.get_service(public_ip, "set").execute("8.8.8.8")
        elastic_ip = self.fixtures.enter_context(
            ElasticIpStubber(
                goal.get_service(
                    self.aws.add_elastic_ip(
                        name="test-elastic_ip", public_ip=public_ip
                    ),
                    "destroy",
                )
            )
        )
        elastic_ip.add_describe_addresses_one_response("8.8.8.8")
        elastic_ip.add_release_address()
        goal.execute()
    def test_destroy_elastic_ip_idempotent_no_local_state(self):
        # There is no local state - no API calls are made
        goal = self.create_goal("destroy")
        config = self.fixtures.enter_context(ConfigFixture(goal, self.workspace))
        public_ip = config.add_string(name="network.nat-elastic-ip")
        elastic_ip = self.fixtures.enter_context(
            ElasticIpStubber(
                goal.get_service(
                    self.aws.add_elastic_ip(
                        name="test-elastic_ip", public_ip=public_ip
                    ),
                    "destroy",
                )
            )
        )
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(elastic_ip.resource)), 0)
    def test_destroy_elastic_ip_idempotent(self):
        # It should look up 8.8.8.8 and not find it. No further API calls.
        goal = self.create_goal("destroy")
        config = self.fixtures.enter_context(ConfigFixture(goal, self.workspace))
        public_ip = config.add_string(name="network.nat-elastic-ip")
        goal.get_service(public_ip, "set").execute("8.8.8.8")
        elastic_ip = self.fixtures.enter_context(
            ElasticIpStubber(
                goal.get_service(
                    self.aws.add_elastic_ip(
                        name="test-elastic_ip", public_ip=public_ip
                    ),
                    "destroy",
                )
            )
        )
        elastic_ip.add_describe_addresses_empty_response("8.8.8.8")
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(elastic_ip.resource)), 0)
| yaybu/touchdown | touchdown/tests/test_aws_vpc_elastic_ip.py | Python | apache-2.0 | 5,757 |
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Module used to convert ChipWhisperer project files into Riscure's trs format.
Example:
./cw_to_trs.py --input=path/to/project_name --output=project_name.trs
The --export-key option can be used to include the test key in the traces, but
it is not required.
"""
import argparse
import binascii
import chipwhisperer as cw
import trsfile
from tqdm import tqdm
def gen_trs_headers(project, export_key):
    """Returns a trs file header with trace metadata information.
    Args:
      project: ChipWhisperer traces project.
      export_key: Set to True to include the size of the key in the LENGTH_DATA
        field.
    Returns:
      trs file header.
    """
    return {
        trsfile.Header.LABEL_X: 's',
        trsfile.Header.LABEL_Y: 'V',
        # Sample count and data length are taken from the first trace only;
        # assumes every trace in the project has the same shape -- TODO confirm.
        trsfile.Header.NUMBER_SAMPLES: len(project.waves[0]),
        trsfile.Header.TRACE_TITLE: 'title',
        trsfile.Header.TRACE_OVERLAP: False,
        trsfile.Header.GO_LAST_TRACE: False,
        trsfile.Header.SAMPLE_CODING: trsfile.SampleCoding.FLOAT,
        trsfile.Header.LENGTH_DATA: len(gen_trs_data(project.traces[0],
                                                     export_key)),
        # TODO: Hardcoded to 200mV. Consider calculating the range directly from
        # the traces.
        trsfile.Header.ACQUISITION_RANGE_OF_SCOPE: 0.200,
        trsfile.Header.ACQUISITION_COUPLING_OF_SCOPE: 1,
        trsfile.Header.ACQUISITION_OFFSET_OF_SCOPE: 0.0,
        trsfile.Header.ACQUISITION_DEVICE_ID: b'CWLite',
        trsfile.Header.ACQUISITION_TYPE_FILTER: 0,
    }
def calc_data_offsets(trace, export_key, header):
    """Calculate trs data offsets to textin, textout and key.
    Args:
      trace: ChipWhisperer trace used to measure the field lengths.
      export_key: Set to True to add KEY_OFFSET and KEY_LENGTH to the trs
        header.
      header: trs header. To be modified in place.
    """
    # Layout matches gen_trs_data(): textin | textout | (optional) key.
    input_offset = 0
    input_len = len(trace.textin)
    output_offset = input_offset + input_len
    output_len = len(trace.textout)
    key_offset = output_offset + output_len
    key_len = len(trace.key)
    header.update({
        trsfile.Header.INPUT_OFFSET: input_offset,
        trsfile.Header.INPUT_LENGTH: input_len,
        trsfile.Header.OUTPUT_OFFSET: output_offset,
        trsfile.Header.OUTPUT_LENGTH: output_len,
    })
    if export_key:
        header.update({
            trsfile.Header.KEY_OFFSET: key_offset,
            trsfile.Header.KEY_LENGTH: key_len,
        })
def gen_trs_data(trace, export_key):
    """Serialize a trace's crypto material for the trs data section.
    Args:
      trace: ChipWhisperer trace.
      export_key: Set to True to append the key to the end of the result string.
    Returns:
      Binary encoded concatenation of textin, textout and (optionally) key.
    """
    fields = [trace.textin, trace.textout]
    if export_key:
        fields.append(trace.key)
    data = bytearray()
    for field in fields:
        data.extend(field)
    return data
def cw_project_to_trs(project_name, trs_filename, export_keys):
    """Converts ChipWhisperer project into trs trace format.
    Args:
      project_name: Path to ChipWhisperer capture project.
      trs_filename: Output filename for trs result.
      export_keys: Set to true to include the keys in the trs output.
    """
    print(f'input project: {project_name}')
    p = cw.open_project(project_name)
    print(f'num_traces: {len(p.traces)}')
    print(f'num_samples per trace: {len(p.waves[0])}')
    print(f'output file: {trs_filename}')
    # Header metadata (sample count, data length, offsets) is derived from the
    # first trace; assumes all traces in the project are uniform -- TODO confirm.
    h = gen_trs_headers(p, export_keys)
    calc_data_offsets(p.traces[0], export_keys, h)
    traces = []
    for trace in tqdm(p.traces, desc='Converting', ncols=80):
        traces.append(trsfile.Trace(trsfile.SampleCoding.FLOAT, trace.wave,
                                    data=gen_trs_data(trace, export_keys)))
    print('Writing output file, this may take a while.')
    # live_update makes trsfile flush incrementally instead of holding the
    # whole file image in memory until close.
    with trsfile.trs_open(trs_filename, 'w', engine='TrsEngine', headers=h,
                          live_update=True) as t:
        t.extend(traces)
def parse_args():
    """Parse and return the command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', type=str, required=True,
                        help="Input ChipWhisperer project.")
    parser.add_argument('--output', '-o', type=str, required=True,
                        help="Output trs filename.")
    parser.add_argument('--export-key', '-k', action='store_true',
                        help="Include keys in data output.")
    return parser.parse_args()
# Script entry point: convert the given ChipWhisperer project to a trs file.
if __name__ == "__main__":
    args = parse_args()
    cw_project_to_trs(args.input, args.output, args.export_key)
| lowRISC/ot-sca | util/cw_to_trs.py | Python | apache-2.0 | 4,728 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Jonathan Schultz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
import sys
import argparse
import fnmatch
from NVivoNorm import NVivoNorm
from sqlalchemy import *
exec(open(os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'DataTypes.py').read())
def saveSources(arglist):
    """Export source objects from a normalised NVivo file to disk.

    Parses command-line style arguments (from ``arglist``, or from
    ``sys.argv`` when it is None), optionally writes a commented logfile of
    the invocation, then writes every source whose name matches the
    ``--source`` pattern out as ``<Name>.<objecttype>`` under ``--path``.

    Raises:
        Whatever the argument parsing, file I/O or database layer raises;
        the database transaction is rolled back before propagating.
    """
    parser = argparse.ArgumentParser(description='Save sources from a normalised NVivo file',
                                     fromfile_prefix_chars='@')
    parser.add_argument('-v', '--verbosity', type=int, default=1)
    parser.add_argument('--no-comments', action='store_true', help='Do not produce a comments logfile')
    parser.add_argument('-s', '--source', type=str, default = '%',
                        help='Source or name or pattern')
    parser.add_argument('-p', '--path', type=str, default='.',
                        help='Output file directory')
    parser.add_argument('infile', type=str,
                        help='Input normalised file')
    # Bug fix: honour the 'arglist' parameter; parse_args(None) still falls
    # back to sys.argv, so saveSources(None) behaves as before.
    args = parser.parse_args(arglist)
    hiddenargs = ['verbosity']
    norm = None
    try:
        if not args.no_comments:
            logfilename = os.path.join(args.path, 'saveSources.log')
            comments = (' ' + args.path + ' ').center(80, '#') + '\n'
            comments += '# ' + os.path.basename(sys.argv[0]) + '\n'
            # Record each argument in the parser's '@'-file format so the
            # logfile doubles as a replayable argument file.
            # (Renamed from 'arglist', which shadowed the parameter.)
            for argname in args.__dict__.keys():
                if argname in hiddenargs:
                    continue
                val = getattr(args, argname)
                if isinstance(val, str):
                    comments += '# --' + argname + '="' + val + '"\n'
                elif isinstance(val, bool):
                    if val:
                        comments += '# --' + argname + '\n'
                elif isinstance(val, list):
                    for valitem in val:
                        if isinstance(valitem, str):
                            comments += '# --' + argname + '="' + valitem + '"\n'
                        else:
                            comments += '# --' + argname + '=' + str(valitem) + '\n'
                elif val is not None:
                    comments += '# --' + argname + '=' + str(val) + '\n'
            with open(logfilename, 'w') as logfile:
                logfile.write(comments)

        norm = NVivoNorm(args.infile)
        norm.begin()
        query = select([norm.Source.c.Name, norm.Source.c.Object, norm.Source.c.ObjectType]).where(
                    norm.Source.c.Name.like(literal(args.source)))
        for row in norm.con.execute(query):
            # Each matching source is written as <Name>.<objecttype>.
            outfilename = os.path.join(args.path, row.Name + '.' + row.ObjectType.lower())
            with open(outfilename, 'wb') as outfile:
                outfile.write(row.Object)
    except:
        # Bug fix: the original re-raised *before* its rollback, leaving the
        # rollback (and 'del norm') unreachable. Roll back, then propagate.
        if norm is not None:
            norm.rollback()
        raise
    else:
        # Read-only run: release the transaction rather than committing.
        norm.rollback()
        del norm
# Script entry point; None makes saveSources parse sys.argv.
if __name__ == '__main__':
    saveSources(None)
| BarraQDA/nvivotools | saveSources.py | Python | gpl-3.0 | 3,534 |
import pytest
from conftest import assert_bash_exec
@pytest.mark.bashcomp(
ignore_env=r"^[+-]((BASHOPTS|MANPATH)=|shopt -. failglob)"
)
class TestMan:
manpath = "$PWD/man"
assumed_present = "man"
@pytest.fixture
def colonpath(self, request, bash):
try:
assert_bash_exec(bash, "uname -s 2>&1 | grep -qiF cygwin")
except AssertionError:
pass
else:
pytest.skip("Cygwin doesn't like paths with colons")
return
assert_bash_exec(bash, "mkdir -p $TESTDIR/../tmp/man/man3")
assert_bash_exec(
bash, "touch $TESTDIR/../tmp/man/man3/Bash::Completion.3pm.gz"
)
request.addfinalizer(
lambda: assert_bash_exec(bash, "rm -r $TESTDIR/../tmp/man")
)
@pytest.mark.complete(
"man bash-completion-testcas",
env=dict(MANPATH=manpath),
require_cmd=True,
)
def test_1(self, completion):
assert completion == "e"
@pytest.mark.complete("man man1/f", cwd="man", env=dict(MANPATH=manpath))
def test_2(self, completion):
assert completion == "oo.1"
@pytest.mark.complete("man man/", cwd="man", env=dict(MANPATH=manpath))
def test_3(self, completion):
assert completion == "quux.8"
@pytest.mark.complete(
"man %s" % assumed_present,
cwd="shared/empty_dir",
env=dict(MANPATH=manpath),
)
def test_4(self, completion):
"""
Assumed present should not be completed complete when there's no
leading/trailing colon in $MANPATH.
"""
assert not completion
@pytest.mark.complete(
"man %s" % assumed_present,
require_cmd=True,
cwd="shared/empty_dir",
env=dict(MANPATH="%s:" % manpath),
)
def test_5(self, completion):
"""Trailing colon appends system man path."""
assert completion
@pytest.mark.complete(
"man bash-completion-testcas",
require_cmd=True,
env=dict(MANPATH="%s:" % manpath),
)
def test_6(self, completion):
assert completion == "e"
@pytest.mark.complete(
"man %s" % assumed_present,
require_cmd=True,
cwd="shared/empty_dir",
env=dict(MANPATH=":%s" % manpath),
)
def test_7(self, completion):
"""Leading colon prepends system man path."""
assert completion
@pytest.mark.complete(
"man bash-completion-testcas",
require_cmd=True,
env=dict(MANPATH=":%s" % manpath),
)
def test_8(self, completion):
assert completion == "e"
@pytest.mark.complete(
"man %s" % assumed_present,
require_cmd=True,
cwd="shared/empty_dir",
pre_cmds=("shopt -s failglob",),
)
def test_9(self, bash, completion):
assert self.assumed_present in completion
assert_bash_exec(bash, "shopt -u failglob")
@pytest.mark.complete(
"man Bash::C",
require_cmd=True,
env=dict(MANPATH="%s:../tmp/man" % manpath),
)
def test_10(self, bash, colonpath, completion):
assert completion == "ompletion"
@pytest.mark.complete("man -", require_cmd=True)
def test_11(self, completion):
    """A lone dash should complete to the command's options."""
    assert completion
@pytest.mark.complete("man -S 1", require_cmd=True)
def test_delimited_first(self, completion):
    # just appends space
    # No candidate list, but the sole match is closed with a trailing space.
    assert not completion
    assert completion.endswith(" ")
@pytest.mark.complete("man -S 1:", require_cmd=True)
def test_delimited_after_delimiter(self, completion):
    """After the ':' delimiter a fresh section should be offered."""
    assert completion
    assert "1" not in completion
@pytest.mark.complete("man -S 1:2", require_cmd=True)
def test_delimited_later(self, completion):
    # just appends space
    # A complete later section behaves like the first: space only.
    assert not completion
    assert completion.endswith(" ")
@pytest.mark.complete("man -S 1:1", require_cmd=True)
def test_delimited_deduplication(self, completion):
    # no completion, no space appended
    # A section already present in the list must not be offered again.
    assert not completion
    assert not completion.endswith(" ")
| algorythmic/bash-completion | test/t/test_man.py | Python | gpl-2.0 | 4,099 |
# entry point for the websocket loop
# NOTE(review): gevent.monkey is imported but patch_all() is never called here;
# presumably uWSGI is launched with its gevent loop which handles patching --
# confirm against the deployment configuration.
import gevent.monkey
import redis.connection
# Route redis-py's socket usage through gevent's cooperative sockets so a
# blocking read yields to the event loop instead of stalling the worker.
redis.connection.socket = gevent.socket
from ws4redis.uwsgi_runserver import uWSGIWebsocketServer
# WSGI callable picked up by uWSGI; upgrades matching requests to websockets.
application = uWSGIWebsocketServer()
| Frky/django-websocket-redis | examples/wsgi_websocket.py | Python | mit | 217 |
"""
A simple test of stills refinement using fake data.
Only the crystal is perturbed while the beam and detector are known.
"""
from __future__ import annotations
def test(args=None):
    """
    Refine a perturbed crystal model against synthetic still-shot data.

    Only the crystal orientation and unit cell are refined; the beam and
    detector are fixed at their known (true) geometry.  Regression checks
    assert the number of refinement steps and that the refined unit cell
    matches the target cell.

    :param args: optional command-line style PHIL overrides forwarded to the
        mock geometry builder; defaults to no overrides.
    """
    # Use a None sentinel instead of a mutable default argument.
    if args is None:
        args = []
    # Python and cctbx imports
    from math import pi
    from cctbx.sgtbx import space_group, space_group_symbols
    # Symmetry constrained parameterisation for the unit cell
    from cctbx.uctbx import unit_cell
    # We will set up a mock scan and a mock experiment list
    from dxtbx.model import ScanFactory
    from dxtbx.model.experiment_list import Experiment, ExperimentList
    from libtbx.phil import parse
    from libtbx.test_utils import approx_equal
    from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge
    from scitbx import matrix
    # Get module to build models using PHIL
    import dials.tests.algorithms.refinement.setup_geometry as setup_geometry
    # Crystal parameterisations
    from dials.algorithms.refinement.parameterisation.crystal_parameters import (
        CrystalOrientationParameterisation,
        CrystalUnitCellParameterisation,
    )
    from dials.algorithms.refinement.prediction.managed_predictors import (
        ScansRayPredictor,
        StillsExperimentsPredictor,
    )
    # Reflection prediction
    from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection
    # Import for surgery on reflection_tables
    from dials.array_family import flex
    #############################
    # Setup experimental models #
    #############################
    master_phil = parse(
        """
    include scope dials.tests.algorithms.refinement.geometry_phil
    include scope dials.tests.algorithms.refinement.minimiser_phil
    """,
        process_includes=True,
    )
    # build models, with a larger crystal than default in order to get enough
    # reflections on the 'still' image
    param = """
    geometry.parameters.crystal.a.length.range=40 50;
    geometry.parameters.crystal.b.length.range=40 50;
    geometry.parameters.crystal.c.length.range=40 50;
    geometry.parameters.random_seed = 42"""
    models = setup_geometry.Extract(
        master_phil, cmdline_args=args, local_overrides=param
    )
    crystal = models.crystal
    mydetector = models.detector
    mygonio = models.goniometer
    mybeam = models.beam
    # Build a mock scan for a 1.5 degree wedge. Only used for generating indices near
    # the Ewald sphere
    sf = ScanFactory()
    myscan = sf.make_scan(
        image_range=(1, 1),
        exposure_times=0.1,
        oscillation=(0, 1.5),
        epochs=list(range(1)),
        deg=True,
    )
    sequence_range = myscan.get_oscillation_range(deg=False)
    im_width = myscan.get_oscillation(deg=False)[1]
    assert approx_equal(im_width, 1.5 * pi / 180.0)
    # Build experiment lists
    stills_experiments = ExperimentList()
    stills_experiments.append(
        Experiment(beam=mybeam, detector=mydetector, crystal=crystal, imageset=None)
    )
    scans_experiments = ExperimentList()
    scans_experiments.append(
        Experiment(
            beam=mybeam,
            detector=mydetector,
            crystal=crystal,
            goniometer=mygonio,
            scan=myscan,
            imageset=None,
        )
    )
    ##########################################################
    # Parameterise the models (only for perturbing geometry) #
    ##########################################################
    xlo_param = CrystalOrientationParameterisation(crystal)
    xluc_param = CrystalUnitCellParameterisation(crystal)
    ################################
    # Apply known parameter shifts #
    ################################
    # rotate crystal (=5 mrad each rotation)
    xlo_p_vals = []
    p_vals = xlo_param.get_param_vals()
    xlo_p_vals.append(p_vals)
    new_p_vals = [a + b for a, b in zip(p_vals, [5.0, 5.0, 5.0])]
    xlo_param.set_param_vals(new_p_vals)
    # change unit cell (=1.0 Angstrom length upsets, 0.5 degree of
    # gamma angle)
    xluc_p_vals = []
    p_vals = xluc_param.get_param_vals()
    xluc_p_vals.append(p_vals)
    cell_params = crystal.get_unit_cell().parameters()
    cell_params = [a + b for a, b in zip(cell_params, [1.0, 1.0, -1.0, 0.0, 0.0, 0.5])]
    new_uc = unit_cell(cell_params)
    newB = matrix.sqr(new_uc.fractionalization_matrix()).transpose()
    S = symmetrize_reduce_enlarge(crystal.get_space_group())
    S.set_orientation(orientation=newB)
    X = tuple([e * 1.0e5 for e in S.forward_independent_parameters()])
    xluc_param.set_param_vals(X)
    # keep track of the target crystal model to compare with refined
    from copy import deepcopy
    target_crystal = deepcopy(crystal)
    #############################
    # Generate some reflections #
    #############################
    # All indices in a 2.0 Angstrom sphere for crystal
    resolution = 2.0
    index_generator = IndexGenerator(
        crystal.get_unit_cell(),
        space_group(space_group_symbols(1).hall()).type(),
        resolution,
    )
    indices = index_generator.to_array()
    # Build a ray predictor and predict rays close to the Ewald sphere by using
    # the narrow rotation scan
    ref_predictor = ScansRayPredictor(scans_experiments, sequence_range)
    obs_refs = ref_predictor(indices, experiment_id=0)
    # Take only those rays that intersect the detector
    intersects = ray_intersection(mydetector, obs_refs)
    obs_refs = obs_refs.select(intersects)
    # Add in flags and ID columns by copying into standard reflection table
    tmp = flex.reflection_table.empty_standard(len(obs_refs))
    tmp.update(obs_refs)
    obs_refs = tmp
    # Invent some variances for the centroid positions of the simulated data
    im_width = 0.1 * pi / 180.0
    px_size = mydetector[0].get_pixel_size()
    var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2)
    var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2)
    var_phi = flex.double(len(obs_refs), (im_width / 2.0) ** 2)
    obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)
    # Re-predict using the stills reflection predictor
    stills_ref_predictor = StillsExperimentsPredictor(stills_experiments)
    obs_refs_stills = stills_ref_predictor(obs_refs)
    # Set 'observed' centroids from the predicted ones
    obs_refs_stills["xyzobs.mm.value"] = obs_refs_stills["xyzcal.mm"]
    ###############################
    # Undo known parameter shifts #
    ###############################
    xlo_param.set_param_vals(xlo_p_vals[0])
    xluc_param.set_param_vals(xluc_p_vals[0])
    # make a refiner
    from dials.algorithms.refinement.refiner import phil_scope
    params = phil_scope.fetch(source=parse("")).extract()
    # Change this to get a plot
    do_plot = False
    if do_plot:
        params.refinement.refinery.journal.track_parameter_correlation = True
    from dials.algorithms.refinement.refiner import RefinerFactory
    # decrease bin_size_fraction to terminate on RMSD convergence
    params.refinement.target.bin_size_fraction = 0.01
    params.refinement.parameterisation.beam.fix = "all"
    params.refinement.parameterisation.detector.fix = "all"
    refiner = RefinerFactory.from_parameters_data_experiments(
        params, obs_refs_stills, stills_experiments
    )
    # run refinement
    history = refiner.run()
    # regression tests
    assert len(history["rmsd"]) == 9
    refined_crystal = refiner.get_experiments()[0].crystal
    uc1 = refined_crystal.get_unit_cell()
    uc2 = target_crystal.get_unit_cell()
    assert uc1.is_similar_to(uc2)
    if do_plot:
        plt = refiner.parameter_correlation_plot(
            len(history["parameter_correlation"]) - 1
        )
        plt.show()
| dials/dials | tests/algorithms/refinement/test_stills_refinement.py | Python | bsd-3-clause | 7,750 |
"""This script finds all '.properties-MERGED' files with no relevant translation for a given language.
This script requires the python libraries: gitpython, jproperties, pyexcel-xlsx, xlsxwriter and pyexcel along with
python >= 3.9.1 or the requirements.txt file found in this directory can be used
(https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/#using-requirements-files). As a
consequence of gitpython, this project also requires git >= 1.7.0.
"""
import sys
from typing import List
from envutil import get_proj_dir
from excelutil import write_results_to_xlsx
from gitutil import get_property_file_entries, get_commit_id, get_git_root, list_paths, get_tree
from csvutil import write_results_to_csv
import argparse
from languagedictutil import find_unmatched_translations
from outputtype import OutputType
from propentry import convert_to_output, PropEntry
from propsutil import DEFAULT_PROPS_FILENAME, get_lang_bundle_name
def get_unmatched(repo_path: str, language: str, original_commit: str, translated_commit: str) -> List[PropEntry]:
    """
    Gather the original key/value entries that lack a translation.

    :param repo_path: Path to the git repository.
    :param language: The language identifier (i.e. 'ja').
    :param original_commit: Commit providing the original (English) key values.
    :param translated_commit: Commit providing the translated key values.
    :return: The list of entries with no matching translation.
    """
    def _files_ending_with(commit: str, suffix: str):
        # Walk the tree at `commit`, keeping only files whose path ends with `suffix`.
        return (entry for entry in list_paths(get_tree(repo_path, commit))
                if entry[0].endswith(suffix))

    bundle_name = get_lang_bundle_name(language)
    return find_unmatched_translations(
        orig_file_iter=_files_ending_with(original_commit, DEFAULT_PROPS_FILENAME),
        translated_file_iter=_files_ending_with(translated_commit, bundle_name),
        orig_filename=DEFAULT_PROPS_FILENAME,
        translated_filename=bundle_name)
def main():
    """Parse CLI arguments and write the untranslated entries to csv/xlsx."""
    # noinspection PyTypeChecker
    parser = argparse.ArgumentParser(description='Gathers all key-value pairs within .properties-MERGED files that '
                                                 'have not been translated.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(dest='output_path', type=str, help='The path to the output file. The output path should be'
                                                           ' specified as a relative path with the dot slash notation '
                                                           '(i.e. \'./outputpath.xlsx\') or an absolute path.')
    parser.add_argument('-r', '--repo', dest='repo_path', type=str, required=False,
                        help='The path to the repo. If not specified, path of script is used.')
    parser.add_argument('-o', '--output-type', dest='output_type', type=OutputType, choices=list(OutputType),
                        required=False, help="The output type. Currently supports 'csv' or 'xlsx'.", default='xlsx')
    parser.add_argument('-nc', '--no-commit', dest='no_commit', action='store_true', default=False,
                        required=False, help="Suppresses adding commits to the generated header.")
    parser.add_argument('-nt', '--no-translated-col', dest='no_translated_col', action='store_true', default=False,
                        required=False, help="Don't include a column for translation.")
    parser.add_argument('-l', '--language', dest='language', type=str, required=False, default=None,
                        help="The language identifier (i.e. ja). If specified, this only returns items where the key"
                             " is not translated (i.e. no matching Japanese key or value is empty)")
    parser.add_argument('-oc', '--original-commit', dest='original_commit', type=str, required=False, default=None,
                        help="The commit to gather original keys.")
    parser.add_argument('-tc', '--translated-commit', dest='translated_commit', type=str, required=False, default=None,
                        help="The commit to gather translations.")
    args = parser.parse_args()
    # Default to the repository containing this script when -r is omitted.
    repo_path = args.repo_path if args.repo_path is not None else get_git_root(get_proj_dir())
    output_path = args.output_path
    output_type = args.output_type
    translated_col = not args.no_translated_col
    # NOTE(review): args.no_commit is parsed but never used below; presumably
    # it should suppress the commit in the generated header -- confirm.
    original_commit = args.original_commit
    translated_commit = args.translated_commit
    # With a language: only untranslated entries; otherwise: every entry.
    prop_entries = get_unmatched(repo_path, args.language, original_commit, translated_commit) \
        if args.language else get_property_file_entries(repo_path)
    processing_result = convert_to_output(prop_entries, original_commit, translated_col)
    # based on https://stackoverflow.com/questions/60208/replacements-for-switch-statement-in-python
    {
        OutputType.csv: write_results_to_csv,
        OutputType.xlsx: write_results_to_xlsx
    }[output_type](processing_result, output_path)
    sys.exit(0)
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
| sleuthkit/autopsy | release_scripts/localization_scripts/unmatchedscript.py | Python | apache-2.0 | 5,117 |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Python JSON <-> Java Jabsorb format converter
Jabsorb is a serialization library for Java, converting Java beans to JSON
and vice versa.
This module is compatible with the fork of Jabsorb available at
https://github.com/isandlaTech/cohorte-org.jabsorb.ng
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Documentation strings format
__docformat__ = "restructuredtext en"
# Boot module version
__version__ = "1.0.0"
# ------------------------------------------------------------------------------
# Standard library
import inspect
import re
try:
# Python 2
# pylint: disable=F0401
import __builtin__ as builtins
except ImportError:
# Python 3
# pylint: disable=F0401
import builtins
# ------------------------------------------------------------------------------
JSON_CLASS = '__jsonclass__'
"""
Tuple used by jsonrpclib to indicate wich Python class corresponds to its
content
"""
JAVA_CLASS = "javaClass"
"""
Dictionary key used by Jabsorb to indicate which Java class corresponds to its
content
"""
JAVA_MAPS_PATTERN = re.compile(r"java\.util\.(.*Map|Properties)")
""" Pattern to detect standard Java classes for maps """
JAVA_LISTS_PATTERN = re.compile(r"java\.util\..*List")
""" Pattern to detect standard Java classes for lists """
JAVA_SETS_PATTERN = re.compile(r"java\.util\..*Set")
""" Pattern to detect standard Java classes for sets """
# ------------------------------------------------------------------------------
class HashableDict(dict):
    """
    dict subclass usable as a hash key.

    Plain dictionaries are unhashable; this variant derives its hash from
    the sorted item list, so two equal dicts share the same hash value.
    """

    def __hash__(self):
        # Sort items so the hash never depends on insertion order.
        ordered_items = sorted(self.items())
        return hash("HashableDict({0})".format(ordered_items))
class HashableSet(set):
    """
    set subclass usable as a hash key (plain sets are unhashable).
    """

    def __hash__(self):
        # Sorting the elements makes the hash independent of iteration order.
        ordered = sorted(self)
        return hash("HashableSet({0})".format(ordered))
class HashableList(list):
    """
    list subclass usable as a hash key (plain lists are unhashable).
    """

    def __hash__(self):
        # The hash is computed over the SORTED elements, so it ignores order;
        # equal lists (same order) therefore always hash alike.
        ordered = sorted(self)
        return hash("HashableList({0})".format(ordered))
class AttributeMap(dict):
    """
    Dictionary whose entries are also reachable as attributes:
    ``m.key`` and ``m['key']`` are interchangeable.
    """

    def __init__(self, *args, **kwargs):
        # Point __dict__ at the mapping itself so attribute access and item
        # access share the very same storage.
        super(AttributeMap, self).__init__(*args, **kwargs)
        self.__dict__ = self

    def __hash__(self):
        # Same scheme as HashableDict: order-insensitive hash of the items.
        return hash("AttributeMap({0})".format(sorted(self.items())))
# ------------------------------------------------------------------------------
def _compute_jsonclass(obj):
"""
Compute the content of the __jsonclass__ field for the given object
:param obj: An object
:return: The content of the __jsonclass__ field
"""
# It's not a standard type, so it needs __jsonclass__
module_name = inspect.getmodule(obj).__name__
json_class = obj.__class__.__name__
if module_name not in ('', '__main__'):
json_class = '{0}.{1}'.format(module_name, json_class)
return [json_class, []]
def _is_builtin(obj):
"""
Checks if the type of the given object is a built-in one or not
:param obj: An object
:return: True if the object is of a built-in type
"""
module = inspect.getmodule(obj)
if module in (None, builtins):
return True
else:
return module.__name__ in ('', '__main__')
def _is_converted_class(java_class):
    """
    Checks if the given Java class is one we *might* have set up

    :param java_class: A Java class name (may be None or empty)
    :return: True if the name matches a standard converted collection class
    """
    if not java_class:
        return False
    # Converted values carry one of the standard java.util collection names.
    collection_patterns = (JAVA_MAPS_PATTERN, JAVA_LISTS_PATTERN,
                           JAVA_SETS_PATTERN)
    return any(pattern.match(java_class) is not None
               for pattern in collection_patterns)
# ------------------------------------------------------------------------------
def to_jabsorb(value):
    """
    Adds information for Jabsorb, if needed.
    Converts maps and lists to a jabsorb form.
    Keeps tuples as is, to let them be considered as arrays.
    :param value: A Python result to send to Jabsorb
    :return: The result in a Jabsorb map format (not a JSON object)
    """
    # None ?
    if value is None:
        return None
    # Map ?
    elif isinstance(value, dict):
        # A dict already carrying type hints is either a bean representation
        # or a map we converted earlier; plain dicts get the full wrapping.
        if JAVA_CLASS in value or JSON_CLASS in value:
            if not _is_converted_class(value.get(JAVA_CLASS)):
                # Bean representation
                converted_result = {}
                for key, content in value.items():
                    converted_result[key] = to_jabsorb(content)
                try:
                    # Keep the raw jsonrpclib information
                    converted_result[JSON_CLASS] = value[JSON_CLASS]
                except KeyError:
                    pass
            else:
                # We already worked on this value
                converted_result = value
        else:
            # Needs the whole transformation
            converted_result = {JAVA_CLASS: "java.util.HashMap"}
            converted_result["map"] = map_pairs = {}
            for key, content in value.items():
                map_pairs[key] = to_jabsorb(content)
            try:
                # Keep the raw jsonrpclib information
                map_pairs[JSON_CLASS] = value[JSON_CLASS]
            except KeyError:
                pass
    # List ? (consider tuples as an array)
    elif isinstance(value, list):
        converted_result = {JAVA_CLASS: "java.util.ArrayList",
                            'list': [to_jabsorb(entry) for entry in value]}
    # Set ?
    elif isinstance(value, (set, frozenset)):
        converted_result = {JAVA_CLASS: "java.util.HashSet",
                            'set': [to_jabsorb(entry) for entry in value]}
    # Tuple ? (used as array, except if it is empty)
    elif isinstance(value, tuple):
        converted_result = [to_jabsorb(entry) for entry in value]
    elif hasattr(value, JAVA_CLASS):
        # Class with a Java class hint: convert into a dictionary
        # (public, non-method attributes only).
        class_members = dict((name, getattr(value, name))
                             for name in dir(value)
                             if not name.startswith('_'))
        converted_result = HashableDict(
            (name, to_jabsorb(content))
            for name, content in class_members.items()
            if not inspect.ismethod(content))
        # Do not forget the Java class
        converted_result[JAVA_CLASS] = getattr(value, JAVA_CLASS)
        # Also add a __jsonclass__ entry
        converted_result[JSON_CLASS] = _compute_jsonclass(value)
    # Other ?
    else:
        # Primitives and unknown objects pass through untouched.
        converted_result = value
    return converted_result
def from_jabsorb(request, seems_raw=False):
    """
    Transforms a jabsorb request into a more Python data model (converts maps
    and lists)
    :param request: Data coming from Jabsorb
    :param seems_raw: Set it to True if the given data seems to already have
                      been parsed (no Java class hint). If True, the lists will
                      be kept as lists instead of being converted to tuples.
    :return: A Python representation of the given data
    """
    if isinstance(request, (tuple, set, frozenset)):
        # Special case : JSON arrays (Python lists)
        return type(request)(from_jabsorb(element) for element in request)
    elif isinstance(request, list):
        # Check if we were a list or a tuple
        if seems_raw:
            return list(from_jabsorb(element) for element in request)
        else:
            return tuple(from_jabsorb(element) for element in request)
    elif isinstance(request, dict):
        # Dictionary
        java_class = request.get(JAVA_CLASS)
        json_class = request.get(JSON_CLASS)
        # No hint at all => the data was already parsed once.
        seems_raw = not java_class and not json_class
        if java_class:
            # Java Map ?
            if JAVA_MAPS_PATTERN.match(java_class) is not None:
                return HashableDict((from_jabsorb(key), from_jabsorb(value))
                                    for key, value in request["map"].items())
            # Java List ?
            elif JAVA_LISTS_PATTERN.match(java_class) is not None:
                return HashableList(from_jabsorb(element)
                                    for element in request["list"])
            # Java Set ?
            elif JAVA_SETS_PATTERN.match(java_class) is not None:
                return HashableSet(from_jabsorb(element)
                                   for element in request["set"])
        # Any other case
        result = AttributeMap((from_jabsorb(key),
                               from_jabsorb(value, seems_raw))
                              for key, value in request.items())
        # Keep JSON class information as is
        if json_class:
            result[JSON_CLASS] = json_class
        return result
    elif not _is_builtin(request):
        # Bean
        # NOTE: the bean is converted IN PLACE -- its public fields are
        # overwritten with their converted values.
        for attr in dir(request):
            # Only convert public fields
            if not attr[0] == '_':
                # Field conversion
                setattr(request, attr, from_jabsorb(getattr(request, attr)))
        return request
    else:
        # Any other case
        return request
| isandlaTech/cohorte-demos | led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/pelix/misc/jabsorb.py | Python | apache-2.0 | 10,153 |
from os import environ
def pytest_addoption(parser):
    """Register the ``env_files`` ini option: one env-file path per line."""
    parser.addini("env_files",
                  type="linelist",
                  help="a line separated list of env files to parse",
                  default=[])
def pytest_load_initial_conftests(args, early_config, parser):
    """Parse every configured env file before conftests are collected."""
    # Runs early so the exported variables are visible to conftest.py files.
    for file in early_config.getini("env_files"):
        parse_env_file(file)
def parse_env_file(file):
    """
    Read a dotenv-style file and export each KEY=VALUE pair into the process
    environment.

    Blank lines and lines starting with '#' are ignored.  Values may contain
    '=' characters: only the FIRST '=' separates key from value.

    :param file: path of the env file to parse
    :raises ValueError: if a non-comment line contains no '=' (unchanged
        behaviour from the original implementation)
    """
    with open(file) as fh:
        for line in fh:
            # skip comments and blank lines
            if line.startswith('#') or not line.strip():
                continue
            # otherwise treat lines as environment variables in a KEY=VALUE
            # combo; split on the first '=' only so values such as URLs with
            # query strings survive intact
            key, value = line.split('=', 1)
            environ[key.strip()] = value.strip()
| JonnyFunFun/pytest-envfiles | pytest_envfiles/plugin.py | Python | mit | 753 |
import time
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# NOTE(review): Python 2 syntax (print statements) -- runs under the project's
# legacy test harness, not Python 3.
chrome_options = Options()
# nwapp=<dir> tells the NW.js chromedriver to launch this directory as an app.
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
try:
    print driver.current_url
    # The app reports its outcome in the #result element of the page.
    result = driver.find_element_by_id('result').get_attribute('innerHTML')
    print result
    assert('success' in result)
finally:
    # Always tear the browser down, even when the assertion fails.
    driver.quit()
| nwjs/nw.js | test/sanity/manifest-html-main-inside/test.py | Python | mit | 506 |
import threading, Queue, cStringIO
import tcp, certutils
import OpenSSL
class ServerThread(threading.Thread):
    """Background thread that drives a server's accept loop."""

    def __init__(self, server):
        # server: any object exposing serve_forever() and shutdown().
        threading.Thread.__init__(self)
        self.server = server

    def run(self):
        # Blocks until shutdown() is invoked from another thread.
        self.server.serve_forever()

    def shutdown(self):
        # Delegate so callers can stop the server through the thread object.
        self.server.shutdown()
class ServerTestBase:
    # Base class for tests that need a live TCP server running in a
    # background thread for the lifetime of the test class.
    # TLS configuration dict ({cert, key, v3_only, ...}) or None for plaintext.
    ssl = None
    # Handler class instantiated for every accepted connection.
    handler = None
    # Bind address; port 0 makes the OS pick a free port.
    addr = ("localhost", 0)
    @classmethod
    def setupAll(cls):
        # Start the server once per test class; the chosen port is published
        # on cls.port so tests can connect, and errors arrive on cls.q.
        cls.q = Queue.Queue()
        s = cls.makeserver()
        cls.port = s.address.port
        cls.server = ServerThread(s)
        cls.server.start()
    @classmethod
    def makeserver(cls):
        # Factory hook: subclasses may override to customise the server.
        return TServer(cls.ssl, cls.q, cls.handler, cls.addr)
    @classmethod
    def teardownAll(cls):
        cls.server.shutdown()
    @property
    def last_handler(self):
        # Convenience accessor for the handler of the most recent connection.
        return self.server.server.last_handler
class TServer(tcp.TCPServer):
    def __init__(self, ssl, q, handler_klass, addr):
        """
        ssl: A {cert, key, v3_only} dict.
        q: queue collecting formatted tracebacks from handler errors.
        handler_klass: class instantiated for each accepted connection.
        addr: (host, port) bind address.
        """
        tcp.TCPServer.__init__(self, addr)
        self.ssl, self.q = ssl, q
        self.handler_klass = handler_klass
        # Most recent handler, exposed for test assertions.
        self.last_handler = None
    def handle_client_connection(self, request, client_address):
        # Wrap each accepted socket in a handler and, when configured,
        # upgrade the connection to SSL with the supplied PEM material.
        h = self.handler_klass(request, client_address, self)
        self.last_handler = h
        if self.ssl:
            # Python 2: file() builtin is used to read the PEM data.
            cert = certutils.SSLCert.from_pem(
                file(self.ssl["cert"], "rb").read()
            )
            raw = file(self.ssl["key"], "rb").read()
            key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, raw)
            if self.ssl["v3_only"]:
                # Restrict negotiation to exactly SSLv3.
                method = tcp.SSLv3_METHOD
                options = tcp.OP_NO_SSLv2|tcp.OP_NO_TLSv1
            else:
                method = tcp.SSLv23_METHOD
                options = None
            h.convert_to_ssl(
                cert, key,
                method = method,
                options = options,
                handle_sni = getattr(h, "handle_sni", None),
                request_client_cert = self.ssl["request_client_cert"],
                cipher_list = self.ssl.get("cipher_list", None)
            )
        h.handle()
        h.finish()
    def handle_error(self, request, client_address):
        # Capture the traceback and hand it to the test via the queue
        # instead of printing it to stderr.
        s = cStringIO.StringIO()
        tcp.TCPServer.handle_error(self, request, client_address, s)
        self.q.put(s.getvalue())
| phase-dev/phase | netlib/test.py | Python | gpl-3.0 | 2,405 |
#! /usr/bin/env python3
#
# Example Python module for prepaid usage using MySQL
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#
# Copyright 2002 Miguel A.L. Paraz <mparaz@mparaz.com>
# Copyright 2002 Imperium Technology, Inc.
#
# $Id$
import freeradius
import MySQLdb
# Configuration
configDb = "python" # Database name
configHost = "localhost" # Database host
configUser = "python" # Database user and password
configPasswd = "python"
# xxx Database
# Globals
dbHandle = None
def log(level, s):
    """Write *s* to the FreeRADIUS log at *level*, tagged with the module name."""
    freeradius.radlog(level, "prepaid.py: " + s)
def instantiate(p):
    """Module Instantiation. 0 for success, -1 for failure. p is a dummy variable here."""
    # The handle is stored module-wide so authorize()/accounting() can reuse it.
    global dbHandle
    try:
        dbHandle = MySQLdb.connect(
            db=configDb, host=configHost, user=configUser, passwd=configPasswd
        )
    except MySQLdb.OperationalError as e:
        # Report the error and return -1 for failure.
        # xxx A more advanced module would retry the database.
        log(freeradius.L_ERR, str(e))
        return -1
    log(freeradius.L_INFO, "db connection: " + str(dbHandle))
    return 0
def authorize(authData):
    """Authorization and authentication are done in one step."""
    # Extract the data we need.
    userName = None
    userPasswd = None
    for t in authData:
        if t[0] == "User-Name":
            userName = t[1]
        elif t[0] == "Password":
            userPasswd = t[1]
    # Build and log the SQL statement
    # freeradius puts double quotes (") around the string representation of
    # the RADIUS packet.
    # NOTE(review): userName is spliced into the SQL relying only on those
    # quotes -- an injection risk; a parameterized query would be safer.
    sql = "select passwd, maxseconds from users where username = " + userName
    log(freeradius.L_DBG, sql)
    # Get a cursor
    # xxx Or should this be one cursor all throughout?
    try:
        dbCursor = dbHandle.cursor()
    except MySQLdb.OperationalError as e:
        log(freeradius.L_ERR, str(e))
        return freeradius.RLM_MODULE_FAIL
    # Execute the SQL statement
    try:
        dbCursor.execute(sql)
    except MySQLdb.OperationalError as e:
        log(freeradius.L_ERR, str(e))
        dbCursor.close()
        return freeradius.RLM_MODULE_FAIL
    # Get the result. (passwd, maxseconds)
    result = dbCursor.fetchone()
    if not result:
        # User not found
        log(freeradius.L_INFO, "user not found: " + userName)
        dbCursor.close()
        return freeradius.RLM_MODULE_NOTFOUND
    # Compare passwords
    # Ignore the quotes around userPasswd.
    if result[0] != userPasswd[1:-1]:
        log(freeradius.L_DBG, "user password mismatch: " + userName)
        return freeradius.RLM_MODULE_REJECT
    maxSeconds = result[1]
    # Compute their session limit
    # Build and log the SQL statement
    sql = "select sum(seconds) from sessions where username = " + userName
    log(freeradius.L_DBG, sql)
    # Execute the SQL statement
    try:
        dbCursor.execute(sql)
    except MySQLdb.OperationalError as e:
        log(freeradius.L_ERR, str(e))
        dbCursor.close()
        return freeradius.RLM_MODULE_FAIL
    # Get the result. (sum,)
    result = dbCursor.fetchone()
    if (not result) or (not result[0]):
        # No usage yet
        secondsUsed = 0
    else:
        secondsUsed = result[0]
    # Done with cursor
    dbCursor.close()
    # Note that MySQL returns the result of SUM() as a float.
    sessionTimeout = maxSeconds - int(secondsUsed)
    if sessionTimeout <= 0:
        # No more time, reject outright
        log(freeradius.L_INFO, "user out of time: " + userName)
        return freeradius.RLM_MODULE_REJECT
    # Log the success
    log(freeradius.L_DBG, "user accepted: %s, %d seconds" % (userName, sessionTimeout))
    # We are adding to the RADIUS packet
    # Note that the session timeout integer must be converted to string.
    # We need to set an Auth-Type.
    return (
        freeradius.RLM_MODULE_UPDATED,
        (("Session-Timeout", str(sessionTimeout)),),
        (("Auth-Type", "python"),),
    )
    # If you want to use different operators
    # you can do
    # return (freeradius.RLM_MODULE_UPDATED,
    #         (
    #             ('Session-Timeout', ':=', str(sessionTimeout)),
    #             ('Some-other-option', '-=', Value'),
    #         ),
    #         (
    #             ('Auth-Type', ':=', 'python'),
    #         ),
    #        )
def authenticate(p):
    """Authentication hook: authorize() already verified the password, accept."""
    return freeradius.RLM_MODULE_OK
def preacct(p):
    """Pre-accounting hook: nothing to do, accept the packet."""
    return freeradius.RLM_MODULE_OK
def accounting(acctData):
    """Accounting."""
    # Extract the data we need.
    userName = None
    acctSessionTime = None
    acctStatusType = None
    # xxx A dict would make this nice.
    for t in acctData:
        if t[0] == "User-Name":
            userName = t[1]
        elif t[0] == "Acct-Session-Time":
            acctSessionTime = t[1]
        elif t[0] == "Acct-Status-Type":
            acctStatusType = t[1]
    # We will not deal with Start for now.
    # We may later, for simultaneous checks and the like.
    if acctStatusType == "Start":
        return freeradius.RLM_MODULE_OK
    # Build and log the SQL statement
    # freeradius puts double quotes (") around the string representation of
    # the RADIUS packet.
    #
    # xxx This is simplistic as it does not record the time, etc.
    #
    # NOTE(review): as in authorize(), userName is interpolated directly into
    # the SQL; int(acctSessionTime) also raises if the attribute is missing.
    sql = "insert into sessions (username, seconds) values (%s, %d)" % (
        userName,
        int(acctSessionTime),
    )
    log(freeradius.L_DBG, sql)
    # Get a cursor
    # xxx Or should this be one cursor all throughout?
    try:
        dbCursor = dbHandle.cursor()
    except MySQLdb.OperationalError as e:
        log(freeradius.L_ERR, str(e))
        return freeradius.RLM_MODULE_FAIL
    # Execute the SQL statement
    try:
        dbCursor.execute(sql)
    except MySQLdb.OperationalError as e:
        log(freeradius.L_ERR, str(e))
        dbCursor.close()
        return freeradius.RLM_MODULE_FAIL
    return freeradius.RLM_MODULE_OK
def detach():
    """Detach and clean up."""
    # Called once when the server unloads the module.
    # Shut down the database connection.
    global dbHandle
    log(freeradius.L_DBG, "closing database handle: " + str(dbHandle))
    dbHandle.close()
    return freeradius.RLM_MODULE_OK
# Test the modules
# Smoke test: connect to the DB and attempt one authorization when run directly.
if __name__ == "__main__":
    instantiate(None)
    print(authorize((("User-Name", '"map"'), ("User-Password", '"abc"'))))
| jrouzierinverse/freeradius-server | src/modules/rlm_python/prepaid.py | Python | gpl-2.0 | 6,988 |
import numpy as np
from collections import OrderedDict
class Society:
neighbourhood_rows = 2
neighbourhood_cols = 2
neighbourhood_count = neighbourhood_rows * neighbourhood_cols
childcare_cost = 4000
transportation_cost = 2000
def __init__(self, rng):
self.rng = rng
self.gender = OrderedDict(male=0.5, female=0.5)
self.race = OrderedDict(black=0.3, white=0.3, hispanic=0.2, asian=0.2)
self.interv_private = 0
self.interv_public = 0
self.get_job_cost_public = 0
self.get_job_cost_private = 0
# NOTE: order matters! probabilities can be conditional on anything
# that is above them in the list below
# keys: overall is p(F)
# other terms are p(F|term)
# underscores_like_this are p(F|underscores^like^this)
self.probs = [
#('prison', dict(overall=0.4, male=0.2, female=0.1, white=0.2,
# black=0.3, black_male=0.9)),
('prison', OrderedDict(overall=0.02)),
('childcare', OrderedDict(overall=0.4)),
('highschool', OrderedDict(overall=0.4)),
]
self.jobs = {
'service_low': dict(no_highschool=None, highschool=0.5, experience_service=1),
'service_high': dict(no_highschool=None, highschool=0.5, experience_service=1, baseline=-3),
'manufacturing_low': dict(no_highschool=None, highschool=0.5, experience_manufacturing=1),
'manufacturing_high': dict(no_highschool=None, highschool=0.5, experience_manufacturing=1, baseline=-3),
}
self.job_sector = {
'service_low': 'service',
'service_high': 'service',
'manufacturing_high': 'manufacturing',
'manufacturing_low': 'manufacturing',
}
self.job_commonality = OrderedDict(service_low=0.3, service_high=0.3,
manufacturing_low=0.2, manufacturing_high=0.2)
self.job_income = {
'service_low': (13000, 1300), # starting, annual raise
'service_high': (22000, 3600), # starting, annual raise
'manufacturing_low': (14000, 900), # starting, annual raise
'manufacturing_high': (27000, 3500), # starting, annual raise
}
self.job_retention = {
'service_low': [0.5, 0.8], # first year, next years
'service_high': [0.5, 0.8],
'manufacturing_low': [0.5, 0.8],
'manufacturing_high': [0.5, 0.8],
}
self.job_productivity = {
# exponential curves take approx 3*exp_time to reach asymptote
'service_low': (32000, 0.5), # final asymptote, exp_time
'service_high': (67000, 1), # final asymptote, exp_time
'manufacturing_low': (37000, 0.5), # final asymptote, exp_time
'manufacturing_high': (73000, 1), # final asymptote, exp_time
}
self.job_hiring = {
'service_low': 5000,
'service_high': 5000,
'manufacturing_low': 5000,
'manufacturing_high': 5000,
}
self.neighbourhoods = [Neighbourhood(self)
for i in range(self.neighbourhood_count)]
self.distance_penalty_scale = 10000
self.set_racial_discrimination(0.3)
def set_racial_discrimination(self, value=1.0,
                              races=('black', 'hispanic', 'asian')):
    """Apply a hiring penalty to the given races in every job type.

    Each listed race feature gets a suitability weight of -value in
    every job's requirement dict, so employers score those applicants
    lower.

    :param value: size of the penalty (0.0 disables discrimination).
    :param races: iterable of race feature names to penalize.  The
        default was changed from a mutable list to a tuple (mutable
        default arguments are a Python pitfall); callers may still pass
        a list.
    """
    for v in self.jobs.values():
        for r in races:
            v[r] = -value
def adjust_retention(self, value):
    """Move every job-retention probability halfway toward *value*.

    Each job type's retention list (first-year rate, later-years rate)
    is updated in place to the average of its old value and *value*.
    """
    for probs in self.job_retention.values():
        for idx, old in enumerate(probs):
            probs[idx] = 0.5 * (old + value)
# numerical
def create_attributes(self):
    """Create the continuous (numerical) attributes for a new person.

    Returns a dict of per-person scalar traits; the experience and
    unemployed-time counters start at zero and are incremented as the
    simulation steps.
    """
    attr = {}
    # per-step inclination to apply for a job (normal draw, may leave [0, 1])
    attr['prob_apply'] = self.rng.normal(0.5, 0.25)
    attr['distance_penalty'] = self.rng.uniform(0, 0.5)
    # NOTE(review): this second draw silently overwrites the line above, so
    # distance_penalty is always uniform(1.0, 2.0) -- looks like a leftover
    # from tuning.  Removing either draw would change the RNG stream (and
    # therefore every seeded run), so it is only flagged here.
    attr['distance_penalty'] = self.rng.uniform(1.0, 2.0)
    # mean and spread of the noise added to interview scores
    attr['interview_skill'] = self.rng.normal(0.2, 0.2)
    attr['interview_skill_sd'] = self.rng.uniform(0.1, 0.4)
    attr['experience'] = 0.0
    attr['experience_service'] = 0.0
    attr['experience_manufacturing'] = 0.0
    attr['unemployed_time'] = 0.0
    return attr
# binary
def create_features(self):
    """Create the binary feature list for a new person.

    Picks one gender and one race, then samples each feature listed in
    self.probs (prison, childcare, highschool, ...) from its
    conditional probability given the features drawn so far.  A feature
    that does not occur is recorded explicitly as 'no_<feature>'.
    """
    features = []
    features.append(self.pick_one(self.gender))
    features.append(self.pick_one(self.race))
    for feature, prob in self.probs:
        # p(F | features drawn so far), per the table for this feature
        p = self.compute_conditional(features, prob)
        if self.rng.rand() < p:
            features.append(feature)
        else:
            features.append('no_' + feature)
    return features
def pick_one(self, options):
    """Draw one key from *options*, weighted by its values.

    :param options: OrderedDict mapping choice name -> probability
        (values must sum to 1 for numpy's choice).
    :return: one of the keys.
    """
    assert isinstance(options, OrderedDict)
    # list() the views: under Python 3, dict views are not accepted by
    # numpy's RandomState.choice (the original relied on Python 2's
    # list-returning keys()/values()).
    return self.rng.choice(list(options.keys()), p=list(options.values()))
def compute_conditional(self, feature, prob):
    """Return p(F | features) from a table of conditional probabilities.

    :param feature: list of binary features the person already has.
    :param prob: mapping with an 'overall' base rate p(F); other keys
        are p(F|term), and underscore-joined keys such as 'black_male'
        stand for the joint condition p(F | black AND male).
    :return: a float probability, combining the applicable terms in a
        naive-Bayes style.
    """
    #TODO: validate this algorithm
    # single-condition terms that apply to this person
    relevant = [k for k in prob.keys() if '_' not in k and k in feature]
    # joint terms: when every part matches, the joint term replaces its
    # individual parts
    for k in prob.keys():
        if '_' in k:
            parts = k.split('_')
            for p in parts:
                if p not in feature:
                    break
            else:
                for p in parts:
                    if p in relevant:
                        relevant.remove(p)
                relevant.append(k)
    pF = prob['overall']
    if len(relevant) == 0:
        return pF
    p = [prob[k] for k in relevant]
    if len(p) == 1:
        # BUGFIX: the original returned the one-element list itself
        # rather than the scalar probability inside it.
        return p[0]
    # combine several conditionals via the odds form of naive Bayes
    prod_all = np.prod(p)                     # renamed: 'all' shadowed the builtin
    prod_none = np.prod([1-pp for pp in p])
    probability = prod_all / (prod_all + (pF / (1-pF)) * prod_none)
    return probability
def color_blend(c1, c2, blend):
    """Linearly blend two RGB colors and format as an HTML hex string.

    :param c1: start color, array-like of three floats in [0, 1].
    :param c2: end color, same format.
    :param blend: blend factor; values outside [0, 1] clamp to the
        endpoints (0 -> c1, 1 -> c2).
    :return: color as a '#rrggbb' string.
    """
    if blend <= 0:
        c = c1
    elif blend >= 1.0:
        c = c2
    else:
        c = c1 * (1-blend) + c2 * (blend)
    # BUGFIX: %x requires integers; passing floats relied on deprecated
    # Python 2 coercion and fails outright on Python 3.
    return '#%02x%02x%02x' % (int(c[0]*255), int(c[1]*255), int(c[2]*255))
class Person:
    """One simulated worker: enters at age 16, applies for vacant jobs,
    and is removed from the model once past Model.max_age."""

    def __init__(self, society):
        self.society = society
        self.age = 16
        self.job = None            # current Job, or None while unemployed
        self.job_length = 0.0      # years in the current employment state
        self.income = 0            # cumulative income earned
        self.features = society.create_features()      # binary features
        self.attributes = society.create_attributes()  # numerical traits
        self.neighbourhood = society.rng.choice(society.neighbourhoods)
        self.location = self.neighbourhood.allocate_location()
        # suitability bonus given to jobs in the home neighbourhood
        self.local_preference_bonus = 1000
        self.childcare_support = 0  # subsidy set by ChildcareIntervention

    def does_apply(self, job):
        """Return True if this person chooses to apply for *job*.

        The job's starting salary must beat a reservation wage (raised
        by childcare and commuting costs, lowered by any childcare
        subsidy); then a random draw against prob_apply decides.
        """
        salary = self.society.job_income[job.type][0]
        reservation_wage = 11000
        if 'childcare' in self.features:
            reservation_wage += self.society.childcare_cost
            reservation_wage -= self.childcare_support
        if self.neighbourhood is not job.employer.neighbourhood:
            reservation_wage += self.society.transportation_cost
        if reservation_wage > salary:
            return False
        p = self.attributes['prob_apply']
        # NOTE(review): distance_penalty is ~uniform(1, 2) and the default
        # distance_penalty_scale is 10000, so p goes far below zero here --
        # out-of-neighbourhood applications essentially never happen until
        # an intervention lowers the scale.  Confirm this is intended.
        if job.employer.neighbourhood is not self.neighbourhood:
            p -= self.attributes['distance_penalty'] * self.society.distance_penalty_scale
        return self.society.rng.rand() < p

    def compute_suitability(self, job):
        """Score *job* from this person's point of view; used to pick
        between simultaneous offers (higher is better)."""
        score = 0
        if self.neighbourhood is job.employer.neighbourhood:
            score += self.local_preference_bonus
        else:
            score -= self.society.transportation_cost
        score += self.society.job_income[job.type][0]
        return score

    def get_color(self):
        """Color for the visualization grid: red while unemployed, blue
        while employed; the shade deepens over a 5-year ramp."""
        if self.job is None:
            return color_blend(np.array([1.0, 0.5, 0.5]), np.array([1.0, 0.0, 0.0]),
                               self.job_length/5.0)
        else:
            return color_blend(np.array([0.5, 0.5, 1.0]), np.array([0.0, 0.0, 1.0]),
                               self.job_length/5.0)

    def get_info(self):
        """Return an HTML summary shown when this person is inspected."""
        text = '<h1>Person:</h1>'
        # only show the sampled features (prison/childcare/highschool, ...)
        visible = [x[0] for x in self.society.probs]
        text += ', '.join([f for f in self.features if f in visible])
        text += '<br/>%3.1f years old' % self.age
        status = 'employed' if self.job is not None else 'unemployed'
        text +='<br/>%s for %2.1f years' % (status, self.job_length)
        if self.job: text += ' at %s' % self.job.type
        text +='<br/>experience: %2.1f years (%2.1f service, %2.1f manufacturing)' % (self.attributes['experience'],
                                                                                     self.attributes['experience_service'],
                                                                                     self.attributes['experience_manufacturing'])
        return text
class Employer:
    """A firm with a fixed set of jobs; tracks money flows both per step
    and cumulatively across the whole run."""

    jobs_per_employer = 10  # positions created per employer

    def __init__(self, society):
        self.society = society
        self.jobs = [Job(society, self) for i in range(self.jobs_per_employer)]
        # employers never locate in neighbourhood 0 (the residential area)
        self.neighbourhood = society.rng.choice(society.neighbourhoods[1:])
        self.location = self.neighbourhood.allocate_location()
        # cumulative totals across all steps
        self.total_hiring_cost = 0
        self.total_salary = 0
        self.total_productivity = 0
        self.total_net = 0

    def get_color(self):
        """Grid color (grey) for the visualization."""
        return '#888'

    def get_info(self):
        """Return an HTML summary shown when this employer is inspected."""
        text = '<h1>Employer:</h1>'
        text += 'net: <strong>$%5.2f</strong>' % self.total_net
        return text

    def step(self, dt):
        """Advance this employer by *dt* years: pay salaries, accrue
        productivity, and charge hiring costs for new employees.

        :param dt: fraction of a year covered by this step.
        """
        self.hiring_cost = 0
        self.salary = 0
        self.productivity = 0
        for j in self.jobs:
            if j.employee is not None:
                # salary grows linearly with tenure
                start, slope = self.society.job_income[j.type]
                salary = start + j.employee.job_length * slope
                self.salary += salary * dt
                j.employee.income += salary * dt
                # productivity saturates exponentially with sector experience
                t = j.employee.attributes['experience_'+self.society.job_sector[j.type]]
                prod_max, prod_time = self.society.job_productivity[j.type]
                prod = prod_max * (1-np.exp(-t/prod_time))
                self.productivity += prod * dt
                # job_length == 0 marks an employee hired this step
                if j.employee.job_length == 0.0:
                    self.hiring_cost += self.society.job_hiring[j.type]
        self.net = self.productivity - self.hiring_cost - self.salary
        self.total_net += self.net
        self.total_salary += self.salary
        self.total_productivity += self.productivity
        self.total_hiring_cost += self.hiring_cost
class Job:
    """A single position at an employer, with a randomly drawn type."""

    def __init__(self, society, employer):
        """Create a vacant job whose type is sampled from the society's
        job_commonality distribution."""
        self.employee = None
        self.employer = employer
        self.society = society
        self.type = society.rng.choice(society.job_commonality.keys(),
                                       p=society.job_commonality.values())

    def compute_suitability(self, person):
        """Score how well *person* fits this job.

        Weights come from society.jobs[self.type]: a weight of None
        marks a disqualifying feature (returns -infinity), 'baseline'
        is an unconditional offset, named binary features add their
        weight, and numeric attributes add weight * attribute value.
        """
        requirements = self.society.jobs[self.type]
        score = 0
        for name, weight in requirements.items():
            if weight is None and name in person.features:
                return -np.inf
            if name == 'baseline':
                score += weight
            elif name in person.features:
                score += weight
            elif name in person.attributes:
                score += weight * person.attributes[name]
        return score
class Neighbourhood:
    """A 7x7 grid of display cells that people and employers occupy.

    NOTE(review): Python 2 code -- allocate_location uses a print
    statement, and the locations pool relies on zip() returning a list
    (Python 3's lazy zip would break pop()).
    """
    def __init__(self, society):
        self.society = society
        self.rows = 7
        self.cols = 7
        x, y = np.meshgrid(np.arange(self.cols), np.arange(self.rows))
        # pool of free (x, y) cells; Python 2 zip() returns a list
        self.locations = zip(x.flatten(),y.flatten())
    def allocate_location(self):
        """Pop and return a random free cell, or (0,0) when full.

        The (0,0) fallback means an overfull neighbourhood stacks
        everyone on the same corner cell.
        """
        if len(self.locations) == 0:
            print 'warning: not enough space in neighbourhood'
            return (0,0)
        index = self.society.rng.randint(len(self.locations))
        return self.locations.pop(index)
    def free_location(self, loc):
        """Return a cell to the free pool."""
        self.locations.append(loc)
class Model:
    """Top-level simulation driver: steps a Society of people and
    employers through 0.1-year increments, matching unemployed people
    to vacant jobs and recording summary statistics."""

    employer_count = 10   # number of employers created
    people_per_step = 1   # new 16-year-olds added each step
    years_per_step = 0.1  # simulated years per call to step()
    max_age = 25          # people are removed once older than this

    def __init__(self, seed=None):
        """Create the society, the employers, and an empty population.

        :param seed: optional RNG seed for reproducible runs.
        """
        self.rng = np.random.RandomState(seed=seed)
        self.society = Society(self.rng)
        self.employers = [Employer(self.society)
                          for i in range(self.employer_count)]
        self.people = []
        self.steps = 0
        self.interventions = []
        self.data = {}
        self.init_data()

    def step(self):
        """Advance the simulation one step: apply interventions, add
        people, run the application/offer rounds, step employers, age
        and retire people, then record data."""
        self.steps += 1
        for interv in self.interventions:
            interv.apply(self, self.steps)
        for i in range(self.people_per_step):
            self.people.append(Person(self.society))
        # gather applications for every currently vacant job
        applications = OrderedDict()
        for e in self.employers:
            for j in e.jobs:
                if j.employee is None:
                    applications[j] = []
        for p in self.people:
            if p.job is None:
                for j in applications.keys():
                    if p.does_apply(j):
                        applications[j].append(p)
        # interview scores are drawn once per step and reused each round
        interview = {}
        for job, applicants in applications.items():
            for a in applicants:
                score = job.compute_suitability(a)
                score += self.rng.normal(a.attributes['interview_skill'],
                                         a.attributes['interview_skill_sd'])
                interview[(job, a)] = score
        iterations = 4
        for i in range(iterations):
            all_offers = OrderedDict()
            for job, applicants in applications.items():
                # BUGFIX: skip jobs filled in an earlier round and
                # applicants already hired, so a job is never offered
                # twice and a person never holds two jobs at once.
                if job.employee is not None:
                    continue
                candidates = [a for a in applicants if a.job is None]
                score = [interview[(job, a)] for a in candidates]
                if len(score) > 0:
                    max_score = max(score)
                    if max_score > 0:
                        index = score.index(max_score)
                        person = candidates[index]
                        if person not in all_offers:
                            all_offers[person] = []
                        all_offers[person].append(job)
            # each person accepts the best positive offer they received
            for person, offers in all_offers.items():
                score = [person.compute_suitability(j) for j in offers]
                if len(score) > 0:
                    max_score = max(score)
                    if max_score > 0:
                        index = score.index(max_score)
                        job = offers[index]
                        person.job = job
                        job.employee = person
                        # per-hire intervention costs (job placement aid)
                        self.society.interv_public += self.society.get_job_cost_public
                        self.society.interv_private += self.society.get_job_cost_private
                        person.job_evaluation = 'medium'
                        person.job_length = 0.0
        for e in self.employers:
            e.step(self.years_per_step)
        self.increase_age()
        self.remove_older()
        self.job_evaluation()
        self.update_data()

    def job_evaluation(self):
        """Randomly fire employees based on their job type's retention
        rates (first-year rate, then steady-state rate)."""
        for p in self.people:
            if p.job is not None:
                index = int(p.job_length)
                retention = self.society.job_retention[p.job.type]
                if index >= len(retention):
                    r = retention[-1]
                else:
                    r = retention[index]
                # annual non-retention scaled to a per-step probability
                r = (1-r) * self.years_per_step
                if self.society.rng.rand() < r:
                    self.fire(p)

    def fire(self, person):
        """Remove *person* from their job, leaving the position vacant."""
        assert person.job is not None
        person.job.employee = None
        person.job = None
        person.job_length = 0.0

    def increase_age(self):
        """Age everyone by one step, accumulating experience (employed)
        or unemployed time (unemployed)."""
        for p in self.people:
            p.age += self.years_per_step
            # job_length doubles as time-in-current-state for display
            p.job_length += self.years_per_step
            if p.job is not None:
                p.attributes['experience'] += self.years_per_step
                sector = self.society.job_sector[p.job.type]
                p.attributes['experience_%s' % sector] += self.years_per_step
            else:
                p.attributes['unemployed_time'] += self.years_per_step
            # mirror age into attributes so statistics can threshold on it
            p.attributes['age'] = p.age

    def remove_older(self):
        """Retire everyone older than max_age, freeing their grid cell
        and their job."""
        # BUGFIX: iterate over a copy -- removing from the list being
        # iterated made the loop skip the element after each removal.
        for p in list(self.people):
            if p.age > self.max_age:
                p.neighbourhood.free_location(p.location)
                self.people.remove(p)
                if p.job is not None:
                    p.job.employee = None

    def calc_employment(self):
        """Return the employed fraction of the population (0 if empty)."""
        # guard added for consistency with calc_feature_employment
        if len(self.people) == 0:
            return 0
        count = 0
        for p in self.people:
            if p.job is not None:
                count += 1
        return float(count)/len(self.people)

    def calc_feature_employment(self, feature):
        """Return the employed fraction among people with *feature*."""
        count = 0
        total = 0
        for p in self.people:
            if feature in p.features:
                total += 1
                if p.job is not None:
                    count += 1
        if total == 0: return 0
        return float(count) / total

    def calc_attribute_employment(self, attribute, threshold):
        """Return the employed fraction among people whose *attribute*
        is at least *threshold*."""
        count = 0
        total = 0
        for p in self.people:
            if p.attributes[attribute] >= threshold:
                total += 1
                if p.job is not None:
                    count += 1
        if total == 0: return 0
        return float(count) / total

    def calc_feature_rate(self, feature):
        """Return the fraction of the population with *feature* (0 if
        the population is empty)."""
        if len(self.people) == 0:
            return 0
        count = 0
        for p in self.people:
            if feature in p.features:
                count += 1
        return float(count)/len(self.people)

    def calc_attribute_rate(self, attribute, threshold):
        """Return the fraction of people whose *attribute* is at least
        *threshold* (0 if the population is empty)."""
        if len(self.people) == 0:
            return 0
        count = 0
        for p in self.people:
            if p.attributes[attribute] >= threshold:
                count += 1
        return float(count)/len(self.people)

    def calc_employer_net(self):
        """Return total per-step net income across all employers."""
        return sum([e.net for e in self.employers])

    def check_jobs(self):
        """Debug helper: print each person's features and job type."""
        for p in self.people:
            # %s-formatting keeps this valid on both Python 2 and 3
            print('%s %s' % (p.features,
                             p.job.type if p.job is not None else None))
        #for e in self.employers:
        #    print e.total_net

    def init_data(self):
        """Create the (empty) time-series lists filled by update_data."""
        self.data['employment'] = []
        self.data['employer_net'] = []
        self.data['highschool'] = []
        self.data['employment_childcare'] = []
        self.data['employment_nohighschool'] = []
        self.data['employment_2_or_more_years'] = []
        self.data['employment_18plus'] = []
        self.data['proportion_childcare'] = []
        self.data['proportion_nohighschool'] = []
        self.data['proportion_2_or_more_years'] = []
        self.data['proportion_18plus'] = []
        self.data['cost_hiring'] = []
        self.data['cost_salary'] = []
        self.data['production'] = []
        self.data['interv_public'] = []
        self.data['interv_private'] = []
        #for race in self.society.race.keys():
        #    self.data['employment_%s' % race] = []
        #    self.data['proportion_%s' % race] = []

    def update_data(self):
        """Append this step's statistics; the first 100 warm-up steps
        are not recorded."""
        if self.steps >= 100:
            self.data['employment'].append(self.calc_employment()*100)
            self.data['employer_net'].append(self.calc_employer_net()*0.001)
            self.data['highschool'].append(self.calc_feature_rate('highschool')*100)
            self.data['employment_childcare'].append(self.calc_feature_employment('childcare')*100)
            self.data['employment_nohighschool'].append(self.calc_feature_employment('no_highschool')*100)
            self.data['employment_2_or_more_years'].append(self.calc_attribute_employment('experience', threshold=2.0)*100)
            self.data['employment_18plus'].append(self.calc_attribute_employment('age', threshold=18)*100)
            self.data['proportion_childcare'].append(self.calc_feature_rate('childcare')*100)
            self.data['proportion_nohighschool'].append(self.calc_feature_rate('no_highschool')*100)
            self.data['proportion_2_or_more_years'].append(self.calc_attribute_rate('experience', threshold=2.0)*100)
            self.data['proportion_18plus'].append(self.calc_attribute_rate('age', threshold=18)*100)
            self.data['cost_hiring'].append(sum([e.hiring_cost for e in self.employers]))
            self.data['cost_salary'].append(sum([e.salary for e in self.employers]))
            self.data['production'].append(sum([e.productivity for e in self.employers]))
            self.data['interv_public'].append(self.society.interv_public)
            self.data['interv_private'].append(self.society.interv_private)
            #for race in self.society.race.keys():
            #    self.data['employment_%s' % race].append(self.calc_feature_employment(race)*100)
            #    self.data['proportion_%s' % race].append(self.calc_feature_rate(race)*100)

    def get_grid(self):
        """Return display items (employers then people) for the UI grid."""
        grid = []
        for e in self.employers:
            x, y = self.get_location(e)
            color = e.get_color()
            item = dict(type='employer', x=x, y=y, color=color, info=e.get_info())
            grid.append(item)
        for p in self.people:
            x, y = self.get_location(p)
            color = p.get_color()
            item = dict(type='person', x=x, y=y, color=color, info=p.get_info())
            grid.append(item)
        return grid

    def get_location(self, item):
        """Map an item's (neighbourhood, cell) pair to global grid
        coordinates, laying neighbourhoods out in a grid of
        neighbourhood_cols columns."""
        xx, yy = item.location
        n_index = self.society.neighbourhoods.index(item.neighbourhood)
        # BUGFIX: '//' keeps the row index an integer under Python 3
        # (the original '/' relied on Python 2 integer division)
        x = (n_index % self.society.neighbourhood_cols) * item.neighbourhood.cols + xx
        y = (n_index // self.society.neighbourhood_cols) * item.neighbourhood.rows + yy
        return x, y

    def get_data(self):
        """Return the recorded time series plus the current grid state."""
        self.data['grid'] = self.get_grid()
        return self.data
class RetentionIntervention:
    """At a scheduled timestep, shift all job retention rates halfway
    toward *value* (via Society.adjust_retention)."""

    def __init__(self, time, value):
        self.time = time
        self.value = value

    def apply(self, model, timestep):
        """Fire exactly once, when *timestep* reaches the scheduled time."""
        if timestep != self.time:
            return
        model.society.adjust_retention(self.value)
class SocietyParameterIntervention:
    """Set a Society attribute at a scheduled step.

    Books the sunk cost at activation, ongoing fixed costs on every
    later step, and registers per-hire variable costs on the society's
    get_job_cost_public/private fields.  Costs are split between public
    and private payers by public_proportion.
    """

    def __init__(self, time, parameter, value,
                 cost_sunk, cost_fixed, cost_variable, public_proportion):
        self.time = time
        self.parameter = parameter
        self.value = value
        self.cost_sunk = cost_sunk
        self.cost_fixed = cost_fixed
        self.cost_variable = cost_variable
        self.public_proportion = public_proportion

    def apply(self, model, timestep):
        """Activate at self.time; afterwards only accrue fixed costs."""
        society = model.society
        share = self.public_proportion
        if timestep == self.time:
            society.interv_public += share * self.cost_sunk
            society.interv_private += (1 - share) * self.cost_sunk
            setattr(society, self.parameter, self.value)
            society.get_job_cost_public += share * self.cost_variable
            society.get_job_cost_private += (1 - share) * self.cost_variable
        elif timestep > self.time:
            society.interv_public += share * self.cost_fixed * Model.years_per_step
            society.interv_private += (1 - share) * self.cost_fixed * Model.years_per_step
class SectorMobilityIntervention:
    """Make experience in one sector count toward jobs in the other.

    At activation, every service job gains a suitability weight on the
    applicant's manufacturing experience and vice versa, so
    cross-sector experience improves hiring chances.  Sunk cost is
    booked at activation, fixed cost on every later step.
    """

    def __init__(self, time, value,
                 cost_sunk, cost_fixed, public_proportion):
        self.time = time
        self.value = value  # weight given to cross-sector experience
        self.cost_sunk = cost_sunk
        self.cost_fixed = cost_fixed
        #self.cost_variable = cost_variable
        self.public_proportion = public_proportion

    def apply(self, model, timestep):
        """Activate at self.time; afterwards only accrue fixed costs."""
        if timestep == self.time:
            model.society.interv_public += self.public_proportion * self.cost_sunk
            model.society.interv_private += (1-self.public_proportion) * self.cost_sunk
            for type, values in model.society.jobs.items():
                sector = model.society.job_sector[type]
                # BUGFIX: the original wrote inert keys
                # ('employment_manufactoring' / 'employment_service') that
                # match no person feature or attribute, so the intervention
                # had no effect.  Person attributes are named
                # 'experience_<sector>'.
                if sector == 'service':
                    values['experience_manufacturing'] = self.value
                elif sector == 'manufacturing':
                    values['experience_service'] = self.value
            #model.society.get_job_cost_public += self.public_proportion * self.cost_variable
            #model.society.get_job_cost_private += (1 - self.public_proportion) * self.cost_variable
        elif timestep > self.time:
            model.society.interv_public += self.public_proportion * self.cost_fixed * Model.years_per_step
            model.society.interv_private += (1-self.public_proportion) * self.cost_fixed * Model.years_per_step
class PovertyBiasIntervention:
    """Add a suitability weight on time spent unemployed in every job
    type, biasing employers toward (positive value) the long-term
    unemployed."""

    def __init__(self, time, value,
                 cost_sunk, cost_fixed, cost_variable, public_proportion):
        self.time = time
        self.value = value
        self.cost_sunk = cost_sunk
        self.cost_fixed = cost_fixed
        self.cost_variable = cost_variable
        self.public_proportion = public_proportion

    def apply(self, model, timestep):
        """Activate at self.time; afterwards only accrue fixed costs."""
        society = model.society
        share = self.public_proportion
        if timestep == self.time:
            society.interv_public += share * self.cost_sunk
            society.interv_private += (1 - share) * self.cost_sunk
            for job_type, weights in society.jobs.items():
                weights['unemployed_time'] = self.value
            society.get_job_cost_public += share * self.cost_variable
            society.get_job_cost_private += (1 - share) * self.cost_variable
        elif timestep > self.time:
            society.interv_public += share * self.cost_fixed * Model.years_per_step
            society.interv_private += (1 - share) * self.cost_fixed * Model.years_per_step
class NoHighschoolPenaltyIntervention:
    """Replace the 'no_highschool' weight in every job type with a new
    value (the baseline weight of None disqualifies such applicants
    entirely)."""

    def __init__(self, time, value,
                 cost_sunk, cost_fixed, cost_variable, public_proportion):
        self.time = time
        self.value = value
        self.cost_sunk = cost_sunk
        self.cost_fixed = cost_fixed
        self.cost_variable = cost_variable
        self.public_proportion = public_proportion

    def apply(self, model, timestep):
        """Activate at self.time; afterwards only accrue fixed costs."""
        society = model.society
        share = self.public_proportion
        if timestep == self.time:
            society.interv_public += share * self.cost_sunk
            society.interv_private += (1 - share) * self.cost_sunk
            for job_type, weights in society.jobs.items():
                weights['no_highschool'] = self.value
            society.get_job_cost_public += share * self.cost_variable
            society.get_job_cost_private += (1 - share) * self.cost_variable
        elif timestep > self.time:
            society.interv_public += share * self.cost_fixed * Model.years_per_step
            society.interv_private += (1 - share) * self.cost_fixed * Model.years_per_step
class RelocateIntervention:
    """Move *value* randomly chosen employers into neighbourhood 0 (the
    residential area) at the scheduled step.

    The same employer may be drawn more than once (choice with
    replacement), matching the original behavior.
    """

    def __init__(self, time, value,
                 cost_sunk, cost_fixed, public_proportion):
        self.time = time
        self.value = value
        self.cost_sunk = cost_sunk
        self.cost_fixed = cost_fixed
        self.public_proportion = public_proportion

    def apply(self, model, timestep):
        """Activate at self.time; afterwards only accrue fixed costs."""
        society = model.society
        share = self.public_proportion
        if timestep == self.time:
            society.interv_public += share * self.cost_sunk
            society.interv_private += (1 - share) * self.cost_sunk
            for _ in range(self.value):
                employer = model.rng.choice(model.employers)
                employer.neighbourhood.free_location(employer.location)
                employer.neighbourhood = society.neighbourhoods[0]
                employer.location = employer.neighbourhood.allocate_location()
        elif timestep > self.time:
            society.interv_public += share * self.cost_fixed * Model.years_per_step
            society.interv_private += (1 - share) * self.cost_fixed * Model.years_per_step
class DiscriminationIntervention:
    """Reset the society-wide racial discrimination level once, at a
    scheduled timestep (no costs are booked)."""

    def __init__(self, time, value):
        self.time = time
        self.value = value

    def apply(self, model, timestep):
        """Fire exactly once, when *timestep* reaches the scheduled time."""
        if timestep != self.time:
            return
        model.society.set_racial_discrimination(self.value)
class ChildcareIntervention:
    """Give a childcare subsidy to a random share of the people who need
    childcare; after activation, only newly arrived people get a fresh
    draw (and incur variable costs when subsidized)."""

    def __init__(self, time, proportion, value,
                 cost_sunk, cost_fixed, cost_variable, public_proportion):
        self.time = time
        self.proportion = proportion
        self.value = value
        self.cost_sunk = cost_sunk
        self.cost_fixed = cost_fixed
        self.cost_variable = cost_variable
        self.public_proportion = public_proportion

    def apply(self, model, timestep):
        """Activate at self.time; later steps accrue fixed costs and
        handle new entrants only."""
        society = model.society
        share = self.public_proportion
        if timestep == self.time:
            society.interv_public += share * self.cost_sunk
            society.interv_private += (1 - share) * self.cost_sunk
            for person in model.people:
                if 'childcare' not in person.features:
                    continue
                if model.rng.rand() < self.proportion:
                    person.childcare_support = self.value
                else:
                    person.childcare_support = 0
        elif timestep > self.time:
            society.interv_public += share * self.cost_fixed * Model.years_per_step
            society.interv_private += (1 - share) * self.cost_fixed * Model.years_per_step
            for person in model.people:
                # only people who just entered the model get a fresh draw
                if person.age >= 16 + Model.years_per_step * 2:
                    continue
                if 'childcare' not in person.features:
                    continue
                if model.rng.rand() < self.proportion:
                    society.interv_public += share * self.cost_variable * Model.years_per_step
                    society.interv_private += (1 - share) * self.cost_variable * Model.years_per_step
                    person.childcare_support = self.value
                else:
                    person.childcare_support = 0
class HighschoolCertificateIntervention:
    """Grant a highschool-equivalent certificate to a random share of
    the people lacking highschool; after activation, only new entrants
    are considered (and incur variable costs when certified)."""

    def __init__(self, time, proportion,
                 cost_sunk, cost_fixed, cost_variable, public_proportion):
        self.time = time
        self.proportion = proportion
        self.cost_sunk = cost_sunk
        self.cost_fixed = cost_fixed
        self.cost_variable = cost_variable
        self.public_proportion = public_proportion

    def apply(self, model, timestep):
        """Activate at self.time; later steps accrue fixed costs and
        handle new entrants only."""
        society = model.society
        share = self.public_proportion
        if timestep == self.time:
            society.interv_public += share * self.cost_sunk
            society.interv_private += (1 - share) * self.cost_sunk
            for person in model.people:
                if 'no_highschool' not in person.features:
                    continue
                if model.rng.rand() < self.proportion:
                    person.features.append('highschool')
                    person.features.remove('no_highschool')
        elif timestep > self.time:
            society.interv_public += share * self.cost_fixed * Model.years_per_step
            society.interv_private += (1 - share) * self.cost_fixed * Model.years_per_step
            for person in model.people:
                # only people who just entered the model are considered
                if person.age >= 16 + Model.years_per_step * 2:
                    continue
                if 'no_highschool' not in person.features:
                    continue
                if model.rng.rand() < self.proportion:
                    society.interv_public += share * self.cost_variable
                    society.interv_private += (1 - share) * self.cost_variable
                    person.features.append('highschool')
                    person.features.remove('no_highschool')
def memoize(f):
    """ Memoization decorator for functions taking one or more arguments. """
    # a dict subclass so callers can use dict methods on the memoized
    # function (clear_cache relies on .clear())
    class _MemoDict(dict):
        def __init__(self, f):
            self.f = f
        def __call__(self, *args):
            # hits come straight from the dict; misses go via __missing__
            return self[args]
        def __missing__(self, key):
            value = self.f(*key)
            self[key] = value
            return value
    return _MemoDict(f)
def clear_cache():
    """Forget all cached simulation results.

    Both caches must be cleared together, otherwise find_cached_model
    could hand back models that run() no longer knows about.
    """
    model_cache.clear()
    run.clear() # clear the memoized cache too
# cache of partially-run models, keyed by (seed, tuple-of-actions-so-far);
# copy is needed to deep-copy models in and out of this cache
model_cache = {}
import copy
def find_cached_model(seed, actions):
    """Return (last_done_action_index, model) for the longest cached
    prefix of *actions*, deep-copying so callers may step it freely.

    If no prefix is cached, a fresh Model is created, run through the
    warm-up steps, cached under the empty action tuple, and returned
    with index -1 (meaning "no actions applied yet").
    """
    # search from the longest prefix down to the empty prefix
    for step in reversed(range(len(actions)+1)):
        result = model_cache.get((seed, tuple(actions[:step])), None)
        if result is not None:
            step, model = result
            return step, copy.deepcopy(model)
    model = Model(seed=seed)
    presteps = 100
    for i in range(presteps):
        model.step()
    # BUGFIX: store the same (last_action_index, model) tuple shape that
    # run() stores; the original cached a bare model here, which crashed
    # the 'step, model = result' unpacking on a later cache hit.
    model_cache[(seed, ())] = (-1, copy.deepcopy(model))
    return -1, model
@memoize
def run(seed, *actions):
    """Run (or resume from cache) a simulation for *seed* with the given
    sequence of intervention action names, stepping 10 steps per
    action, and return the model's recorded data.

    Results are memoized on (seed, actions); find_cached_model resumes
    from the longest already-simulated prefix of *actions*.
    """
    step, model = find_cached_model(seed, actions)
    presteps = 100          # must match the warm-up in find_cached_model
    steps_per_action = 10
    for i, action in enumerate(actions):
        if i > step:   # if we haven't done this step yet
            # each action's intervention fires on the first step of its
            # 10-step window
            interv_step = presteps + 1 + steps_per_action * i
            # map the action name to an intervention object; cost figures
            # scale with the low/med/high variants
            if action == 'highschool-high':
                interv = HighschoolCertificateIntervention(interv_step, 0.9,
                         cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'highschool-med':
                interv = HighschoolCertificateIntervention(interv_step, 0.5,
                         cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'highschool-low':
                interv = HighschoolCertificateIntervention(interv_step, 0.2,
                         cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'mobility-high':
                interv = SocietyParameterIntervention(interv_step,
                         'distance_penalty_scale', 0,
                         cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'mobility-med':
                interv = SocietyParameterIntervention(interv_step,
                         'distance_penalty_scale', 0.5,
                         cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'mobility-low':
                interv = SocietyParameterIntervention(interv_step,
                         'distance_penalty_scale', 10,
                         cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'discriminate-normal':
                interv = DiscriminationIntervention(interv_step, 0.3)
            elif action == 'discriminate-high':
                interv = DiscriminationIntervention(interv_step, 2.0)
            elif action == 'discriminate-low':
                interv = DiscriminationIntervention(interv_step, 0.0)
            elif action == 'retention+':
                interv = RetentionIntervention(interv_step, 1.0)
            elif action == 'retention-':
                interv = RetentionIntervention(interv_step, 0.0)
            elif action == 'childcare-low':
                interv = ChildcareIntervention(interv_step, 0.2, 4000, cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'childcare-med':
                interv = ChildcareIntervention(interv_step, 0.5, 4000, cost_sunk=40000, cost_fixed=15000, cost_variable=1000, public_proportion=0.5)
            elif action == 'childcare-high':
                interv = ChildcareIntervention(interv_step, 0.8, 4000, cost_sunk=80000, cost_fixed=15000, cost_variable=1000, public_proportion=0.5)
            elif action == 'sectormobility-low':
                interv = SectorMobilityIntervention(interv_step, 0.2, cost_sunk=20000, cost_fixed=5000, public_proportion=0.5)
            elif action == 'sectormobility-med':
                interv = SectorMobilityIntervention(interv_step, 0.5, cost_sunk=40000, cost_fixed=15000, public_proportion=0.5)
            elif action == 'sectormobility-high':
                interv = SectorMobilityIntervention(interv_step, 0.8, cost_sunk=80000, cost_fixed=15000, public_proportion=0.5)
            elif action == 'povertybias-low':
                interv = PovertyBiasIntervention(interv_step, 2, cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'povertybias-med':
                interv = PovertyBiasIntervention(interv_step, 5, cost_sunk=40000, cost_fixed=15000, cost_variable=1000, public_proportion=0.5)
            elif action == 'povertybias-high':
                interv = PovertyBiasIntervention(interv_step, 10, cost_sunk=80000, cost_fixed=15000, cost_variable=1000, public_proportion=0.5)
            elif action == 'hsnotrequired-low':
                interv = NoHighschoolPenaltyIntervention(interv_step, 3, cost_sunk=20000, cost_fixed=5000, cost_variable=1000, public_proportion=0.5)
            elif action == 'hsnotrequired-med':
                interv = NoHighschoolPenaltyIntervention(interv_step, 1, cost_sunk=40000, cost_fixed=15000, cost_variable=1000, public_proportion=0.5)
            elif action == 'hsnotrequired-high':
                interv = NoHighschoolPenaltyIntervention(interv_step, 0, cost_sunk=80000, cost_fixed=15000, cost_variable=1000, public_proportion=0.5)
            elif action == 'move-1':
                interv = RelocateIntervention(interv_step, 1, cost_sunk=20000, cost_fixed=0, public_proportion=0.5)
            elif action == 'move-2':
                interv = RelocateIntervention(interv_step, 2, cost_sunk=40000, cost_fixed=0, public_proportion=0.5)
            elif action == 'move-3':
                interv = RelocateIntervention(interv_step, 3, cost_sunk=80000, cost_fixed=0, public_proportion=0.5)
            else:
                # unknown action names are skipped with a warning
                interv = None
                print 'unknown intervention', action
            if interv is not None:
                model.interventions.append(interv)
            # advance the model through this action's window and cache the
            # result as (last_action_index, model-copy)
            for ii in range(steps_per_action):
                model.step()
            model_cache[(seed, tuple(actions[:(i+1)]))] = (i, copy.deepcopy(model))
    return model.get_data()
if __name__ == '__main__':
    # smoke test: run twice with the same seed, clearing the caches in
    # between, to exercise the memoization/caching machinery end-to-end
    # ('init' is intentionally an unknown action, so no intervention fires).
    print 1
    run(1, 'init')
    print 2
    clear_cache()
    print 3
    run(1, 'init')
    print 4
    # deliberate early exit: the manual stepping loop below is debug-only
    1/0
    m = Model()
    # NOTE(review): HighschoolCertificateIntervention requires more
    # constructor arguments than are passed here; this dead code would
    # fail if it were ever reached.
    m.interventions.append(HighschoolCertificateIntervention(50, 1.0))
    m.step()
    for i in range(1000):
        print i, len(m.people), m.calc_employment()
        m.check_jobs()
        m.step()
| tcstewar/farm_game | farm_game/model_bak.py | Python | gpl-2.0 | 38,545 |
# Copyright (C) 2005-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#from __future__ import absolute_import
# mbp: "you know that thing where cvs gives you conflict markers?"
# s: "i hate that."
import errors
import patiencediff
import textfile
def intersect(ra, rb):
    """Given two ranges return the range where they intersect or None.

    >>> intersect((0, 10), (0, 6))
    (0, 6)
    >>> intersect((0, 10), (5, 15))
    (5, 10)
    >>> intersect((0, 10), (10, 15))
    >>> intersect((0, 9), (10, 15))
    >>> intersect((0, 9), (7, 15))
    (7, 9)
    """
    # preconditions: (ra[0] <= ra[1]) and (rb[0] <= rb[1])
    lo = max(ra[0], rb[0])
    hi = min(ra[1], rb[1])
    # an empty overlap (lo >= hi) is reported as None
    return (lo, hi) if lo < hi else None
def compare_range(a, astart, aend, b, bstart, bend):
    """Compare a[astart:aend] == b[bstart:bend], without slicing.

    Returns False when the two ranges differ in length or in any
    element, True otherwise.
    """
    if (aend - astart) != (bend - bstart):
        return False
    # range() instead of xrange() so this also runs under Python 3; the
    # original's for/else had no break, so the else always executed and
    # the early 'return False' did all the work.
    for ia, ib in zip(range(astart, aend), range(bstart, bend)):
        if a[ia] != b[ib]:
            return False
    return True
class Merge3(object):
"""3-way merge of texts.
Given BASE, OTHER, THIS, tries to produce a combined text
incorporating the changes from both BASE->OTHER and BASE->THIS.
All three will typically be sequences of lines."""
def __init__(self, base, a, b, is_cherrypick=False, allow_objects=False):
    """Constructor.

    :param base: lines in BASE
    :param a: lines in A
    :param b: lines in B
    :param is_cherrypick: flag indicating if this merge is a cherrypick.
        When cherrypicking b => a, matches with b and base do not conflict.
    :param allow_objects: if True, do not require that base, a and b are
        plain Python strs.  Also prevents BinaryFile from being raised.
        Lines can be any sequence of comparable and hashable Python
        objects.
    """
    # reject binary/non-text input up front unless the caller opts out
    if not allow_objects:
        textfile.check_text_lines(base)
        textfile.check_text_lines(a)
        textfile.check_text_lines(b)
    self.base = base
    self.a = a
    self.b = b
    self.is_cherrypick = is_cherrypick
def merge_lines(self,
                name_a=None,
                name_b=None,
                name_base=None,
                start_marker='<<<<<<<',
                mid_marker='=======',
                end_marker='>>>>>>>',
                base_marker=None,
                reprocess=False):
    """Return merge in cvs-like form.

    Yields the merged lines with conflict regions wrapped in the given
    markers; name_a/name_b/name_base, when given, are appended to the
    corresponding marker lines.  base_marker and reprocess are mutually
    exclusive.
    """
    # match the line-ending style of the first 'a' line for marker lines
    newline = '\n'
    if len(self.a) > 0:
        if self.a[0].endswith('\r\n'):
            newline = '\r\n'
        elif self.a[0].endswith('\r'):
            newline = '\r'
    if base_marker and reprocess:
        raise errors.CantReprocessAndShowBase()
    if name_a:
        start_marker = start_marker + ' ' + name_a
    if name_b:
        end_marker = end_marker + ' ' + name_b
    if name_base and base_marker:
        base_marker = base_marker + ' ' + name_base
    merge_regions = self.merge_regions()
    if reprocess is True:
        merge_regions = self.reprocess_merge_regions(merge_regions)
    for t in merge_regions:
        # region tuples are (kind, ...indices...); conflicts carry
        # (kind, zstart, zend, astart, aend, bstart, bend)
        what = t[0]
        if what == 'unchanged':
            for i in range(t[1], t[2]):
                yield self.base[i]
        elif what == 'a' or what == 'same':
            for i in range(t[1], t[2]):
                yield self.a[i]
        elif what == 'b':
            for i in range(t[1], t[2]):
                yield self.b[i]
        elif what == 'conflict':
            yield start_marker + newline
            for i in range(t[3], t[4]):
                yield self.a[i]
            # optionally show the base text between the a and b sides
            if base_marker is not None:
                yield base_marker + newline
                for i in range(t[1], t[2]):
                    yield self.base[i]
            yield mid_marker + newline
            for i in range(t[5], t[6]):
                yield self.b[i]
            yield end_marker + newline
        else:
            raise ValueError(what)
def merge(self):
    """Yield the merged lines, with conflict regions emitted as the 'a'
    lines followed by the 'b' lines (no conflict markers)."""
    for region in self.merge_regions():
        kind = region[0]
        if kind == 'unchanged':
            for line in self.base[region[1]:region[2]]:
                yield line
        elif kind in ('a', 'same'):
            for line in self.a[region[1]:region[2]]:
                yield line
        elif kind == 'b':
            for line in self.b[region[1]:region[2]]:
                yield line
        elif kind == 'conflict':
            for line in self.a[region[3]:region[4]]:
                yield line
            for line in self.b[region[5]:region[6]]:
                yield line
        else:
            raise ValueError(kind)
def merge_annotated(self):
    """Yield merged lines, each prefixed with its origin ('u |' for
    base, 'a |'/'s |' for a, 'b |' for b) and with conflicts wrapped
    in <<<< / ---- / >>>> markers.

    Most useful for debugging merge.
    """
    for region in self.merge_regions():
        kind = region[0]
        if kind == 'unchanged':
            for line in self.base[region[1]:region[2]]:
                yield 'u | ' + line
        elif kind in ('a', 'same'):
            for line in self.a[region[1]:region[2]]:
                yield kind[0] + ' | ' + line
        elif kind == 'b':
            for line in self.b[region[1]:region[2]]:
                yield 'b | ' + line
        elif kind == 'conflict':
            yield '<<<<\n'
            for line in self.a[region[3]:region[4]]:
                yield 'A | ' + line
            yield '----\n'
            for line in self.b[region[5]:region[6]]:
                yield 'B | ' + line
            yield '>>>>\n'
        else:
            raise ValueError(kind)
def merge_groups(self):
    """Yield sequence of line groups.  Each one is a tuple:

    'unchanged', lines
        Lines unchanged from base
    'a', lines
        Lines taken from a
    'same', lines
        Lines taken from a (and equal to b)
    'b', lines
        Lines taken from b
    'conflict', base_lines, a_lines, b_lines
        Lines from base were changed to either a or b and conflict.
    """
    for region in self.merge_regions():
        kind = region[0]
        if kind == 'conflict':
            yield (kind,
                   self.base[region[1]:region[2]],
                   self.a[region[3]:region[4]],
                   self.b[region[5]:region[6]])
        elif kind == 'unchanged':
            yield kind, self.base[region[1]:region[2]]
        elif kind in ('a', 'same'):
            yield kind, self.a[region[1]:region[2]]
        elif kind == 'b':
            yield kind, self.b[region[1]:region[2]]
        else:
            raise ValueError(kind)
def merge_regions(self):
    """Return sequences of matching and conflicting regions.

    This returns tuples, where the first value says what kind we
    have:

    'unchanged', start, end
        Take a region of base[start:end]
    'same', astart, aend
        b and a are different from base but give the same result
    'a', start, end
        Non-clashing insertion from a[start:end]

    Method is as follows:

    The two sequences align only on regions which match the base
    and both descendents.  These are found by doing a two-way diff
    of each one against the base, and then finding the
    intersections between those regions.  These "sync regions"
    are by definition unchanged in both and easily dealt with.

    The regions in between can be in any of three cases:
    conflicted, or changed on only one side.
    """
    # section a[0:ia] has been disposed of, etc
    iz = ia = ib = 0
    for zmatch, zend, amatch, aend, \
            bmatch, bend in self.find_sync_regions():
        # Length of the sync region itself (identical in base, a, b).
        matchlen = zend - zmatch
        # invariants:
        #   matchlen >= 0
        #   matchlen == (aend - amatch)
        #   matchlen == (bend - bmatch)
        # Length of the not-yet-emitted text preceding this sync region.
        len_a = amatch - ia
        len_b = bmatch - ib
        #len_base = zmatch - iz
        # invariants:
        # assert len_a >= 0
        # assert len_b >= 0
        # assert len_base >= 0
        #print 'unmatched a=%d, b=%d' % (len_a, len_b)
        if len_a or len_b:
            # try to avoid actually slicing the lists
            same = compare_range(self.a, ia, amatch,
                                 self.b, ib, bmatch)
            if same:
                yield 'same', ia, amatch
            else:
                # Compare each side's unmatched span against base to see
                # which side actually changed.
                equal_a = compare_range(self.a, ia, amatch,
                                        self.base, iz, zmatch)
                equal_b = compare_range(self.b, ib, bmatch,
                                        self.base, iz, zmatch)
                if equal_a and not equal_b:
                    yield 'b', ib, bmatch
                elif equal_b and not equal_a:
                    yield 'a', ia, amatch
                elif not equal_a and not equal_b:
                    if self.is_cherrypick:
                        for node in self._refine_cherrypick_conflict(
                                iz, zmatch, ia, amatch,
                                ib, bmatch):
                            yield node
                    else:
                        yield 'conflict', \
                            iz, zmatch, ia, amatch, ib, bmatch
                else:
                    raise AssertionError(
                        "can't handle a=b=base but unmatched")
        ia = amatch
        ib = bmatch
        iz = zmatch
        # if the same part of the base was deleted on both sides
        # that's OK, we can just skip it.
        if matchlen > 0:
            # invariants:
            # assert ia == amatch
            # assert ib == bmatch
            # assert iz == zmatch
            yield 'unchanged', zmatch, zend
            iz = zend
            ia = aend
            ib = bend
def _refine_cherrypick_conflict(self, zstart,
                                zend, astart, aend, bstart, bend):
    """When cherrypicking b => a, ignore matches with b and base.

    Splits one conflict region into sub-regions where b really
    differs from base.  The first emitted sub-region carries the
    whole a-range; subsequent ones carry an empty a-range
    (aend, aend).  If no sub-region is emitted, the original
    conflict is yielded unchanged.
    """
    # Do not emit regions which match, only regions which do not match
    matches = patiencediff.PatienceSequenceMatcher(None,
        self.base[zstart:zend], self.b[bstart:bend]).get_matching_blocks()
    last_base_idx = 0
    # Fixed: last_b_idx was initialized twice; one redundant
    # assignment removed.
    last_b_idx = 0
    yielded_a = False
    for base_idx, b_idx, match_len in matches:
        #conflict_z_len = base_idx - last_base_idx
        conflict_b_len = b_idx - last_b_idx
        if conflict_b_len == 0:  # There are no lines in b which conflict,
            # so skip it
            pass
        else:
            if yielded_a:
                yield ('conflict',
                       zstart + last_base_idx, zstart + base_idx,
                       aend, aend, bstart + last_b_idx, bstart + b_idx)
            else:
                # The first conflict gets the a-range
                yielded_a = True
                yield ('conflict', zstart + last_base_idx, zstart +
                       base_idx,
                       astart, aend, bstart + last_b_idx, bstart + b_idx)
        last_base_idx = base_idx + match_len
        last_b_idx = b_idx + match_len
    if last_base_idx != zend - zstart or last_b_idx != bend - bstart:
        # NOTE(review): get_matching_blocks() ends with a zero-length
        # sentinel block, so the loop normally consumes the whole range
        # and this branch looks unreachable; it also reuses base_idx and
        # b_idx from the loop above -- confirm before relying on it.
        if yielded_a:
            yield ('conflict', zstart + last_base_idx, zstart + base_idx,
                   aend, aend, bstart + last_b_idx, bstart + b_idx)
        else:
            # The first conflict gets the a-range
            yielded_a = True
            yield ('conflict', zstart + last_base_idx, zstart + base_idx,
                   astart, aend, bstart + last_b_idx, bstart + b_idx)
    if not yielded_a:
        yield ('conflict', zstart, zend, astart, aend, bstart, bend)
def reprocess_merge_regions(self, merge_regions):
    """Where there are conflict regions, remove the agreed lines.

    Lines where both A and B have made the same changes are
    eliminated.
    """
    for region in merge_regions:
        if region[0] != "conflict":
            yield region
            continue
        # Unpack the conflict; the base range (iz/zmatch) is unused here
        # but kept for clarity of the tuple layout.
        _, iz, zmatch, ia, amatch, ib, bmatch = region
        blocks = patiencediff.PatienceSequenceMatcher(
            None, self.a[ia:amatch], self.b[ib:bmatch]).get_matching_blocks()
        pos_a = ia
        pos_b = ib
        # The final block is a zero-length sentinel -- skip it.
        for off_a, off_b, length in blocks[:-1]:
            start_a = off_a + ia
            start_b = off_b + ib
            gap = self.mismatch_region(pos_a, start_a, pos_b, start_b)
            if gap is not None:
                yield gap
            yield 'same', start_a, start_a + length
            pos_a = start_a + length
            pos_b = start_b + length
        tail = self.mismatch_region(pos_a, amatch, pos_b, bmatch)
        if tail is not None:
            yield tail
@staticmethod
def mismatch_region(next_a, region_ia, next_b, region_ib):
    """Return a 'conflict' tuple covering any unmatched leading lines.

    Returns None when neither side has a gap before the next match.
    The base range of the returned tuple is (None, None) because it
    is unknown at this stage.
    """
    if next_a >= region_ia and next_b >= region_ib:
        return None
    return 'conflict', None, None, next_a, region_ia, next_b, region_ib
def find_sync_regions(self):
    """Return a list of sync regions, where both descendents match the base.

    Generates a list of (base1, base2, a1, a2, b1, b2).  There is
    always a zero-length sync region at the end of all the files.
    """
    # ia/ib index into the two match-block lists, not into the texts.
    ia = ib = 0
    amatches = patiencediff.PatienceSequenceMatcher(
        None, self.base, self.a).get_matching_blocks()
    bmatches = patiencediff.PatienceSequenceMatcher(
        None, self.base, self.b).get_matching_blocks()
    len_a = len(amatches)
    len_b = len(bmatches)
    sl = []
    while ia < len_a and ib < len_b:
        abase, amatch, alen = amatches[ia]
        bbase, bmatch, blen = bmatches[ib]
        # there is an unconflicted block at i; how long does it
        # extend?  until whichever one ends earlier.
        i = intersect((abase, abase + alen), (bbase, bbase + blen))
        if i:
            intbase = i[0]
            intend = i[1]
            intlen = intend - intbase
            # found a match of base[i[0], i[1]]; this may be less than
            # the region that matches in either one
            # assert intlen <= alen
            # assert intlen <= blen
            # assert abase <= intbase
            # assert bbase <= intbase
            # Translate the base span into the corresponding spans in a
            # and b by offsetting from the start of each match block.
            asub = amatch + (intbase - abase)
            bsub = bmatch + (intbase - bbase)
            aend = asub + intlen
            bend = bsub + intlen
            # assert self.base[intbase:intend] == self.a[asub:aend], \
            # (self.base[intbase:intend], self.a[asub:aend])
            # assert self.base[intbase:intend] == self.b[bsub:bend]
            sl.append((intbase, intend,
                       asub, aend,
                       bsub, bend))
        # advance whichever one ends first in the base text
        if (abase + alen) < (bbase + blen):
            ia += 1
        else:
            ib += 1
    # Trailing zero-length sentinel region at the end of all three texts.
    intbase = len(self.base)
    abase = len(self.a)
    bbase = len(self.b)
    sl.append((intbase, intbase, abase, abase, bbase, bbase))
    return sl
def find_unconflicted(self):
    """Return a list of ranges in base that are not conflicted."""
    matches_a = patiencediff.PatienceSequenceMatcher(
        None, self.base, self.a).get_matching_blocks()
    matches_b = patiencediff.PatienceSequenceMatcher(
        None, self.base, self.b).get_matching_blocks()
    unconflicted = []
    while matches_a and matches_b:
        # Each match block is (base_start, other_start, length); only
        # the span it covers in base matters here.
        block_a = matches_a[0]
        block_b = matches_b[0]
        span_a = (block_a[0], block_a[0] + block_a[2])
        span_b = (block_b[0], block_b[0] + block_b[2])
        overlap = intersect(span_a, span_b)
        if overlap:
            unconflicted.append(overlap)
        # Drop whichever block ends first in base and keep scanning.
        if span_a[1] < span_b[1]:
            del matches_a[0]
        else:
            del matches_b[0]
    return unconflicted
def main(argv):
    """Merge three files named on the command line and print the result.

    As for diff3 and meld the argument order is "MINE BASE OTHER".
    """
    # open() + context managers replace the Python-2-only file()
    # builtin and guarantee every handle is closed.
    with open(argv[1], 'rt') as handle:
        a = handle.readlines()
    with open(argv[2], 'rt') as handle:
        base = handle.readlines()
    with open(argv[3], 'rt') as handle:
        b = handle.readlines()
    m3 = Merge3(base, a, b)
    sys.stdout.writelines(m3.merge())
if __name__ == '__main__':
    # NOTE(review): sys is only imported when run as a script, yet
    # main() references sys.stdout -- importing this module and calling
    # main() directly would raise NameError; consider a top-level import.
    import sys
    sys.exit(main(sys.argv))
| khertan/ownNotes | python/merge3/merge3.py | Python | gpl-3.0 | 18,698 |
# Program to take the highest predicted links and insert them into
# a new file
# usage: python combine.py <new edges> <old edges> <number of new edges> <output file>
import sys

# Check command line arguments
args = sys.argv
if len(args) != 5:
    # Parenthesised single-argument print works identically under
    # Python 2 and Python 3.
    print("Usage: python " + args[0] +
          " <new edges> <old edges> <number of new edges> <output file>")
    # sys.exit is the supported way to abort a script (quit() is only
    # guaranteed in interactive sessions) and a non-zero status signals
    # the usage error to callers.
    sys.exit(1)

newName = str(args[1])
oldName = str(args[2])
numEdges = int(args[3])
outputName = str(args[4])

# Context managers guarantee the handles are closed and the output
# buffer is flushed even if a malformed line raises.
with open(outputName, "w") as output:
    # First, copy the old file into the new file
    with open(oldName, "r") as old:
        for line in old:
            left, right = line.rstrip().split("\t")
            output.write(left + "\t" + right + "\n")
    # Now write the best predicted links (first numEdges lines) to the
    # file, dropping the leading score column.
    with open(newName, "r") as new:
        count = 0
        for line in new:
            score, left, right = line.rstrip().split("\t")
            output.write(left + "\t" + right + "\n")
            count += 1
            if count >= numEdges:
                break
| coriander-/AA_bipartite_link_prediction | combine.py | Python | mit | 1,003 |
"""Tests for letsencrypt.le_util."""
import errno
import os
import shutil
import stat
import tempfile
import unittest
import mock
from letsencrypt import errors
class MakeOrVerifyDirTest(unittest.TestCase):
    """Tests for letsencrypt.le_util.make_or_verify_dir.

    A wrong-owner scenario is deliberately not covered: triggering it
    would require running this suite as root.
    """

    def setUp(self):
        self.root_path = tempfile.mkdtemp()
        self.path = os.path.join(self.root_path, "foo")
        os.mkdir(self.path, 0o400)
        self.uid = os.getuid()

    def tearDown(self):
        shutil.rmtree(self.root_path, ignore_errors=True)

    def _call(self, directory, mode):
        from letsencrypt.le_util import make_or_verify_dir
        return make_or_verify_dir(directory, mode, self.uid)

    def test_creates_dir_when_missing(self):
        missing = os.path.join(self.root_path, "bar")
        self._call(missing, 0o650)
        self.assertTrue(os.path.isdir(missing))
        self.assertEqual(stat.S_IMODE(os.stat(missing).st_mode), 0o650)

    def test_existing_correct_mode_does_not_fail(self):
        self._call(self.path, 0o400)
        self.assertEqual(stat.S_IMODE(os.stat(self.path).st_mode), 0o400)

    def test_existing_wrong_mode_fails(self):
        self.assertRaises(errors.Error, self._call, self.path, 0o600)

    def test_reraises_os_error(self):
        # Any OSError raised by makedirs must propagate untouched.
        with mock.patch.object(os, "makedirs") as makedirs:
            makedirs.side_effect = OSError()
            self.assertRaises(OSError, self._call, "bar", 12312312)
class CheckPermissionsTest(unittest.TestCase):
    """Tests for letsencrypt.le_util.check_permissions.

    A wrong-owner scenario is deliberately not covered: triggering it
    would require running this suite as root.
    """

    def setUp(self):
        unused_fd, self.path = tempfile.mkstemp()
        self.uid = os.getuid()

    def tearDown(self):
        os.remove(self.path)

    def _call(self, mode):
        from letsencrypt.le_util import check_permissions
        return check_permissions(self.path, mode, self.uid)

    def test_ok_mode(self):
        os.chmod(self.path, 0o600)
        self.assertTrue(self._call(0o600))

    def test_wrong_mode(self):
        os.chmod(self.path, 0o400)
        self.assertFalse(self._call(0o600))
class UniqueFileTest(unittest.TestCase):
    """Tests for letsencrypt.le_util.unique_file."""

    def setUp(self):
        self.root_path = tempfile.mkdtemp()
        self.default_name = os.path.join(self.root_path, "foo.txt")

    def tearDown(self):
        shutil.rmtree(self.root_path, ignore_errors=True)

    def _call(self, mode=0o600):
        from letsencrypt.le_util import unique_file
        return unique_file(self.default_name, mode)

    def test_returns_fd_for_writing(self):
        fd, name = self._call()
        fd.write("bar")
        fd.close()
        self.assertEqual(open(name).read(), "bar")

    def test_right_mode(self):
        self.assertEqual(0o700, os.stat(self._call(0o700)[1]).st_mode & 0o777)
        self.assertEqual(0o100, os.stat(self._call(0o100)[1]).st_mode & 0o777)

    def test_default_exists(self):
        name1 = self._call()[1]  # create 0000_foo.txt
        name2 = self._call()[1]
        name3 = self._call()[1]

        self.assertNotEqual(name1, name2)
        self.assertNotEqual(name1, name3)
        self.assertNotEqual(name2, name3)

        self.assertEqual(os.path.dirname(name1), self.root_path)
        self.assertEqual(os.path.dirname(name2), self.root_path)
        self.assertEqual(os.path.dirname(name3), self.root_path)

        # Bug fix: basename1 was previously computed from name2, so the
        # suffix of name1 was never actually checked.
        basename1 = os.path.basename(name1)
        self.assertTrue(basename1.endswith("foo.txt"))
        basename2 = os.path.basename(name2)
        self.assertTrue(basename2.endswith("foo.txt"))
        basename3 = os.path.basename(name3)
        self.assertTrue(basename3.endswith("foo.txt"))
class UniqueLineageNameTest(unittest.TestCase):
    """Tests for letsencrypt.le_util.unique_lineage_name."""

    def setUp(self):
        self.root_path = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.root_path, ignore_errors=True)

    def _call(self, filename, mode=0o777):
        from letsencrypt.le_util import unique_lineage_name
        return unique_lineage_name(self.root_path, filename, mode)

    def test_basic(self):
        handle, path = self._call("wow")
        self.assertTrue(isinstance(handle, file))
        self.assertEqual(os.path.join(self.root_path, "wow.conf"), path)

    def test_multiple(self):
        # Ten calls with the same prefix: the last returned name must
        # carry the counter 0009.
        for _ in xrange(10):
            handle, name = self._call("wow")
        self.assertTrue(isinstance(handle, file))
        self.assertTrue(isinstance(name, str))
        self.assertTrue("wow-0009.conf" in name)

    @mock.patch("letsencrypt.le_util.os.fdopen")
    def test_failure(self, mock_fdopen):
        broken = OSError("whoops")
        broken.errno = errno.EIO
        mock_fdopen.side_effect = broken
        self.assertRaises(OSError, self._call, "wow")

    @mock.patch("letsencrypt.le_util.os.fdopen")
    def test_subsequent_failure(self, mock_fdopen):
        self._call("wow")
        broken = OSError("whoops")
        broken.errno = errno.EIO
        mock_fdopen.side_effect = broken
        self.assertRaises(OSError, self._call, "wow")
class SafeEmailTest(unittest.TestCase):
    """Test safe_email."""

    @classmethod
    def _call(cls, addr):
        from letsencrypt.le_util import safe_email
        return safe_email(addr)

    def test_valid_emails(self):
        for addr in ("letsencrypt@letsencrypt.org",
                     "tbd.ade@gmail.com",
                     "abc_def.jdk@hotmail.museum"):
            self.assertTrue(self._call(addr), "%s failed." % addr)

    def test_invalid_emails(self):
        for addr in ("letsencrypt@letsencrypt..org",
                     ".tbd.ade@gmail.com",
                     "~/abc_def.jdk@hotmail.museum"):
            self.assertFalse(self._call(addr), "%s failed." % addr)
if __name__ == "__main__":
    # Allow running this suite directly: python le_util_test.py
    unittest.main()  # pragma: no cover
| tdfischer/lets-encrypt-preview | letsencrypt/tests/le_util_test.py | Python | apache-2.0 | 6,111 |
import Image
def merge_images(images):
    """Return the images in `images` joined one next another.

    The canvas is as wide as all inputs combined and as tall as the
    tallest one; images are pasted left to right at y = 0.
    """
    opened = [Image.open(path) for path in images]
    total_width = sum(img.size[0] for img in opened)
    max_height = max(img.size[1] for img in opened)
    canvas = Image.new('RGBA', (total_width, max_height))
    x_offset = 0
    for img in opened:
        canvas.paste(img, (x_offset, 0))
        x_offset += img.size[0]
    return canvas
| omab/faces | pyfaces/utils.py | Python | mit | 432 |
import unittest
from prime_factors import prime_factors
# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0
class PrimeFactorsTest(unittest.TestCase):
    """Exercise prime_factors on numbers with known factorisations."""

    def _check(self, number, expected):
        # Small helper so every case reads uniformly.
        self.assertEqual(prime_factors(number), expected)

    def test_no_factors(self):
        self._check(1, [])

    def test_prime_number(self):
        self._check(2, [2])

    def test_square_of_a_prime(self):
        self._check(9, [3, 3])

    def test_cube_of_a_prime(self):
        self._check(8, [2, 2, 2])

    def test_product_of_primes_and_non_primes(self):
        self._check(12, [2, 2, 3])

    def test_product_of_primes(self):
        self._check(901255, [5, 17, 23, 461])

    def test_factors_include_a_large_prime(self):
        self._check(93819012551, [11, 9539, 894119])
if __name__ == '__main__':
    # Allow running this suite directly: python prime_factors_test.py
    unittest.main()
| CubicComet/exercism-python-solutions | prime-factors/prime_factors_test.py | Python | agpl-3.0 | 906 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: updates the Meta options of
    # ShiftTemplate (default ordering by schedule_template plus
    # verbose names).  Migrations are declarative history -- do not
    # hand-edit retroactively.

    dependencies = [
        ('scheduletemplates', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='shifttemplate',
            options={'ordering': ('schedule_template',), 'verbose_name': 'shift template', 'verbose_name_plural': 'shift templates'},
        ),
    ]
| klinger/volunteer_planner | scheduletemplates/migrations/0002_auto_20151013_2229.py | Python | agpl-3.0 | 465 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-04-05 15:18
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a nullable FK from
    # LearningComponentYear to LearningUnitYear (CASCADE on delete).
    # Migrations are declarative history -- do not hand-edit
    # retroactively.

    dependencies = [
        ('base', '0444_auto_20190412_0948'),
    ]

    operations = [
        migrations.AddField(
            model_name='learningcomponentyear',
            name='learning_unit_year',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.LearningUnitYear'),
        ),
    ]
| uclouvain/OSIS-Louvain | base/migrations/0445_add_field_learning_unit_year.py | Python | agpl-3.0 | 576 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest as ut
import unittest_decorators as utx
import numpy as np
import math
import espressomd
import espressomd.interactions
import espressomd.shapes
import tests_common
@utx.skipIfMissingFeatures(["LENNARD_JONES_GENERIC"])
class ShapeBasedConstraintTest(ut.TestCase):
box_l = 30.
system = espressomd.System(box_l=3 * [box_l])
def tearDown(self):
    """Remove all particles and constraints so each test starts clean."""
    for container in (self.system.part, self.system.constraints):
        container.clear()
def pos_on_surface(self, theta, v, semiaxis0, semiaxis1,
                   semiaxis2, center=np.array([15, 15, 15])):
    """Return position on ellipsoid surface.

    theta is the azimuthal angle, v the cosine of the polar angle;
    the three semiaxes lie along x, y and z.
    """
    rho = np.sqrt(1. - v**2)  # radial factor of the parametrization
    surface_point = np.array([semiaxis0 * rho * np.cos(theta),
                              semiaxis1 * rho * np.sin(theta),
                              semiaxis2 * v])
    return center + surface_point
def test_hollow_conical_frustum(self):
    """
    Test implementation of conical frustum shape.
    """
    R1 = 5.0
    R2 = 10.0
    LENGTH = 15.0
    D = 2.4

    # z-coordinate of the sloped surface at lateral position y: linear
    # interpolation between the two radii along the axis.
    def z(y, r1, r2, l): return l / (r1 - r2) * \
        y + l / 2. - l * r1 / (r1 - r2)

    # Zero thickness: points on the sloped surface have distance 0.
    shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
        0, 0, 1], r1=R1, r2=R2, thickness=0.0, length=LENGTH)
    y_vals = np.linspace(R1, R2, 100)
    for y in y_vals:
        dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
        self.assertAlmostEqual(dist[0], 0.0)

    # Finite thickness, direction=-1: points on the wall midplane are
    # half a thickness from the surface.
    shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
        0, 0, 1], r1=R1, r2=R2, thickness=D, length=LENGTH, direction=-1)
    for y in y_vals:
        dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
        self.assertAlmostEqual(dist[0], 0.5 * D)
    # The getters must echo the constructor arguments.
    np.testing.assert_almost_equal(np.copy(shape.center), [0.0, 0.0, 0.0])
    np.testing.assert_almost_equal(np.copy(shape.axis), [0, 0, 1])
    self.assertEqual(shape.r1, R1)
    self.assertEqual(shape.r2, R2)
    self.assertEqual(shape.thickness, D)
    self.assertEqual(shape.length, LENGTH)
    self.assertEqual(shape.direction, -1)

    # Default direction flips the sign of the distance.
    shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
        0, 0, 1], r1=R1, r2=R2, thickness=D, length=LENGTH)
    for y in y_vals:
        dist = shape.calc_distance(position=[0.0, y, z(y, R1, R2, LENGTH)])
        self.assertAlmostEqual(dist[0], -0.5 * D)

    # check sign of dist
    shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
        0, 0, 1], r1=R1, r2=R1, thickness=D, length=LENGTH)
    self.assertLess(shape.calc_distance(
        position=[0.0, R1, 0.25 * LENGTH])[0], 0.0)
    self.assertLess(shape.calc_distance(
        position=[0.0, R1 + (0.5 - sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
    self.assertGreater(shape.calc_distance(
        position=[0.0, R1 + (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
    self.assertGreater(shape.calc_distance(
        position=[0.0, R1 - (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)

    shape = espressomd.shapes.HollowConicalFrustum(center=[0.0, 0.0, 0.0], axis=[
        0, 0, 1], r1=R1, r2=R1, thickness=D, length=LENGTH, direction=-1)
    self.assertGreater(shape.calc_distance(
        position=[0.0, R1, 0.25 * LENGTH])[0], 0.0)
    self.assertGreater(shape.calc_distance(
        position=[0.0, R1 + (0.5 - sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
    self.assertLess(shape.calc_distance(
        position=[0.0, R1 + (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
    self.assertLess(shape.calc_distance(
        position=[0.0, R1 - (0.5 + sys.float_info.epsilon) * D, 0.25 * LENGTH])[0], 0.0)
def test_sphere(self):
    """Checks geometry of an inverted sphere.

    Samples points on spheres slightly inside/outside the constraint
    radius and verifies the reported signed distance.
    """
    rad = self.box_l / 2.0
    sphere_shape = espressomd.shapes.Sphere(
        center=3 * [rad],
        radius=rad,
        direction=-1)
    phi_steps = 11
    theta_steps = 11
    for distance in {-1.2, 2.6}:
        for phi in range(phi_steps):
            phi_angle = phi / phi_steps * 2.0 * math.pi
            for theta in range(theta_steps):
                theta_angle = theta / theta_steps * math.pi
                # Spherical coordinates at radius rad + distance,
                # shifted to the sphere's center.
                pos = np.array(
                    [math.cos(phi_angle) * math.sin(theta_angle)
                     * (rad + distance),
                     math.sin(phi_angle) * math.sin(theta_angle)
                     * (rad + distance),
                     math.cos(theta_angle) * (rad + distance)]) + rad
                shape_dist, _ = sphere_shape.calc_distance(
                    position=pos.tolist())
                # direction=-1, so the sign of the distance is inverted.
                self.assertAlmostEqual(shape_dist, -distance)
def test_ellipsoid(self):
    """Checks that distance of particles on the ellipsoid constraint's surface is zero.
    For the case of a spherical ellipsoid, also several non-zero distances are tested.
    """
    system = self.system
    system.time_step = 0.01
    system.cell_system.skin = 0.4
    system.part.add(pos=[0., 0., 0.], type=0)

    # abuse generic LJ to measure distance via the potential V(r) = r
    system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
        epsilon=1., sigma=1., cutoff=7., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)

    N = 10

    # check oblate ellipsoid
    semiaxes = [2.18, 5.45]
    e = espressomd.shapes.Ellipsoid(
        a=semiaxes[0],
        b=semiaxes[1],
        center=3 * [self.box_l / 2.],
        direction=+1)

    constraint_e = espressomd.constraints.ShapeBasedConstraint(
        shape=e, particle_type=1, penetrable=True)
    const1 = system.constraints.add(constraint_e)

    # Sample the surface: energy must vanish because V(r) = r and the
    # particle sits at distance 0.
    for i in range(N):
        for j in range(N):
            theta = 2. * i / float(N) * np.pi
            v = j / float(N - 1) * 2. - 1
            pos = self.pos_on_surface(
                theta, v, semiaxes[0], semiaxes[1], semiaxes[1])
            system.part[0].pos = pos
            system.integrator.run(recalc_forces=True, steps=0)
            energy = system.analysis.energy()
            self.assertAlmostEqual(energy["total"], 0., places=6)

    system.constraints.remove(const1)

    # check prolate ellipsoid
    semiaxes = [3.61, 2.23]
    e = espressomd.shapes.Ellipsoid(
        a=semiaxes[0],
        b=semiaxes[1],
        center=3 * [self.box_l / 2.],
        direction=+1)

    constraint_e = espressomd.constraints.ShapeBasedConstraint(
        shape=e, particle_type=1, penetrable=True)
    const1 = system.constraints.add(constraint_e)

    for i in range(N):
        for j in range(N):
            theta = 2. * i / float(N) * np.pi
            v = j / float(N - 1) * 2. - 1
            pos = self.pos_on_surface(
                theta, v, semiaxes[0], semiaxes[1], semiaxes[1])
            system.part[0].pos = pos
            system.integrator.run(recalc_forces=True, steps=0)
            energy = system.analysis.energy()
            self.assertAlmostEqual(energy["total"], 0., places=6)

    # check sphere (multiple distances from surface)
    # change ellipsoid parameters instead of creating a new constraint
    e.a = 1.
    e.b = 1.
    radii = np.linspace(1., 6.5, 7)
    for i in range(N):
        for j in range(N):
            theta = 2. * i / float(N) * np.pi
            v = j / float(N - 1) * 2. - 1
            for r in radii:
                pos = self.pos_on_surface(theta, v, r, r, r)
                system.part[0].pos = pos
                system.integrator.run(recalc_forces=True, steps=0)
                energy = system.analysis.energy()
                # With V(r) = r the energy equals the distance r - 1.
                self.assertAlmostEqual(energy["total"], r - 1.)

    # Reset the interaction to zero
    system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
        epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_cylinder(self):
    """Tests if shape based constraints can be added to a system both by
    (1) defining a constraint object which is then added
    (2) and via keyword arguments.
    Checks that cylinder constraints with LJ interactions exert forces
    on a test particle (that is, the constraints do what they should).
    """
    system = self.system
    system.time_step = 0.01
    system.cell_system.skin = 0.4
    rad = self.box_l / 2.0
    length = self.box_l / 2.0

    system.part.add(id=0, pos=[rad, 1.02, rad], type=0)

    # check force calculation of a cylinder without top and bottom
    interaction_dir = -1  # constraint is directed inwards
    cylinder_shape = espressomd.shapes.Cylinder(
        center=3 * [rad],
        axis=[0, 0, 1],
        direction=interaction_dir,
        radius=rad,
        length=self.box_l + 5)  # +5 in order to have no top or bottom
    penetrability = False  # impenetrable
    outer_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
        shape=cylinder_shape, particle_type=1, penetrable=penetrability)
    outer_cylinder_wall = system.constraints.add(outer_cylinder_constraint)
    system.non_bonded_inter[0, 1].lennard_jones.set_params(
        epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
    system.integrator.run(0)  # update forces

    self.assertAlmostEqual(outer_cylinder_constraint.min_dist(), 1.02)

    # test summed forces on cylinder wall
    self.assertAlmostEqual(
        -1.0 * outer_cylinder_wall.total_force()[1],
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.0,
            sig=1.0,
            r=1.02),
        places=10)  # minus for Newton's third law

    # check whether total_summed_outer_normal_force is correct
    y_part2 = self.box_l - 1.02
    system.part.add(id=1, pos=[rad, y_part2, rad], type=0)
    system.integrator.run(0)

    dist_part2 = self.box_l - y_part2
    self.assertAlmostEqual(outer_cylinder_wall.total_force()[2], 0.0)
    self.assertAlmostEqual(
        outer_cylinder_wall.total_normal_force(),
        2 *
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.0,
            sig=1.0,
            r=dist_part2))

    # Test the geometry of a cylinder with top and bottom
    cylinder_shape_finite = espressomd.shapes.Cylinder(
        center=3 * [rad],
        axis=[0, 0, 1],
        direction=1,
        radius=rad,
        length=length)

    phi_steps = 11
    for distance in {-3.6, 2.8}:
        for z in range(int(self.box_l)):
            center = np.array([rad, rad, z])
            start_point = np.array([rad, 2 * rad - distance, z])
            for phi in range(phi_steps):
                # Rotation around the axis of the cylinder
                phi_angle = phi / phi_steps * 2.0 * math.pi
                phi_rot_matrix = np.array(
                    [[math.cos(phi_angle), -math.sin(phi_angle), 0.0],
                     [math.sin(phi_angle), math.cos(phi_angle), 0.0],
                     [0.0, 0.0, 1.0]])
                phi_rot_point = np.dot(
                    phi_rot_matrix, start_point - center) + center

                shape_dist, _ = cylinder_shape_finite.calc_distance(
                    position=phi_rot_point.tolist())

                # Expected distance: inside the axial extent it is the
                # radial distance; beyond the caps the axial (or
                # diagonal) distance to the rim dominates.
                dist = -distance
                if distance > 0.0:
                    if z < (self.box_l - length) / 2.0 + distance:
                        dist = (self.box_l - length) / 2.0 - z
                    elif z > (self.box_l + length) / 2.0 - distance:
                        dist = z - (self.box_l + length) / 2.0
                    else:
                        dist = -distance
                else:
                    if z < (self.box_l - length) / 2.0:
                        z_dist = (self.box_l - length) / 2.0 - z
                        dist = math.sqrt(z_dist**2 + distance**2)
                    elif z > (self.box_l + length) / 2.0:
                        z_dist = z - (self.box_l + length) / 2.0
                        dist = math.sqrt(z_dist**2 + distance**2)
                    else:
                        dist = -distance

                self.assertAlmostEqual(shape_dist, dist)

    # Reset
    system.non_bonded_inter[0, 1].lennard_jones.set_params(
        epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
def test_spherocylinder(self):
    """Checks that spherocylinder constraints with LJ interactions exert
    forces on a test particle (that is, the constraints do what they should)
    using geometrical parameters of (1) an infinite cylinder and (2) a
    finite spherocylinder.
    """
    system = self.system
    system.time_step = 0.01
    system.cell_system.skin = 0.4

    system.part.add(
        id=0, pos=[self.box_l / 2.0, 1.02, self.box_l / 2.0], type=0)

    # check force calculation of spherocylinder constraint
    # (1) infinite cylinder
    interaction_dir = -1  # constraint is directed inwards
    spherocylinder_shape = espressomd.shapes.SpheroCylinder(
        center=3 * [self.box_l / 2.0],
        axis=[0, 0, 1],
        direction=interaction_dir,
        radius=self.box_l / 2.0,
        length=self.box_l + 5)  # +5 in order to have no top or bottom
    penetrability = False  # impenetrable
    outer_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
        shape=spherocylinder_shape, particle_type=1, penetrable=penetrability)
    system.constraints.add(outer_cylinder_constraint)
    system.non_bonded_inter[0, 1].lennard_jones.set_params(
        epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
    system.integrator.run(0)  # update forces

    self.assertAlmostEqual(outer_cylinder_constraint.min_dist(), 1.02)

    # test summed forces on cylinder wall
    self.assertAlmostEqual(
        -1.0 * outer_cylinder_constraint.total_force()[1],
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.0,
            sig=1.0,
            r=1.02),
        places=10)  # minus for Newton's third law

    # check whether total_summed_outer_normal_force is correct
    y_part2 = self.box_l - 1.02
    system.part.add(
        id=1, pos=[self.box_l / 2.0, y_part2, self.box_l / 2.0], type=0)
    system.integrator.run(0)
    dist_part2 = self.box_l - y_part2
    self.assertAlmostEqual(outer_cylinder_constraint.total_force()[2], 0.0)
    self.assertAlmostEqual(outer_cylinder_constraint.total_normal_force(),
                           2 * tests_common.lj_force(
                               espressomd, cutoff=2.0, offset=0.,
                               eps=1.0, sig=1.0, r=dist_part2))

    # Reset
    system.part.clear()
    system.constraints.clear()
    system.non_bonded_inter[0, 1].lennard_jones.set_params(
        epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)

    # (2) finite spherocylinder
    system.part.clear()
    interaction_dir = -1  # constraint is directed inwards
    spherocylinder_shape = espressomd.shapes.SpheroCylinder(
        center=3 * [self.box_l / 2.0],
        axis=[0, 1, 0],
        direction=interaction_dir,
        radius=10.0,
        length=6.0)
    penetrability = True  # penetrable
    inner_cylinder_constraint = espressomd.constraints.ShapeBasedConstraint(
        shape=spherocylinder_shape, particle_type=1, penetrable=penetrability)
    system.constraints.add(inner_cylinder_constraint)

    # V(r) = r
    system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
        epsilon=1., sigma=1., cutoff=10., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)

    # check hemispherical caps (multiple distances from surface)
    N = 10
    radii = np.linspace(1., 10., 10)
    system.part.add(pos=[0., 0., 0.], type=0)
    for i in range(6):
        for j in range(N):
            theta = 2. * i / float(N) * np.pi
            v = j / float(N - 1) * 2. - 1
            for r in radii:
                # Sample a sphere of radius r shifted onto a cap.
                pos = self.pos_on_surface(theta, v, r, r, r) + [0, 3, 0]
                system.part[0].pos = pos
                system.integrator.run(recalc_forces=True, steps=0)
                energy = system.analysis.energy()
                self.assertAlmostEqual(energy["total"], 10. - r)

    # Reset
    system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
        epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
def test_wall_forces(self):
    """Tests if shape based constraints can be added to a system both by
    (1) defining a constraint object which is then added
    (2) and via keyword arguments.
    Checks that wall constraints with LJ interactions exert forces
    on a test particle (that is, the constraints do what they should).
    """
    system = self.system
    system.time_step = 0.01
    system.part.add(id=0, pos=[5., 1.21, 0.83], type=0)

    # Check forces are initialized to zero
    f_part = system.part[0].f

    self.assertEqual(f_part[0], 0.)
    self.assertEqual(f_part[1], 0.)
    self.assertEqual(f_part[2], 0.)

    # Each wall interacts with the particle through its own LJ epsilon
    # so the two force components can be told apart below.
    system.non_bonded_inter[0, 1].lennard_jones.set_params(
        epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
    system.non_bonded_inter[0, 2].lennard_jones.set_params(
        epsilon=1.5, sigma=1.0, cutoff=2.0, shift=0)

    shape_xz = espressomd.shapes.Wall(normal=[0., 1., 0.], dist=0.)
    shape_xy = espressomd.shapes.Wall(normal=[0., 0., 1.], dist=0.)

    # (1)
    constraint_xz = espressomd.constraints.ShapeBasedConstraint(
        shape=shape_xz, particle_type=1)
    wall_xz = system.constraints.add(constraint_xz)

    # (2)
    wall_xy = system.constraints.add(shape=shape_xy, particle_type=2)

    system.integrator.run(0)  # update forces
    f_part = system.part[0].f

    self.assertEqual(f_part[0], 0.)
    self.assertAlmostEqual(
        f_part[1],
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.0,
            sig=1.0,
            r=1.21),
        places=10)
    self.assertAlmostEqual(
        f_part[2],
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.5,
            sig=1.0,
            r=0.83),
        places=10)

    # test summed forces on walls
    self.assertAlmostEqual(
        -1.0 * wall_xz.total_force()[1],
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.0,
            sig=1.0,
            r=1.21),
        places=10)  # minus for Newton's third law
    self.assertAlmostEqual(
        -1.0 * wall_xy.total_force()[2],
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.5,
            sig=1.0,
            r=0.83),
        places=10)

    # check whether total_normal_force is correct
    self.assertAlmostEqual(
        wall_xy.total_normal_force(),
        tests_common.lj_force(
            espressomd,
            cutoff=2.0,
            offset=0.,
            eps=1.5,
            sig=1.0,
            r=0.83),
        places=10)

    # this one is closer and should get the mindist()
    system.part.add(pos=[5., 1.20, 0.82], type=0)
    self.assertAlmostEqual(constraint_xz.min_dist(), system.part[1].pos[1])
    self.assertAlmostEqual(wall_xz.min_dist(), system.part[1].pos[1])
    self.assertAlmostEqual(wall_xy.min_dist(), system.part[1].pos[2])

    # Reset
    system.non_bonded_inter[0, 1].lennard_jones.set_params(
        epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
    system.non_bonded_inter[0, 2].lennard_jones.set_params(
        epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
    def test_slitpore(self):
        """Checks that slitpore constraints with LJ interactions exert forces
        on a test particle (that is, the constraints do what they should).

        A single particle is moved to characteristic points of the slitpore
        geometry (inside/outside the channel, on surfaces, on the smoothing
        circles) and the constraint's reported distance and total force are
        compared against hand-computed reference values.
        """
        system = self.system
        system.time_step = 0.01
        system.cell_system.skin = 0.4
        # check force calculation of slitpore constraint
        slitpore_shape = espressomd.shapes.Slitpore(
            channel_width=5,
            lower_smoothing_radius=2,
            upper_smoothing_radius=3,
            pore_length=15,
            pore_mouth=20,
            pore_width=10,
            dividing_plane=self.box_l / 2)
        slitpore_constraint = espressomd.constraints.ShapeBasedConstraint(
            shape=slitpore_shape, particle_type=1, penetrable=True)
        system.constraints.add(slitpore_constraint)
        # V(r) = r
        # (generic LJ with e1=-1, b1=1, b2=0 makes the potential linear in r,
        # so the constraint force magnitude is 1 within the cutoff)
        system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
            epsilon=1., sigma=1., cutoff=10., shift=0., offset=0., e1=-1, e2=0, b1=1., b2=0.)
        system.part.add(pos=[0., 0., 0.], type=0)
        x = self.box_l / 2.0
        # offset used to place test points on the 45-degree position of the
        # smoothing circles
        d = 1 - np.sqrt(2) / 2
        # (position, expected distance, expected total force) triples;
        # negative distances mark positions outside the accessible region
        parameters = [
            ([x, x, 1.], -4., [0., 0., -1.]),  # outside channel
            ([x, x, 15.], 5., [-1., 0., 0.]),  # inside channel
            ([x, x, 5.], 0., [0., 0., 0.]),  # on channel bottom surface
            ([x - 5., x, 15.], 0., [0., 0., 0.]),  # on channel side surface
            ([x + 5., x, 15.], 0., [0., 0., 0.]),  # on channel side surface
            ([x - 5. + 2 * d, x, 5. + 2 * d], 0., [0., 0., 0.]),  # lower circle
            ([x + 5. - 2 * d, x, 5. + 2 * d], 0., [0., 0., 0.]),  # lower circle
            ([x - 5. - 3 * d, x, 20. - 3 * d], 0., [0., 0., 0.]),  # upper circle
            ([x + 5. + 3 * d, x, 20. - 3 * d], 0., [0., 0., 0.]),  # upper circle
            ([1., x, 20.], 0., [0., 0., 0.]),  # on inner wall surface
            ([x, x, 25.], 0., [0., 0., 0.]),  # on outer wall surface
            ([x, x, 27.], -2., [0., 0., 1.]),  # outside wall
        ]
        for pos, ref_mindist, ref_force in parameters:
            system.part[0].pos = pos
            system.integrator.run(recalc_forces=True, steps=0)
            obs_mindist = slitpore_constraint.min_dist()
            self.assertAlmostEqual(obs_mindist, ref_mindist, places=10)
            if (ref_mindist == 0. and obs_mindist != 0.):
                # force direction on a circle is not well-defined due to
                # numerical instability
                continue
            np.testing.assert_almost_equal(
                np.copy(slitpore_constraint.total_force()), ref_force, 10)
        # Reset
        system.non_bonded_inter[0, 1].generic_lennard_jones.set_params(
            epsilon=0., sigma=0., cutoff=0., shift=0., offset=0., e1=0, e2=0, b1=0., b2=0.)
    def test_rhomboid(self):
        """Checks that rhomboid constraints with LJ interactions exert forces
        on a test particle (that is, the constraints do what they should)
        using the geometrical parameters of (1) a cuboid and (2) a rhomboid.

        Part (1) also scans a 12x12x12 grid of positions around the cuboid
        and validates the shape's distance calculation against an
        independent geometric computation.
        """
        system = self.system
        system.time_step = 0.01
        system.cell_system.skin = 0.4
        # check force calculation of rhomboid constraint
        # (1) using a cuboid
        interaction_dir = +1  # constraint is directed outwards
        length = np.array([-5.0, 6.0, 7.0])  # dimension of the cuboid
        corner = np.array(3 * [self.box_l / 2.0])
        rhomboid_shape = espressomd.shapes.Rhomboid(
            corner=corner,
            a=[length[0], 0.0, 0.0],  # cube
            b=[0.0, length[1], 0.0],
            c=[0.0, 0.0, length[2]],
            direction=interaction_dir
        )
        penetrability = False  # impenetrable
        rhomboid_constraint = espressomd.constraints.ShapeBasedConstraint(
            shape=rhomboid_shape, particle_type=1, penetrable=penetrability)
        rhomboid_constraint = system.constraints.add(rhomboid_constraint)
        system.non_bonded_inter[0, 1].lennard_jones.set_params(
            epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
        # particle centred in x/y, one unit below the cuboid's bottom face
        system.part.add(id=0, pos=[self.box_l / 2.0 + length[0] / 2.0,
                                   self.box_l / 2.0 + length[1] / 2.0,
                                   self.box_l / 2.0 - 1], type=0)
        system.integrator.run(0)  # update forces
        f_part = system.part[0].f
        self.assertEqual(rhomboid_constraint.min_dist(), 1.)
        self.assertEqual(f_part[0], 0.)
        self.assertEqual(f_part[1], 0.)
        # force acts along -z with the LJ magnitude at r=1
        self.assertAlmostEqual(
            -f_part[2],
            tests_common.lj_force(
                espressomd,
                cutoff=2.,
                offset=0.,
                eps=1.,
                sig=1.,
                r=1.),
            places=10)
        self.assertAlmostEqual(
            rhomboid_constraint.total_normal_force(),
            tests_common.lj_force(
                espressomd,
                cutoff=2.,
                offset=0.,
                eps=1.,
                sig=1.,
                r=1.),
            places=10)
        # scan a grid of positions around the cuboid and compare the shape's
        # reported distance and distance vector against an independent
        # geometric calculation
        x_range = 12
        y_range = 12
        z_range = 12
        for x in range(x_range):
            for y in range(y_range):
                for z in range(z_range):
                    pos = np.array(
                        [x + (self.box_l + length[0] - x_range) / 2.0,
                         y + (self.box_l + length[1] - y_range) / 2.0,
                         z + (self.box_l + length[2] - z_range) / 2.0])
                    shape_dist, shape_dist_vec = rhomboid_shape.calc_distance(
                        position=pos.tolist())
                    outside = False
                    edge_case = False
                    dist_vec = np.array([0.0, 0.0, 0.0])
                    # check if outside or inside
                    if(pos[0] < (self.box_l + length[0] - abs(length[0])) / 2.0 or
                       pos[0] > (self.box_l + length[0] + abs(length[0])) / 2.0 or
                       pos[1] < (self.box_l + length[1] - abs(length[1])) / 2.0 or
                       pos[1] > (self.box_l + length[1] + abs(length[1])) / 2.0 or
                       pos[2] < (self.box_l + length[2] - abs(length[2])) / 2.0 or
                       pos[2] > (self.box_l + length[2] + abs(length[2])) / 2.0):
                        outside = True
                    if outside:
                        # outside: component-wise offset from the nearest
                        # face/edge/corner gives the distance vector
                        for i in range(3):
                            if pos[i] < (self.box_l + length[i] -
                                         abs(length[i])) / 2.0:
                                dist_vec[i] = pos[i] - (
                                    self.box_l + length[i] - abs(length[i])) / 2.0
                            elif pos[i] > (self.box_l + length[i] + abs(length[i])) / 2.0:
                                dist_vec[i] = pos[i] - (
                                    self.box_l + length[i] + abs(length[i])) / 2.0
                            else:
                                dist_vec[i] = 0.0
                        dist = np.linalg.norm(dist_vec)
                    else:
                        # inside: the distance is the smallest separation
                        # from any of the six faces, with sign flipped by
                        # the interaction direction
                        dist = self.box_l
                        c1 = pos - corner
                        c2 = corner + length - pos
                        abs_c1c2 = np.abs(np.concatenate((c1, c2)))
                        dist = np.amin(abs_c1c2)
                        where = np.argwhere(dist == abs_c1c2)
                        if len(where) > 1:
                            edge_case = True
                        for which in where:
                            if which < 3:
                                dist_vec[which] = dist * np.sign(c1[which])
                            else:
                                dist_vec[which - 3] = -dist * \
                                    np.sign(c2[which - 3])
                        dist *= -interaction_dir
                    if edge_case:
                        # several faces equally close: the direction is
                        # ambiguous, compare only non-zero components
                        for i in range(3):
                            if shape_dist_vec[i] != 0.0:
                                self.assertAlmostEqual(
                                    abs(shape_dist_vec[i]), abs(dist_vec[i]))
                    else:
                        self.assertAlmostEqual(shape_dist_vec[0], dist_vec[0])
                        self.assertAlmostEqual(shape_dist_vec[1], dist_vec[1])
                        self.assertAlmostEqual(shape_dist_vec[2], dist_vec[2])
                    self.assertAlmostEqual(shape_dist, dist)
        # (2) using a rhomboid
        rhomboid_shape.a = [5., 5., 0.]  # rhomboid
        rhomboid_shape.b = [0., 0., 5.]
        rhomboid_shape.c = [0., 5., 0.]
        system.part[0].pos = [self.box_l / 2.0 + 2.5,
                              self.box_l / 2.0 + 2.5,
                              self.box_l / 2.0 - 1]
        system.integrator.run(0)  # update forces
        self.assertEqual(rhomboid_constraint.min_dist(), 1.)
        self.assertAlmostEqual(
            rhomboid_constraint.total_normal_force(),
            tests_common.lj_force(
                espressomd,
                cutoff=2.,
                offset=0.,
                eps=1.,
                sig=1.,
                r=1.),
            places=10)
        # shift the particle off the face normal; the expected distance
        # becomes 1.2247448714... (= sqrt(3/2))
        system.part[0].pos = system.part[0].pos - [0., 1., 0.]
        system.integrator.run(0)  # update forces
        self.assertAlmostEqual(
            rhomboid_constraint.min_dist(), 1.2247448714, 10)
        self.assertAlmostEqual(
            rhomboid_constraint.total_normal_force(),
            tests_common.lj_force(
                espressomd,
                cutoff=2.,
                offset=0.,
                eps=1.,
                sig=1.,
                r=1.2247448714),
            places=10)
        # Reset
        system.non_bonded_inter[0, 1].lennard_jones.set_params(
            epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
    def test_torus(self):
        """Checks that torus constraints with LJ interactions exert forces
        on a test particle (that is, the constraints do what they should).

        Also samples points at fixed signed distances from the tube surface,
        swept around both the tube (theta) and the torus axis (phi), and
        checks the shape's distance calculation at each point.
        """
        system = self.system
        system.time_step = 0.01
        system.cell_system.skin = 0.4
        interaction_dir = 1  # constraint is directed inwards
        radius = self.box_l / 4.0
        tube_radius = self.box_l / 6.0
        part_offset = 1.2
        # particle displaced from the torus centre along +y
        system.part.add(
            id=0, pos=[self.box_l / 2.0, self.box_l / 2.0 + part_offset, self.box_l / 2.0], type=0)
        # check force calculation of cylinder constraint
        torus_shape = espressomd.shapes.Torus(
            center=3 * [self.box_l / 2.0],
            normal=[0, 0, 1],
            direction=interaction_dir,
            radius=radius,
            tube_radius=tube_radius)
        penetrability = False  # impenetrable
        torus_constraint = espressomd.constraints.ShapeBasedConstraint(
            shape=torus_shape, particle_type=1, penetrable=penetrability)
        torus_wall = system.constraints.add(torus_constraint)
        system.non_bonded_inter[0, 1].lennard_jones.set_params(
            epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
        system.integrator.run(0)  # update forces
        self.assertAlmostEqual(torus_constraint.min_dist(),
                               radius - tube_radius - part_offset)
        # test summed forces on torus wall
        self.assertAlmostEqual(
            torus_wall.total_force()[1],
            tests_common.lj_force(
                espressomd,
                cutoff=2.0,
                offset=0.,
                eps=1.0,
                sig=1.0,
                r=torus_constraint.min_dist()),
            places=10)
        # check whether total_summed_outer_normal_force is correct
        # second particle placed symmetrically on the opposite side of the
        # ring, so the net force cancels but the normal force doubles
        y_part2 = self.box_l / 2.0 + 2.0 * radius - part_offset
        system.part.add(
            id=1, pos=[self.box_l / 2.0, y_part2, self.box_l / 2.0], type=0)
        system.integrator.run(0)
        self.assertAlmostEqual(torus_wall.total_force()[1], 0.0)
        self.assertAlmostEqual(torus_wall.total_normal_force(), 2 * tests_common.lj_force(
            espressomd, cutoff=2.0, offset=0., eps=1.0, sig=1.0,
            r=radius - tube_radius - part_offset))
        # Test the geometry of the shape directly
        phi_steps = 11
        theta_steps = 11
        center = np.array([self.box_l / 2.0,
                           self.box_l / 2.0,
                           self.box_l / 2.0])
        tube_center = np.array([self.box_l / 2.0,
                                self.box_l / 2.0 + radius,
                                self.box_l / 2.0])
        # one point inside the tube wall (positive) and one beyond it
        for distance in {1.02, -0.7}:
            start_point = np.array([self.box_l / 2.0,
                                    self.box_l / 2.0 + radius -
                                    tube_radius - distance,
                                    self.box_l / 2.0])
            for phi in range(phi_steps):
                for theta in range(theta_steps):
                    # Rotation around the tube
                    theta_angle = theta / theta_steps * 2.0 * math.pi
                    theta_rot_matrix = np.array(
                        [[1.0, 0.0, 0.0],
                         [0.0, math.cos(theta_angle), -math.sin(theta_angle)],
                         [0.0, math.sin(theta_angle), math.cos(theta_angle)]])
                    theta_rot_point = np.dot(
                        theta_rot_matrix,
                        start_point - tube_center)
                    theta_rot_point += tube_center
                    # Rotation around the center of the torus
                    phi_angle = phi / phi_steps * 2.0 * math.pi
                    phi_rot_matrix = np.array(
                        [[math.cos(phi_angle), -math.sin(phi_angle), 0.0],
                         [math.sin(phi_angle), math.cos(phi_angle), 0.0],
                         [0.0, 0.0, 1.0]])
                    phi_rot_point = np.dot(
                        phi_rot_matrix,
                        theta_rot_point - center) + center
                    shape_dist, _ = torus_shape.calc_distance(
                        position=phi_rot_point.tolist())
                    self.assertAlmostEqual(shape_dist, distance)
        # Reset
        system.non_bonded_inter[0, 1].lennard_jones.set_params(
            epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0)
# standard unittest entry point when the file is executed directly
if __name__ == "__main__":
    ut.main()
| KaiSzuttor/espresso | testsuite/python/constraint_shape_based.py | Python | gpl-3.0 | 36,175 |
import sys
import gzip
import time
import numpy
import json
import time
import logging
import operator
import config
import csv
import StringIO
import subprocess
import HTMLParser
#from py2neo import Graph, Path, Node, Relationship,authenticate
from scipy import stats
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404, render
from browser.forms import (CreateSemSet,ComSearchSets,CreateSearchSet,CreatePubSet)
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
#from browser.medline_parser import *
from browser.tasks import *
from browser.models import SearchSet,Compare,Overlap,Filters
from django.template.defaulttags import register
#from django.template.context_processors import csrf
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
from math import exp
from django_datatables_view.base_datatable_view import BaseDatatableView
from collections import defaultdict
from django.views.decorators.csrf import csrf_exempt
from django.core.cache import cache
from django.views.decorators.cache import cache_page
from django.core.serializers.json import DjangoJSONEncoder
from django.core import serializers
from sets import Set
from settings import DATA_FOLDER
#neo4j
from neo4j.v1 import GraphDatabase,basic_auth
auth_token = basic_auth(config.user, config.password)
driver = GraphDatabase.driver("bolt://"+config.server+":"+config.port,auth=auth_token)
#logger.debug(config.server)
#===============GoogleAuth Start
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.views.generic.base import View
from social_auth.backends import AuthFailed
from social_auth.views import complete
from django.contrib.auth.decorators import login_required
from django.conf import settings
#rest API
from rest_framework import viewsets
from browser.serializers import SearchSetSerializer
#logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',level=logging.WARNING)
#logger = logging.getLogger(__name__)
#logging.basicConfig(filename='run.log',level=logging.DEBUG)
class SearchSetViewSet(viewsets.ModelViewSet):
    """REST endpoint exposing SearchSet records owned by the requesting user."""
    serializer_class = SearchSetSerializer

    def get_queryset(self):
        """Restrict the queryset to rows belonging to the current user."""
        current_user = self.request.user
        logger.debug('user.id = '+str(current_user.id))
        return SearchSet.objects.filter(user_id=str(current_user.id))
class AuthComplete(View):
    """Completes a social-auth login flow; on backend failure, shows an
    error message and sends the user back to the login page."""

    def get(self, request, *args, **kwargs):
        logging.warning('error')
        auth_backend = kwargs.pop('backend')
        try:
            return complete(request, auth_backend, *args, **kwargs)
        except AuthFailed:
            # backend rejected the attempt (e.g. unauthorized domain)
            logging.warning('error')
            messages.error(request, "Your Google Apps domain isn't authorized for this app")
            return HttpResponseRedirect(reverse('gauth_login'))
class LoginError(View):
    """Returns a bare HTTP 401 response for failed logins."""

    def get(self, request, *args, **kwargs):
        unauthorized = HttpResponse(status=401)
        return unauthorized
#===============GoogleAuth End
@register.filter
def get_item(dictionary, key):
    """Template filter: look up *key* in *dictionary*; None when absent."""
    if key in dictionary:
        return dictionary[key]
    return None
@register.filter
def mysplit(value, sep = "."):
    """Template filter: split *value* on *sep* (default '.') into a list."""
    return value.split(sep)
tmpDir=settings.MEDIA_ROOT
def people(request):
    """Placeholder view holding example Cypher queries; renders nothing
    and implicitly returns None."""
    # example query
    example_sem = "match (s:SearchSet)--(p:Pubmed)--(st:SDB_triple)--(si:SDB_item) where s.name = 'tom gaunt_2' return count(distinct(p)) as c,si order by c;"
    example_mesh = "match (s:SearchSet)--(p:Pubmed)--(m:Mesh) where s.name = 'tom gaunt_2' return count(distinct(p)) as c,m order by c;"
def ajax_graph_metrics(request):
    """AJAX endpoint feeding the dashboard charts.

    Returns JSON with:
      metrics - node counts (Pubmed, Mesh, SDB_triple, SDB_item) from neo4j
      cats    - month labels ("YYYY-MM") for the time series (last 24)
      uCounts - cumulative distinct users per month
      aCounts - cumulative completed search sets per month
      cCounts - cumulative distinct comparisons per month
    """
    session = driver.session()
    logger.debug('getting graph metrics')
    # distinct users with search sets, excluding the admin user (id 2)
    uAll = SearchSet.objects.order_by().values_list('user_id', flat=True).exclude(user_id='2').distinct().count()
    # defaults so the json.dumps below never references unbound names on a
    # non-AJAX request (previously this raised NameError)
    metrics = []
    cats = []
    uCounts = []
    aCounts = []
    cCounts = []
    if request.is_ajax():
        # get data for graph
        gCom = "match (s:Pubmed) return count(s) as s union match (s:Mesh) return count(s) as s union match (s:SDB_triple) return count(s) as s union match (s:SDB_item) return count(s) as s;"
        logger.debug(gCom)
        data = []
        for res in session.run(gCom):
            data.append(res[0])
        metrics = data
        logger.debug(data)
        # get user and article set over time
        logger.debug("getting time data...")
        s = SearchSet.objects.filter(job_status='Complete').exclude(user_id='2')
        # group search-set owners by "YYYY-MM" month of job start
        aDic = {}
        for i in s:
            u = i.user_id
            t = i.job_start.split(" ")[0].split("-")[0:2]
            t = "-".join(t)
            if t in aDic:
                aDic[t].append(u)
            else:
                aDic[t] = [u]
        # group comparison job names by month
        c = Compare.objects.filter(job_status='View results').exclude(user_id='2')
        cDic = {}
        for i in c:
            job_key = i.job_name  # renamed from 'id' (shadowed the builtin)
            t = i.job_start.split(" ")[0].split("-")[0:2]
            t = "-".join(t)
            if t in cDic:
                cDic[t].append(job_key)
            else:
                cDic[t] = [job_key]
        cCountOld = []
        oldCount = []
        # build cumulative series month by month
        for a in sorted(aDic):
            cats.append(a)
            if a in aDic:
                uCount = len(list(set(aDic[a] + oldCount)))
                uCounts.append(uCount)
                aCount = len(aDic[a] + oldCount)
                aCounts.append(aCount)
                oldCount = aDic[a] + oldCount
            else:
                uCounts.append(0)
                aCounts.append(0)
            if a in cDic:
                cCount = len(list(set(cDic[a] + cCountOld)))
                cCounts.append(cCount)
                cCountOld = cDic[a] + cCountOld
            else:
                cCounts.append(0)
        # keep only the most recent 24 months
        lastTop = 24
        uCounts = uCounts[-lastTop:len(uCounts)]
        aCounts = aCounts[-lastTop:len(aCounts)]
        cCounts = cCounts[-lastTop:len(cCounts)]
        cats = cats[-lastTop:len(cats)]
    else:
        logger.debug('not ajax request')
    mimetype = 'application/json'
    session.close()
    return HttpResponse(json.dumps({'metrics':metrics,'uCounts':uCounts,'aCounts':aCounts,'cCounts':cCounts,'cats':cats}), mimetype)
def ajax_test(request):
    """Simple AJAX health check: reports whether MEDIA_ROOT/new.txt exists.

    Returns an HttpResponse whose body is "file exists" when the file is
    present and an empty string otherwise.
    """
    body = ''  # renamed from 'object' (shadowed the builtin)
    f = tmpDir+'/new.txt'
    # lazy %-style logging args; the original passed a second positional
    # argument with no placeholder, which breaks logging's formatting
    logger.debug('looking for file %s', f)
    if os.path.isfile(f):
        body = "file exists"
        logger.debug('object exists')
    return HttpResponse(body)
def issn_to_name(iList):
logger.debug('Running issn_to_name'+str(iList))
#check for null entries
if 'null' in iList:
iList.remove('null')
iString = ",".join(iList)
start=time.time()
print "\n### Getting ids ###"
url="http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
#params = {'term': '0140-6736+OR+0022-2275'}
params = {'term': iString}
r = requests.post(url)
# GET with params in URL
r = requests.get(url, params=params)
#create random file name
n = 10
ran=''.join(["%s" % randint(0, 9) for num in range(0, n)])
rSplit = r.text.split("<")
iDic = {}
iName = []
for i in rSplit:
l = re.match(r'To>(.*?)$', i)
if l:
m = l.group(1).replace('[Journal]','').replace('"','').strip().encode("ascii")
iName.append(m)
for i in range(0,len(iList)):
iDic[iList[i]]=iName[i]
logger.debug(iDic)
return iDic
def pubmed_id_details(pList):
    """Log that PubMed details are being fetched for *pList*.

    Currently a stub: it only emits a debug log line and returns None.
    """
    # fixed typo: logger.debub -> logger.debug (raised AttributeError)
    logger.debug('Getting pubmed info')
def pmid_to_info(pList):
    """Fetch title and journal name for a list of PubMed IDs via NCBI esummary.

    Args:
        pList: list of PubMed ID strings.

    Returns:
        [ptDic, pjDic]: two dicts keyed by PubMed ID -- article titles and
        full journal names, defaulting to 'n/a' when a field is absent
        from the response.
    """
    #logger.debug('Running pmid_to_info'+str(pList))
    iString = ",".join(pList)
    # NOTE(review): 'start' is unused (leftover timing instrumentation)
    start=time.time()
    print "\n### Getting ids ###"
    url="http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?"
    #params = {'term': '0140-6736+OR+0022-2275'}
    params = {'db':'pubmed','id': iString}
    # GET with params in URL
    r = requests.get(url, params=params)
    #print r.text
    # crude XML scan: split on '<' and pattern-match the tags of interest
    rSplit = r.text.split("<")
    ptDic = {}
    pjDic = {}
    t = jt = 'n/a'
    for i in rSplit:
        #print "i",i
        #check pubmed id
        pmid_match = re.match(r'Item Name="pubmed" Type="String">(.*?)$', i)
        if pmid_match:
            pmid = pmid_match.group(1)
            #print pmid
        #get title
        t_match = re.match(r'Item Name="Title" Type="String">(.*?)$', i)
        if t_match:
            t = t_match.group(1)
            #print t
        #get jorunal name
        jt_match = re.match(r'Item Name="FullJournalName" Type="String">(.*?)$', i)
        if jt_match:
            jt = jt_match.group(1)
            #print jt
        # end of one DocSum record: store what was collected and reset
        entry_match = re.match(r'/DocSum>', i)
        if entry_match:
            #print "\n"
            ptDic[pmid]=t
            pjDic[pmid]=jt
            jt='n/a'
            t='n/a'
    #print pDic
    return [ptDic,pjDic]
def about(request):
    """Render the static About page."""
    ctx = {'nbar': 'about'}
    return render_to_response('about.html', ctx, context_instance=RequestContext(request))
def citation(request):
    """Render the static Citation page."""
    return render_to_response('citation.html', {'nbar': 'citation'}, context_instance=RequestContext(request))
def help(request):
    """Render the static Help page."""
    page_context = {'nbar': 'help'}
    return render_to_response('help.html', page_context, context_instance=RequestContext(request))
def dt_test_page(request):
    """Render the bare datatables test page (no template context)."""
    return render_to_response('dt_test_page.html')
def contact(request):
    """Render the static Contact page."""
    return render_to_response('contact.html', dict(nbar='contact'), context_instance=RequestContext(request))
def get_semmed_items(request):
    """AJAX autocomplete endpoint for SemMedDB item names.

    Matches the last comma-separated token of the 'term' GET parameter
    (case-insensitive prefix) against SDB_item node names and returns a
    JSON list of {id, label, value} dicts for the autocomplete widget.
    """
    session = driver.session()
    # default payload so the final return never hits an unbound name on a
    # non-AJAX request (previously sList_json raised NameError)
    sList_json = json.dumps([])
    if request.is_ajax():
        q = request.GET.get('term', '').split(',')[-1].strip()
        logger.debug('q = '+q)
        #get data for autocomplete
        # SECURITY: pass the user-supplied term as a query parameter rather
        # than interpolating it into the Cypher string (avoids Cypher
        # injection); the term is still used verbatim inside the regex,
        # matching the original behaviour
        gCom = "match (sem:SDB_item) where sem.name =~ {pattern} return sem.name;"
        logger.debug(gCom)
        sList = []
        for res in session.run(gCom, {"pattern": "(?i)" + q + ".*"}):
            v = res[0].encode("ascii")
            json_data = {}
            json_data['id'] = v
            json_data['label'] = v
            json_data['value'] = v
            sList.append(json_data)
        sList_json = json.dumps(sList)
        logger.debug(len(sList))
    else:
        logger.debug('not ajax request')
    mimetype = 'application/json'
    session.close()
    return HttpResponse(sList_json, mimetype)
def index(request):
    """Front page: creates search sets and submits comparisons, then renders
    the home page with the user's completed sets and example results.

    POST requests are dispatched on the 'formType' field:
      ss     - create a search set from an uploaded file of PubMed IDs
      ss_sem - create a search set from a SemMedDB location
      ss_pub - create a search set from a PubMed-backed description
      com    - run one or more comparison methods on one or two search sets
    All background work is handed to Celery tasks; job state is tracked in
    the SearchSet/Compare tables.
    """
    userInfo = "UserID:"+str(request.user.id)+" - "
    logger.debug(userInfo+"In index")
    form1 = CreateSearchSet()
    form_sem = CreateSemSet()
    form2 = ComSearchSets()
    form_pub = CreatePubSet()
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        if request.POST['formType'] == "ss":
            if request.user.is_authenticated():
                form1 = CreateSearchSet(request.POST, request.FILES)
                #print "f = ",request.FILES
                # check whether it's valid:
                if form1.is_valid():
                    # process the data in form.cleaned_data as required
                    ss_file = request.FILES['ss_file']
                    #save to file
                    fileStore = tmpDir+'abstracts/'+str(ss_file)
                    id=form1.cleaned_data['job_name'].strip()
                    #remove special characters
                    id = re.sub('[^A-Za-z0-9 _-]+', '', id)
                    desc=form1.cleaned_data['ss_desc'].strip()
                    searchParams=[id,str(request.user.id)]
                    #add job and user data to sqlite db
                    q = SearchSet(user_id=str(request.user.id), job_name=id, job_start=time.strftime("%Y-%m-%d %H:%M:%S"),job_status='Pending',ss_desc=desc,pTotal=0,ss_file=ss_file,job_progress=0)
                    q.save()
                    #run job in background
                    #j = db_citations.delay(searchParams,fileStore)
                    j = pmid_process.delay(searchParams,fileStore)
                    SearchSet.objects.filter(user_id=str(request.user.id),job_name=id).update(job_id=j)
                    # redirect to a new URL:
                    return HttpResponseRedirect('jobs/')
            else:
                logger.debug(userInfo+"User authentication problem")
                return HttpResponseRedirect('/')
        if request.POST['formType'] == "ss_sem":
            if request.user.is_authenticated():
                form_sem = CreateSemSet(request.POST)
                #print "f = ",request.FILES
                # check whether it's valid:
                if form_sem.is_valid():
                    # process the data in form.cleaned_data as required
                    #add to graph db
                    id=form_sem.cleaned_data['job_name'].strip()
                    #remove special characters
                    id = re.sub('[^A-Za-z0-9 _-]+', '', id)
                    desc=form_sem.cleaned_data['ss_desc'].strip()
                    sem_location = request.POST["ss_sem"]
                    logger.debug('job_name = '+id)
                    logger.debug('desc = '+desc)
                    logger.debug('sem location = '+sem_location)
                    searchParams=[id,str(request.user.id), sem_location,desc]
                    #add job and user data to sqlite db
                    # the SemMedDB location is prefixed onto the description
                    descWithSem = sem_location+": "+desc
                    q = SearchSet(user_id=str(request.user.id), job_name=id, job_start=time.strftime("%Y-%m-%d %H:%M:%S"),job_status='Pending',ss_desc=descWithSem,pTotal=0,ss_file='',job_progress=0)
                    q.save()
                    #run job in background
                    logger.debug(userInfo+"Running db_sem")
                    j = db_sem.delay(searchParams)
                    SearchSet.objects.filter(user_id=str(request.user.id),job_name=id).update(job_id=j)
                    # redirect to a new URL:
                    return HttpResponseRedirect('jobs/')
            else:
                logger.debug(userInfo+"User authentication problem")
                return HttpResponseRedirect('/')
        if request.POST['formType'] == "ss_pub":
            if request.user.is_authenticated():
                form_pub = CreatePubSet(request.POST)
                #print "f = ",request.FILES
                # check whether it's valid:
                if form_pub.is_valid():
                    # process the data in form.cleaned_data as required
                    #add to graph db
                    id=form_pub.cleaned_data['job_name'].strip()
                    #remove special characters
                    id = re.sub('[^A-Za-z0-9 _-]+', '', id)
                    desc=form_pub.cleaned_data['ss_desc'].strip()
                    logger.debug('job_name = '+id)
                    logger.debug('desc = '+desc)
                    searchParams=[id,str(request.user.id),desc]
                    q = SearchSet(user_id=str(request.user.id), job_name=id, job_start=time.strftime("%Y-%m-%d %H:%M:%S"),job_status='Pending',ss_desc=desc,pTotal=0,ss_file='',job_progress=0)
                    q.save()
                    #run job in background
                    logger.debug(userInfo+"Running pub_sem")
                    j = pub_sem.delay(searchParams)
                    SearchSet.objects.filter(user_id=str(request.user.id),job_name=id).update(job_id=j)
                    # redirect to a new URL:
                    return HttpResponseRedirect('jobs/')
            else:
                logger.debug(userInfo+"User authentication problem")
                return HttpResponseRedirect('/')
        if request.POST['formType'] == "com":
            logger.debug(userInfo+"Comparing search sets")
            form2 = ComSearchSets(request.POST)
            if form2.is_valid():
                ass=form2.cleaned_data['a']
                bss=form2.cleaned_data['b']
                comType = form2.cleaned_data['comType']
                #get year range data
                yearRange = request.POST["yearRange"]
                logger.debug('yearRange:'+yearRange)
                #add one year to upper bound to make it inclusive
                year2 = int(yearRange.split("-")[1].strip())
                yearRange = yearRange.split("-")[0].strip()+" - "+str(year2)
                logger.debug('yearRange corrected:'+yearRange)
                #check if analysing one or two search sets
                if len(ass)>1 and len(bss)==0:
                    logger.debug(userInfo+"analysing single search set")
                    logger.debug("ss1 - "+str(ass))
                    s1=SearchSet.objects.get(job_name=ass,user_id=str(request.user.id))
                    jobName = str(s1.id)
                    # one Compare job per requested comparison method
                    for c in comType:
                        print "c = ",c
                        try:
                            jCheck = Compare.objects.get(job_name=jobName,year_range=yearRange,user_id=str(request.user.id),job_type=c)
                            logger.debug(userInfo+"job_status = "+str(jCheck.job_status))
                            #delete entry if not complete and resubmitted
                            if jCheck.job_progress != 100:
                                logger.debug(userInfo+"Deleting job: "+str(jCheck.job_name))
                                jCheck.delete()
                                jCheck = False
                        except ObjectDoesNotExist:
                            jCheck = False
                        if jCheck==False:
                            jobDesc = str(s1.job_name)
                            q = Compare(user_id=str(request.user.id), year_range=yearRange, job_desc=jobDesc, job_name=jobName, job_start=time.strftime("%Y-%m-%d %H:%M:%S"), job_status='Pending',job_type=c,job_progress=0)
                            q.save()
                            j=single_ss_Wrapper.delay(str(request.user.id),s1.id,c,yearRange)
                        else:
                            logger.debug(userInfo+"Search set comparison already run")
                elif len(ass)>1 and len(bss)>1:
                    logger.debug(userInfo+"analysing two search sets")
                    logger.debug("ss1 - "+str(ass))
                    logger.debug("ss2 - "+str(bss))
                    #get ids for search sets
                    s1=SearchSet.objects.get(job_name=ass,user_id=str(request.user.id))
                    s2=SearchSet.objects.get(job_name=bss,user_id=str(request.user.id))
                    #include year2 to deal with year filtering option
                    jobName = str(s1.id)+"_"+str(s2.id)+"_"+str(year2)
                    for jobType in comType:
                        logger.debug("jobType = "+jobType)
                        try:
                            jCheck = Compare.objects.get(job_name=jobName,year_range=yearRange,user_id=str(request.user.id),job_type=jobType)
                            logger.debug(userInfo+"job_status = "+str(jCheck.job_status))
                            #removed this section as allows same job to run if already running.
                            #delete entry if not complete and resubmitted
                            #if jCheck.job_progress != 100:
                            #	logger.debug(userInfo+"Deleting job: "+str(jCheck.job_name))
                            #	jCheck.delete()
                            #	jCheck = False
                        except ObjectDoesNotExist:
                            jCheck = False
                        if jCheck==False:
                            jobDesc = str(s1.job_name)+" : "+str(s2.job_name)
                            q = Compare(user_id=str(request.user.id), job_desc=jobDesc, year_range=yearRange, job_name=jobName, job_start=time.strftime("%Y-%m-%d %H:%M:%S"), job_status='Pending',job_type=jobType,job_progress=0)
                            q.save()
                            #j=comWrapper.delay(str(request.user.id),s1.id,s2.id,jobType,yearRange)
                            j=comWrapper.delay(q.id)
                        else:
                            logger.debug(userInfo+"Search set comparison already run")
                return HttpResponseRedirect('jobs/')
    else:
        form1 = CreateSearchSet()
        form_sem = CreateSemSet()
        form2 = ComSearchSets()
        form_pub = CreatePubSet()
    #get search set data for table
    j=SearchSet.objects.filter(user_id=str(request.user.id),job_status='Complete')
    #get example data for table
    exampleData=[]
    eCheck={}
    e=Compare.objects.filter(user_id='None',job_status='View results')
    for i in e:
        eName = i.job_name
        c1,c2=i.job_desc.split(':')
        if eName in eCheck:
            eCheck[eName].append(i.job_type+':'+str(i.id))
        else:
            eCheck[eName]=[c1,c2,i.job_start,i.job_type+':'+str(i.id)]
    #sort the methods
    for e in eCheck:
        eCheck[e][3:6] = sorted(eCheck[e][3:6])
        #exampleData[i.job_desc]=[c1,c2,i.job_start]
    logger.debug(eCheck)
    context = {'s': j, 'exampleData':eCheck, 'form1': form1, 'form2': form2, 'form_sem':form_sem, 'form_pub':form_pub, 'nbar': 'home'}
    return render_to_response('index.html', context, context_instance=RequestContext(request))
@cache_page(None)
def articleDetails(request, num):
    """Render per-year journal publication counts for a search set.

    Builds a stacked time series of article counts for the 10 most
    frequently occurring journals (everything else grouped as 'Other') and
    renders articles.html with the series JSON-encoded for the chart.

    Args:
        request: Django request object.
        num: primary key of the SearchSet to summarise.
    """
    session = driver.session()
    userInfo = "UserID:"+str(request.user.id)+" - "
    logger.debug(userInfo+"In article details")
    resID = num
    logger.debug(userInfo+str(resID))
    q = SearchSet.objects.get(id=resID)
    # pull publication date and ISSN for every article in the search set.
    # NOTE(review): job_name/user_id come from the database, but a
    # parameterized query would still be safer than string interpolation.
    gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed) where s.name = '"+q.job_name+"_"+q.user_id+"' return p.dp,p.issn;"
    logger.debug(userInfo+"gCom:"+gCom)
    years = set()
    yearCounts = defaultdict(dict)
    for res in session.run(gCom):
        # idiomatic None checks (was: type(x) != type(None))
        if res[0] is not None:
            # first token of the 'dp' field is the publication year
            y = res[0].split(" ")[0]
            j = res[1]
            if y is not None and j is not None and y != '':
                y = int(y)
                j = j.encode("ascii")
                years.add(y)
                if y in yearCounts:
                    if j in yearCounts[y]:
                        yearCounts[y][j] += 1
                    else:
                        yearCounts[y][j] = 1
                else:
                    yearCounts[y][j] = 1
    article_data = []
    if len(years) > 0:
        years = range(min(years), max(years) + 1)
        logger.debug(years)
        # NOTE(review): this counts the number of *years* a journal appears
        # in, not the number of articles -- confirm that is intended
        jTotals = {}
        for i in yearCounts:
            for j in yearCounts[i]:
                if j in jTotals:
                    jTotals[j] = jTotals[j] + 1
                else:
                    jTotals[j] = 1
                # (a stray no-op statement 'jTotals[j]' was removed here)
        numTopJ = 10
        topJs = dict(sorted(jTotals.items(), key=operator.itemgetter(1), reverse=True)[0:numTopJ])
        # per-year counts for the top journals, zero-filling missing years
        topCounts = defaultdict(dict)
        for i in years:
            topCounts[i]['Other'] = 0
            for j in topJs:
                if i in yearCounts:
                    if j in yearCounts[i]:
                        topCounts[i][j] = yearCounts[i][j]
                    else:
                        topCounts[i][j] = 0
                else:
                    topCounts[i][j] = 0
        # fold counts not in the top set into 'Other'
        for i in yearCounts:
            for j in yearCounts[i]:
                if j not in topCounts[i]:
                    topCounts[int(i)]['Other'] += yearCounts[i][j]
        #convert ISSN to name
        iList = []
        for i in topJs:
            iList.append(i)
        iName = issn_to_name(iList)
        topJs['Other'] = 0
        # one chart series per journal, falling back to the raw ISSN when
        # no name could be resolved
        for t in topJs:
            if t in iName:
                a = {'name': iName[t], 'data': []}
            else:
                a = {'name': t, 'data': []}
            for i in topCounts:
                a['data'].append(topCounts[i][t])
            article_data.append(a)
    context = {'years': years, 'aData': json.dumps(article_data), 'ss': q.job_name, 'nbar': 'results'}
    session.close()
    return render_to_response('articles.html', context, context_instance=RequestContext(request))
#@login_required
def jobs(request):
    """Render the job-listing page for the current user."""
    user_info = "UserID:"+str(request.user.id)+" - "
    logger.debug(user_info+"In jobs")
    return render_to_response('jobs.html', {'nbar': 'results'}, context_instance=RequestContext(request))
#@login_required
@cache_page(None)
def results(request,num):
userInfo = "UserID:"+str(request.user.id)+" - "
logger.debug(userInfo+"In results")
resID=num
logger.debug(userInfo+str(resID))
#find out if it's a shared result
uuid_regex = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
r = re.match(uuid_regex,num)
userStatus = 'user'
if r:
logger.debug('Results page URL is a UUID')
q = Compare.objects.get(hash_id=resID)
#set resID back to ID
resID=q.id
if str(q.user_id) != str(request.user.id) and q.share==False:
logger.debug('wrong user access - user id = '+str(request.user.id)+' data id = '+str(q.user_id))
return HttpResponseRedirect('/')
elif str(q.user_id) != str(request.user.id) and q.share==True:
userStatus = 'guest'
else:
q = Compare.objects.get(id=resID)
if str(q.user_id) != str(request.user.id):
logger.debug('wrong user access - user id = '+str(request.user.id)+' data id = '+str(q.user_id))
return HttpResponseRedirect('/')
shareStatus = q.share
jobDir = ""
d1 = {}
d2 = {}
#cor_pval=1e-5
jobDir = q.job_name
d = str(q.job_name)
#get hash_id
hash_id = q.hash_id
#get year end
year2 = int(q.year_range.split('-')[1].strip())-1
#check for single sarch set jobs
if '_' in d:
#get search set names
s1_name = q.job_desc.split(":")[0]
s2_name = q.job_desc.split(":")[1]
logger.debug(userInfo+"two ss results")
if q.job_type == 'Temmpo':
#o = Overlap.objects.filter(mc_id=resID)
#o_json = []
#for i in o:
# dic = {}
# dic['uniq_a']=i.uniq_a
# dic['uniq_b']=i.uniq_b
# o_json.append(dic)
#o_json = json.dumps(o_json)
#o_json = serializers.serialize('json', o, fields=('name','uniq_a','uniq_b'))
context={'hash_id':hash_id, 'res':resID,'resA':d1,'resB':d2, 'nbar': 'results', 's1_name':s1_name, 's2_name':s2_name, 'year2':year2,'userStatus':userStatus,'shareStatus':shareStatus}
else:
o = Overlap.objects.filter(mc_id=resID).count()
#get semmed concepts
cFile=DATA_FOLDER+'SRDEF.txt'
infile = open(cFile, 'r')
cDic = {}
for line in infile:
if not line.startswith("#"):
cDic[(line.split("|")[0])]=line.split("|")[1]
#convert to JSON
cDic_json = json.dumps(cDic)
#check if files exist
f=tmpDir + 'saved_data/fet/' + str(d.split("_")[0]) + '_'+str(year2+1) + '.' + q.job_type + '.fet.gz'
logger.debug('Reding data from '+f)
if os.path.isfile(f):
with gzip.open(f, 'rb') as f:
next(f)
for line in f:
l = line.rstrip('\n').encode("ascii").split("\t")
#if float(l[7]) <= cor_pval:
d1[l[0]] = ["{:,}".format(int(l[1])) + "/" + "{:,}".format(int(l[2])),
"{:,}".format(int(float(l[3]))) + "/" + "{:,}".format(int(float(l[4]))), ("%4.2f" % float(l[5])),
("%03.02e" % float(l[6])), ("%03.02e" % float(l[7]))]
f=tmpDir + 'saved_data/fet/' + str(d.split("_")[1]) + '_'+str(year2+1) + '.' + q.job_type + '.fet.gz'
if os.path.isfile(f):
with gzip.open(f, 'rb') as f:
next(f)
for line in f:
l = line.rstrip('\n').encode("ascii").split("\t")
#if float(l[7]) <= cor_pval:
d2[l[0]] = ["{:,}".format(int(l[1])) + "/" + "{:,}".format(int(l[2])),
"{:,}".format(int(float(l[3]))) + "/" + "{:,}".format(int(float(l[4]))), ("%4.2f" % float(l[5])),
("%03.02e" % float(l[6])), ("%03.02e" % float(l[7]))]
# d['pTotal']="{:,}".format(int(r[3]))
context={'hash_id':hash_id, 'res':resID,'resA':d1,'resB':d2, 'nbar': 'results', 's1_name':s1_name, 's2_name':s2_name, 'overlap':o,'year2':year2,'cDic':cDic_json,'userStatus':userStatus,'shareStatus':shareStatus}
if q.job_type == 'meshMain':
return render_to_response('mesh.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_t' or q.job_type == 'semmed':
return render_to_response('semmed.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_c':
return render_to_response('semmed_c.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_c':
return render_to_response('semmed_c.html', context, context_instance=RequestContext(request))
elif q.job_type == 'Temmpo':
return render_to_response('temmpo_res.html', context, context_instance=RequestContext(request))
else:
logger.debug(userInfo+"single ss results")
f=tmpDir + 'saved_data/fet/' + str(d) + '_'+str(year2+1)+ '.' + q.job_type + '.fet.gz'
if os.path.isfile(f):
with gzip.open(f, 'rb') as f:
next(f)
for line in f:
l = line.rstrip('\n').encode("ascii").split("\t")
#if float(l[7]) <= cor_pval:
d1[l[0]] = ["{:,}".format(int(l[1])) + "/" + "{:,}".format(int(l[2])),
"{:,}".format(float(l[3])) + "/" + "{:,}".format(float(l[4])), ("%4.2f" % float(l[5])),
("%03.02e" % float(l[6])), ("%03.02e" % float(l[7]))]
context={'hash_id':hash_id, 'res':resID,'resA':d1, 'nbar': 'results','s1_name':q.job_desc,'year2':year2,'userStatus':userStatus,'shareStatus':shareStatus}
if q.job_type == 'meshMain':
return render_to_response('mesh_single.html', context, context_instance=RequestContext(request))
elif q.job_type == 'semmed_t' or q.job_type == 'semmed_c':
return render_to_response('semmed_single.html', context, context_instance=RequestContext(request))
class OrderListJson(BaseDatatableView):
    """Datatables endpoint listing Compare jobs (user id, name, description)."""
    model = Compare
    columns = ['user_id', 'job_name', 'job_desc']
    order_columns = ['user_id']
    # cap the page size so a crafted request cannot pull the whole table
    max_display_length = 500

    def get_initial_queryset(self):
        """Start from the unfiltered Compare table."""
        return Compare.objects

    def prepare_results(self, qs):
        """Turn the (already paginated) queryset into row arrays for the UI.

        The first column is the fixed placeholder string 'fish'.
        """
        return [['fish', row.user_id, row.job_name, row.job_desc]
                for row in qs]
class ajax_searchset(BaseDatatableView):
    """Datatables endpoint listing the requesting user's SearchSet jobs."""
    columns = ['job_name', 'ss_desc', 'job_start', 'job_status',
               'job_progress', 'id']
    order_columns = ['job_name', 'ss_desc', 'job_start', 'job_status',
                     'job_progress', 'id']
    # cap the page size so a crafted request cannot pull the whole table
    max_display_length = 500

    def get_initial_queryset(self):
        """Restrict rows to the authenticated user's search sets."""
        return SearchSet.objects.filter(user_id=str(self.request.user.id))

    def prepare_results(self, qs):
        """Turn the (already paginated) queryset into row arrays for the UI."""
        return [[row.job_name,
                 row.ss_desc,
                 row.job_start,
                 row.job_status,
                 row.job_progress,
                 row.id]
                for row in qs]
class ajax_compare(BaseDatatableView):
    """Datatables endpoint listing the requesting user's Compare jobs."""
    # The model we're going to show
    model = Compare
    columns = ['job_desc', 'job_type', 'job_start', 'job_status',
               'job_progress', 'id']
    # FIX: the last entry was '' which silently disabled ordering on the id
    # column; use 'id' so order_columns matches `columns` and the sibling
    # ajax_searchset view.
    order_columns = ['job_desc', 'job_type', 'job_start', 'job_status',
                     'job_progress', 'id']
    max_display_length = 500

    def get_initial_queryset(self):
        """Restrict rows to the authenticated user's compare jobs."""
        user_id = self.request.user.id
        return Compare.objects.filter(user_id=str(user_id))

    def prepare_results(self, qs):
        """Build row arrays; append the analysed year range to the job
        description when a non-default range was used."""
        json_data = []
        for item in qs:
            job_desc = item.job_desc
            if item.year_range != '1950 - 2019':
                year1 = item.year_range.split('-')[0].strip()
                # the displayed upper bound is one less than the stored one
                year2 = int(item.year_range.split('-')[1].strip()) - 1
                job_desc = job_desc + ' (' + year1 + '-' + str(year2) + ')'
            json_data.append([
                job_desc,
                item.job_type,
                item.job_start,
                item.job_status,
                item.job_progress,
                item.id,
            ])
        return json_data
class ajax_overlap(BaseDatatableView):
    """Datatables endpoint serving Overlap rows for one Compare result.

    Server-side filtering is driven by GET parameters: the datatables search
    box, JSON-encoded positive/negative term lists ('p' / 'n'), the analysis
    type ('t'), and slider thresholds (pval, odds, pfr).
    """
    # The model we're going to show
    model = Overlap
    columns = ['name', 'uniq_a', 'uniq_b', 'shared', 'score', 'mean_cp',
               'mean_odds', 'treeLevel', 'id']
    order_columns = ['name', 'uniq_a', 'uniq_b', 'shared', 'score', 'mean_cp',
                     'mean_odds', 'treeLevel', 'id']
    max_display_length = 500

    def get_initial_queryset(self):
        """Start from all Overlap rows of the Compare result given by ?resID=."""
        # NOTE(review): resID comes straight from the query string with no
        # ownership check here — presumably access control happened earlier;
        # confirm against the calling page.
        resID = self.request.GET.get('resID', None)
        logger.debug('resID: ' + resID)
        return Overlap.objects.filter(mc_id=resID)

    def filter_queryset(self, qs):
        """Apply search-box, term-list and slider filters to the queryset."""
        logger.debug('filter_queryset')
        # use request parameters to filter queryset
        # using standard filter (the datatables global search box)
        search = self.request.GET.get(u'search[value]', None)
        if search:
            search = search  # no-op assignment, kept as-is
            logger.debug('Searching with filter ' + search)
            qs = qs.filter(name__icontains=search)
        # get analysis type ('semmed' rows filter per-column, others by name)
        aType = self.request.GET.get('t', None)
        logger.debug('Filter query on ' + aType)
        # filter using negative (exclude) search terms, JSON-encoded
        negVals = self.request.GET.get('n', None)
        if negVals:
            negVals = json.loads(negVals)
            # deal with html entity escaping in the submitted terms
            negVals = HTMLParser.HTMLParser().unescape(negVals)
            if aType == 'semmed':
                # negVals is a dict keyed s1..s5; each value is a '||'-joined
                # list of terms to exclude from the matching name column
                for i in negVals:
                    if len(negVals[i]) > 0:
                        negList = negVals[i].split('||')
                        logger.debug(i + ":" + str(negList))
                        if i == 's1':
                            qs = qs.exclude(name1__in=negList)
                        elif i == 's2':
                            qs = qs.exclude(name2__in=negList)
                        elif i == 's3':
                            qs = qs.exclude(name3__in=negList)
                        elif i == 's4':
                            qs = qs.exclude(name4__in=negList)
                        elif i == 's5':
                            qs = qs.exclude(name5__in=negList)
            else:
                if len(negVals) > 0:
                    # escape parentheses before using the terms as a regex
                    negVals = negVals.replace('(', '\(').replace(')', '\)')
                    logger.debug('filtering on negVals ' + negVals)
                    qs = qs.exclude(name__iregex=r'' + negVals + '')
                    negList = negVals.split('||')  # unused afterwards
        # filter using positive (include) search terms, JSON-encoded
        posVals = self.request.GET.get('p', None)
        if posVals:
            posVals = json.loads(posVals)
            posVals = HTMLParser.HTMLParser().unescape(posVals)
            if aType == 'semmed':
                for i in posVals:
                    if len(posVals[i]) > 0:
                        posList = posVals[i].split('||')
                        if i == 's1':
                            qs = qs.filter(name1__in=posList)
                        elif i == 's2':
                            qs = qs.filter(name2__in=posList)
                        elif i == 's3':
                            qs = qs.filter(name3__in=posList)
                        elif i == 's4':
                            qs = qs.filter(name4__in=posList)
                        elif i == 's5':
                            qs = qs.filter(name5__in=posList)
            else:
                if len(posVals) > 0:
                    posVals = posVals.replace('(', '\(').replace(')', '\)')
                    logger.debug('filtering on posVals ' + posVals)
                    qs = qs.filter(name__iregex=r'' + posVals + '')
        # filter using sliders (thresholds arrive as strings, 'NaN' = unset)
        pval = self.request.GET.get('pval', None)
        odds = self.request.GET.get('odds', None)
        pfr = self.request.GET.get('pfr', None)
        if pval and pval != 'NaN':
            qs = qs.filter(mean_cp__lte=pval)
        if odds and odds != 'NaN':
            qs = qs.filter(mean_odds__gte=odds)
        if pfr and pfr != 'NaN':
            qs = qs.filter(treeLevel__gte=pfr)
        logger.debug('len(qs)=' + str(len(qs)))
        return qs

    def prepare_results(self, qs):
        """Turn the (already paginated) queryset into row arrays.

        For 'semmed_c' rows only the part of `name` before the first ':' is
        shown; other analysis types emit `name` unchanged.
        """
        json_data = []
        aType = self.request.GET.get('t', None)
        if aType == 'semmed':
            # example name format:
            # Milk||PART_OF||Breast||261943:Breast||LOCATION_OF||Diphosphonates||10722541
            for item in qs:
                json_data.append([
                    item.name,
                    item.uniq_a,
                    item.uniq_b,
                    item.shared,
                    item.score,
                    item.mean_cp,
                    item.mean_odds,
                    item.treeLevel,
                    item.id,
                ])
        elif aType == 'semmed_c':
            for item in qs:
                # strip the trailing ':<id>' part from the concept name
                s = item.name.split(":")[0]
                json_data.append([
                    s,
                    item.uniq_a,
                    item.uniq_b,
                    item.shared,
                    item.score,
                    item.mean_cp,
                    item.mean_odds,
                    item.treeLevel,
                    item.id,
                ])
        else:
            for item in qs:
                json_data.append([
                    item.name,
                    item.uniq_a,
                    item.uniq_b,
                    item.shared,
                    item.score,
                    item.mean_cp,
                    item.mean_odds,
                    item.treeLevel,
                    item.id
                ])
        return json_data
@cache_page(None)
def pubSingle(request, num):
    """Render the publication list for a single search set / concept pair.

    *num* is '<p_id>_<c_id>_<s_id>': the concept/triple id, the Compare job
    id, and which search set ('1' = first) to show. Queries neo4j for the
    matching PubMed records and renders pubs_single.html.
    """
    session = driver.session()
    userInfo = "UserID:" + str(request.user.id) + " - "
    logger.debug(userInfo + "In pubSingle")
    [p_id, c_id, s_id] = num.split("_")
    logger.debug(p_id + ' : ' + c_id + ' : ' + s_id)
    c = Compare.objects.get(pk=c_id)
    # get year range data; a non-default range becomes a Cypher WHERE clause
    year1 = c.year_range.split("-")[0].strip()
    year2 = c.year_range.split("-")[1].strip()
    logger.debug('year1 = ' + year1 + ' year2 = ' + year2)
    yearString = ''
    if year1 != '1960' or year2 != '2019':
        yearString = "p.dcom >= '" + year1 + "' and p.dcom <= '" + year2 + "' and"
    # check user ids match; redirect home on mismatch
    if str(c.user_id) != str(request.user.id):
        logger.debug('wrong user access - user id = ' + str(request.user.id) + ' data id = ' + c.user_id)
        return HttpResponseRedirect('/')
    ss = ''
    # job_desc is '<set1> : <set2>'; pick the requested half and append the
    # owner id to get the neo4j SearchSet node name
    if s_id == '1':
        ss = c.job_desc.split(":")[0].strip() + "_" + c.user_id
    else:
        ss = c.job_desc.split(":")[1].strip() + "_" + c.user_id
    jobType = c.job_type
    # NOTE(review): gCom is built by string concatenation from stored values —
    # if any of these can contain quotes this is Cypher injection; confirm.
    if jobType == "meshMain":
        gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:HAS_MESH{mesh_type:'main'}]->(m:Mesh) where " + yearString + " s.name = '" + ss + "' and m.mesh_id = '" + p_id.replace(':', '/') + "' return s.name,p.pmid,p.dcom,m.mesh_name as sname;"
    elif jobType == "semmed_t" or jobType == 'semmed':
        gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:SEM]->(sdb:SDB_triple) where " + yearString + " s.name = '" + ss + "' and sdb.pid = " + p_id + " return s.name,p.pmid,p.dp,sdb.s_name as sname;"
    elif jobType == "semmed_c":
        gCom = "match (s:SearchSet)-[r:INCLUDES]-(p:Pubmed)-[:SEM]-(st:SDB_triple)-[:SEMS|:SEMO]-(si:SDB_item) where " + yearString + " s.name = '" + ss + "' and si.name = '" + p_id + "' return s.name,p.pmid,p.dcom,si.name as sname;"
    logger.debug(userInfo + "gCom:" + gCom)
    pAllDic = {}   # pmid -> publication date (later -> (title, journal, date))
    pDic = {}      # search-set name -> list of pmids
    pmidList = []
    for res in session.run(gCom):
        # NOTE(review): ss and sName are rebound on every row; if the query
        # returns no rows sName is never bound and the context build below
        # raises NameError — presumably the query always matches; confirm.
        ss = res[0].encode("ascii")
        pm = str(res[1])
        pd = res[2].encode("ascii")
        pmidList.append(pm)
        sName = res['sname']
        pAllDic[pm] = pd
        if ss in pDic:
            a = pDic[ss]
            if pm not in a:
                a.append(pm)
        else:
            pDic[ss] = [pm]
    # get titles/journals and fold them into pAllDic as (title, journal, date)
    ptDic, pjDic = pmid_to_info(pmidList)
    for i in pAllDic:
        a = pAllDic[i]
        t = 'n/a'
        j = 'n/a'
        if i in ptDic:
            t = ptDic[i]
        if i in pjDic:
            j = pjDic[i]
        b = (t, j, a)
        pAllDic[i] = b
    sDic = {}
    sList = list()
    for i in pDic[ss]:
        e = {'pmid': i}
        sList.append(e)
    # drop the trailing '_<user_id>' suffix for display
    ss_name = ss.rsplit("_", 1)[0]
    context = {'sList': sList, 'ss_name': ss_name, 'tab': 'single', 'mName': sName, 'pAllDic': pAllDic, 'nbar': 'results'}
    session.close()
    return render_to_response('pubs_single.html', context, context_instance=RequestContext(request))
@cache_page(None)
def pubDetails(request, num):
    """Render the publication lists (set1-only / set2-only / shared) for one
    Overlap row.

    *num* is '<overlap_pk>_<tab>' where tab 0/1/2 selects which tab opens.
    Queries neo4j for the PubMed records behind the overlap and renders
    pubs.html.
    """
    session = driver.session()
    userInfo = "UserID:" + str(request.user.id) + " - "
    logger.debug(userInfo + "In pubDetails")
    p_id = num.split("_")[0]
    tab = num.split("_")[1]
    # map the numeric tab index onto the template's tab names
    if tab == '0':
        tab = 's1'
    elif tab == '1':
        tab = 's2'
    elif tab == '2':
        tab = 'shared'
    o = Overlap.objects.get(pk=p_id)
    m = o.mc_id
    logger.debug(m)
    c = Compare.objects.get(pk=m.id)
    # get year range data; a non-default range becomes a Cypher WHERE clause
    year1 = c.year_range.split("-")[0].strip()
    year2 = c.year_range.split("-")[1].strip()
    logger.debug('year1 = ' + year1 + ' year2 = ' + year2)
    yearString = ''
    if year1 != '1960' or year2 != '2019':
        yearString = "p.dcom >= '" + year1 + "' and p.dcom <= '" + year2 + "' and"
    # check user ids match; redirect home on mismatch
    if str(c.user_id) != str(request.user.id):
        logger.debug('wrong user access - user id = ' + str(request.user.id) + ' data id = ' + c.user_id)
        return HttpResponseRedirect('/')
    # neo4j SearchSet node names: '<set name>_<user id>'
    ss1 = c.job_desc.split(":")[0].strip() + "_" + c.user_id
    ss2 = c.job_desc.split(":")[1].strip() + "_" + c.user_id
    mName = o.name.split(":")[0].split('(', 1)[0].strip()
    jobType = m.job_type
    # NOTE(review): gCom is built by string concatenation from stored values —
    # potential Cypher injection if names can contain quotes; confirm.
    if jobType == "meshMain":
        gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:HAS_MESH{mesh_type:'main'}]->(m:Mesh) where " + yearString + " s.name in ['" + ss1 + "','" + ss2 + "'] and m.mesh_name = '" + mName + "' return s.name,p.pmid,p.dcom;"
    elif jobType == "notMeshMain":
        gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:HAS_MESH]->(m:Mesh) where " + yearString + " s.name in ['" + ss1 + "','" + ss2 + "'] and m.mesh_name = '" + mName + "' return s.name,p.pmid,p.dcom;"
    elif jobType == "semmed_t" or jobType == 'semmed':
        # overlap name format:
        # '<s1>||<pred>||<obj>||<pid>:<s2>||<pred>||<obj>||<pid>'
        sem_1_ID = o.name.split(":")[0].split("||")[3]
        sem_2_ID = o.name.split(":")[1].split("||")[3]
        t1 = o.name.split(":")[0].split("||")[0]
        t2 = o.name.split(":")[0].split("||")[1]
        t3 = o.name.split(":")[0].split("||")[2]
        t4 = o.name.split(":")[1].split("||")[0]
        t5 = o.name.split(":")[1].split("||")[1]
        t6 = o.name.split(":")[1].split("||")[2]
        logger.debug(t1 + "|" + t6)
        mName = t1 + " || " + t2 + " || " + t3 + " || " + t5 + " || " + t6
        gCom = "match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:SEM]->(sdb:SDB_triple) where " + yearString + " s.name = '" + ss1 + "' and sdb.pid = " + sem_1_ID + " return s.name,p.pmid,p.dp " \
            "UNION match (s:SearchSet)<-[r:INCLUDES]->(p:Pubmed)<-[h:SEM]->(sdb:SDB_triple) where " + yearString + " s.name = '" + ss2 + "' and sdb.pid = " + sem_2_ID + " return s.name,p.pmid,p.dp;"
    elif jobType == "semmed_c":
        gCom = "match (s:SearchSet)-[r:INCLUDES]-(p:Pubmed)-[:SEM]-(st:SDB_triple)-[:SEMS|:SEMO]-(si:SDB_item) where " + yearString + " s.name in ['" + ss1 + "','" + ss2 + "'] and si.name = '" + mName + "' return s.name,p.pmid,p.dcom;"
    logger.debug(userInfo + "gCom:" + gCom)
    pAllDic = {}   # pmid -> publication date (later -> (title, journal, date))
    pDic = {}      # search-set name -> list of pmids
    pmidList = []
    for res in session.run(gCom):
        ss = res[0].encode("ascii")
        pm = str(res[1])
        pd = res[2].encode("ascii")
        pmidList.append(pm)
        pAllDic[pm] = pd
        if ss in pDic:
            a = pDic[ss]
            if pm not in a:
                a.append(pm)
        else:
            pDic[ss] = [pm]
    # get titles/journals and fold them into pAllDic as (title, journal, date)
    ptDic, pjDic = pmid_to_info(pmidList)
    for i in pAllDic:
        a = pAllDic[i]
        t = 'n/a'
        j = 'n/a'
        if i in ptDic:
            t = ptDic[i]
        if i in pjDic:
            j = pjDic[i]
        b = (t, j, a)
        pAllDic[i] = b
    # split pmids into set1-only / set2-only / shared
    sDic = {}
    s1List = list()
    s2List = list()
    shareList = list()
    for i in pDic:
        # NOTE(review): this loop recomputes the same three entries on every
        # iteration, and raises KeyError if either set matched no pubs —
        # presumably both always match; confirm.
        j1 = pDic[ss1]
        j2 = pDic[ss2]
        sDic['o'] = list(set(j1).intersection(j2))
        sDic[ss1] = list(set(j1) - set(j2))
        sDic[ss2] = list(set(j2) - set(j1))
    if 'o' in sDic:
        for i in sDic['o']:
            e = {'pmid': i}
            shareList.append(e)
    if ss1 in sDic:
        for i in sDic[ss1]:
            e = {'pmid': i}
            s1List.append(e)
    if ss2 in sDic:
        for i in sDic[ss2]:
            e = {'pmid': i}
            s2List.append(e)
    # drop the trailing '_<user_id>' suffix for display
    ss1_name = ss1.rsplit("_", 1)[0]
    ss2_name = ss2.rsplit("_", 1)[0]
    context = {'s1': s1List, 's2': s2List, 'share': shareList, 'ss1': ss1_name, 'ss2': ss2_name, 'tab': tab, 'mName': mName, 'pAllDic': pAllDic, 'nbar': 'results'}
    session.close()
    return render_to_response('pubs.html', context, context_instance=RequestContext(request))
def get_task_status(task_id):
    """Look up a celery task by id and summarise it as a status dict.

    Returns {'status', 'progress', 'stage'} where progress is 0-100.
    """
    task = db_citations.AsyncResult(task_id)
    status = task.status
    progress = 0
    stage = ""
    if status == 'SUCCESS':
        progress = 100
        stage = 'Complete'
    elif status == 'FAILURE':
        stage = "Failed"
    elif status == 'PROGRESS':
        # in-flight tasks report their own progress/stage via task.info
        progress = task.info['progress']
        stage = task.info['stage']
    return {'status': status, 'progress': progress, 'stage': stage}
def ajax_share(request):
    """Toggle the `share` flag on a Compare result identified by its hash id.

    Expects GET params `resID` (hash id) and `status` ('True' to share).
    Responds with an empty JSON object.
    """
    resID = request.GET['resID']
    status = request.GET['status']
    if status == 'True':
        logger.debug('Sharing results - ' + resID)
        Compare.objects.filter(hash_id=resID).update(share=True)
    else:
        logger.debug('Unsharing results - ' + resID)
        Compare.objects.filter(hash_id=resID).update(share=False)
    mimetype = 'application/json'
    return HttpResponse(json.dumps({}), mimetype)
def export_to_csv(request, queryset, fields, resID):
    """Serve *queryset* as a zipped CSV attachment.

    Writes one header row of *fields* followed by one row per object, zips
    the CSV in memory and returns it as melodi_result_<resID>.csv.zip.
    """
    csv_name = 'melodi_result_' + str(resID) + '.csv'
    buf = StringIO.StringIO()  # in-memory CSV buffer
    writer = csv.writer(buf, dialect='excel')
    writer.writerow(fields)
    for record in queryset:
        writer.writerow([getattr(record, field) for field in fields])
    response = HttpResponse(content_type='application/zip')
    response['Content-Disposition'] = 'attachment;filename=' + csv_name + '.zip'
    archive = zipfile.ZipFile(response, 'w')     # zip written into response
    archive.writestr(csv_name, buf.getvalue())   # csv file inside the zip
    return response
def download_result(request):
    """Download a Compare result as zipped CSV, optionally with the page's
    current filters ('filt') applied.

    POST params: resID (Compare pk), type ('st' triples / 'mesh' / 'sc'),
    download_res ('filt' for filtered), plus the filt_results_* filter values.
    """
    resID = request.POST.get('resID')
    type = request.POST.get('type')  # NOTE: shadows the builtin `type`
    res_type = request.POST.get('download_res')
    logger.debug('Downloading - ' + str(resID) + ' : ' + type + ' : ' + res_type)
    resID = request.POST.get('resID')  # re-read; same values as above
    type = request.POST.get('type')
    qs = Overlap.objects.filter(mc_id_id=resID)
    if res_type == 'filt':
        # mirror the filtering logic of ajax_overlap.filter_queryset so the
        # download matches what is shown on screen
        logger.debug('Downloading filtered - ' + str(resID) + ' : ' + type)
        # filter using negative (exclude) search terms, JSON-encoded
        negVals = request.POST.get('filt_results_n', None)
        logger.debug(negVals)
        if negVals:
            negVals = json.loads(negVals)
            # deal with html entity escaping in the submitted terms
            negVals = HTMLParser.HTMLParser().unescape(negVals)
            if type == 'st':
                # dict keyed s1..s5; '||'-joined terms per name column
                for i in negVals:
                    if len(negVals[i]) > 0:
                        negList = negVals[i].split('||')
                        if i == 's1':
                            qs = qs.exclude(name1__in=negList)
                        elif i == 's2':
                            qs = qs.exclude(name2__in=negList)
                        elif i == 's3':
                            qs = qs.exclude(name3__in=negList)
                        elif i == 's4':
                            qs = qs.exclude(name4__in=negList)
                        elif i == 's5':
                            qs = qs.exclude(name5__in=negList)
            else:
                if len(negVals) > 0:
                    logger.debug('filtering on negVals ' + negVals)
                    qs = qs.exclude(name__iregex=r'' + negVals + '')
                    negList = negVals.split('||')  # unused afterwards
        # filter using positive (include) search terms, JSON-encoded
        posVals = request.POST.get('filt_results_p', None)
        if posVals:
            posVals = json.loads(posVals)
            posVals = HTMLParser.HTMLParser().unescape(posVals)
            if type == 'st':
                for i in posVals:
                    if len(posVals[i]) > 0:
                        posList = posVals[i].split('||')
                        if i == 's1':
                            qs = qs.filter(name1__in=posList)
                        elif i == 's2':
                            qs = qs.filter(name2__in=posList)
                        elif i == 's3':
                            qs = qs.filter(name3__in=posList)
                        elif i == 's4':
                            qs = qs.filter(name4__in=posList)
                        elif i == 's5':
                            qs = qs.filter(name5__in=posList)
            else:
                if len(posVals) > 0:
                    # NOTE(review): unlike ajax_overlap this uses an exact
                    # name__in match rather than a regex — confirm intended
                    posList = posVals.split('||')
                    logger.debug('filtering on posVals')
                    qs = qs.filter(name__in=posList)
        # filter using sliders ('NaN' means the slider was untouched)
        pval = request.POST.get('filt_results_pval', None)
        odds = request.POST.get('filt_results_odds', None)
        pfr = request.POST.get('filt_results_pfr', None)
        logger.debug('pval:' + pval + ' odds:' + odds + ' pfr:' + pfr)
        if pval and pval != 'NaN':
            qs = qs.filter(mean_cp__lte=pval)
        if odds and odds != 'NaN':
            qs = qs.filter(mean_odds__gte=odds)
        if pfr and pfr != 'NaN':
            qs = qs.filter(treeLevel__gte=pfr)
        logger.debug('len(qs)=' + str(len(qs)))
    # remove ids from names and pick the column set for the analysis type
    if type == 'st':
        return export_to_csv(request, qs, fields=('name1', 'name2', 'name3', 'name4', 'name5', 'mean_cp', 'mean_odds', 'uniq_a', 'uniq_b', 'shared', 'score', 'treeLevel'), resID=resID)
    elif type == 'mesh':
        for c in qs:
            c.name = c.name.rsplit(":", 1)[0]
        return export_to_csv(request, qs, fields=('name', 'mean_cp', 'mean_odds', 'uniq_a', 'uniq_b', 'shared', 'score', 'treeLevel'), resID=resID)
    elif type == 'sc':
        for c in qs:
            c.name = c.name.rsplit(":", 1)[0]
        return export_to_csv(request, qs, fields=('name', 'mean_cp', 'mean_odds', 'uniq_a', 'uniq_b', 'shared', 'score'), resID=resID)
def download_filter(request):
    """Send the posted filter term list back to the browser as a text file.

    POST params: fList (the filter text), resID and fType (used to build the
    download filename '<resID><fType>-filter.txt').
    """
    fList = request.POST.get('fList')
    resID = request.POST.get('resID')
    fType = request.POST.get('fType')
    # FIX: the old guard was `fList != type(None)`, comparing the value to the
    # NoneType *class* — always True for any string, and a TypeError on
    # len(None) when fList was missing. Test for None explicitly instead.
    if fList is not None and len(fList) > 0:
        response = HttpResponse(fList, content_type='application/force-download')
        # FIX: apply %s to the whole filename; previously only resID was
        # substituted, leaving a stray '"' in the middle of the filename.
        response['Content-Disposition'] = ('attachment; filename="%s"'
                                           % (resID + fType + '-filter.txt'))
        return response
    # previously fell off the end (implicit None), which makes Django raise;
    # return an empty response instead.
    return HttpResponse()
def upload_filter(request):
    """Placeholder endpoint for filter-file uploads; returns an empty JSON object."""
    logger.debug('uploading filter file')
    return HttpResponse(json.dumps({}), 'application/json')
def save_filter(request):
    """Persist the page's current term filters as Filters rows.

    GET params: resID (Compare pk), type, nsTerm / psTerm (JSON dicts keyed
    s1..s5). Returns an empty HttpResponse.
    """
    logger.debug('saving filters')
    resID = request.GET.get('resID')
    com = Compare.objects.get(pk=resID)
    type = request.GET.get('type')  # NOTE: shadows the builtin `type`
    negVals = json.loads(request.GET.get('nsTerm'))
    posVals = json.loads(request.GET.get('psTerm'))
    logger.debug('resID : ' + resID + " type : " + type)
    logger.debug('nsTerm ' + str(negVals))
    logger.debug('psTerm ' + str(posVals))
    fCount = 0
    # NOTE(review): only negative terms and only type 'st' are persisted;
    # posVals is parsed but never saved — looks unfinished, confirm intent.
    if type == 'st':
        for i in negVals:
            if len(negVals[i]) > 0:
                neg = negVals[i]
                logger.debug(i + ":" + neg)
                # keys are 's1'..'s5'; the digit is the column location
                loc = int(i[1])
                f = Filters(com_id=com.id, version=1, type=type, num=fCount, value=neg, location=loc, ftype='neg')
                f.save()
                fCount += 1
    context = {}  # unused; response body is empty
    return HttpResponse()
def ajax_delete(request):
    """Delete a user's analysis (type 'AS' = SearchSet) and all derived data:
    MySQL rows, the neo4j SearchSet node, and the on-disk FET files.

    Anonymous users (demo data) are refused. Always returns an empty response.
    """
    logger.debug('user_id = ' + str(request.user.id))
    if str(request.user.id) == 'None':
        logger.debug('Someone is trying to delete the demo data!')
    else:
        session = driver.session()
        id = request.GET['id']      # NOTE: shadows the builtin `id`
        type = request.GET['type']  # NOTE: shadows the builtin `type`
        logger.debug('Deleting id ' + id + ' for type ' + type)
        if type == 'AS':
            s = SearchSet.objects.get(pk=id)
            user_id = s.user_id
            name = s.job_name
            # check user ids match — silently do nothing on mismatch
            if str(user_id) != str(request.user.id):
                logger.debug('wrong user access - user id = ' + str(request.user.id) + ' data id = ' + user_id)
            else:
                # delete from mysql: the search set and any compare jobs
                # whose job_name references it ('<id>_...' or exactly '<id>')
                s.delete()
                Compare.objects.filter(job_name__contains=id + '_').delete()
                Compare.objects.filter(job_name=id).delete()
                # delete from neo4j
                # NOTE(review): Cypher built by concatenation — injection risk
                # if job names can contain quotes; confirm upstream validation.
                com = "match (s:SearchSet)-[r]-(p:Pubmed) where s.name = '" + name + "_" + user_id + "' delete s,r;"
                logger.debug(com)
                session.run(com)
                session.close()
                # delete FET data
                # NOTE(review): shell=True with a request-supplied id is a
                # command-injection surface — confirm `id` is validated as an
                # integer before this runs.
                com = 'rm -r ' + tmpDir + 'saved_data/fet/' + id + '_*'
                logger.debug(com)
                subprocess.call(com, shell=True)
    return HttpResponse()
def temmpo(request):
    """Render the TeMMPo-style analysis form, or bounce anonymous users home."""
    if str(request.user.id) == 'None':
        return HttpResponseRedirect('/')
    # only completed search sets can be used as analysis inputs
    completed = SearchSet.objects.filter(user_id=str(request.user.id),
                                         job_status='Complete')
    return render_to_response('temmpo.html', {'s': completed},
                              context_instance=RequestContext(request))
def temmpo_res(request):
    """Queue a TeMMPo-style comparison of two search sets against an uploaded
    intermediate-term file, then redirect to the jobs page.

    POST params: as1/as2 (search set job names) and the 'intFile' upload.
    """
    if str(request.user.id) == 'None':
        return HttpResponseRedirect('/')
    else:
        user = str(request.user.id)
        logger.debug('user = ' + user)
        as1 = request.POST.get('as1')
        as2 = request.POST.get('as2')
        s1 = SearchSet.objects.get(job_name=as1, user_id=str(request.user.id))
        s2 = SearchSet.objects.get(job_name=as2, user_id=str(request.user.id))
        int_file = request.FILES['intFile']
        # turn the uploaded newline-separated term list into a quoted,
        # comma-separated string for the task.
        # NOTE(review): the [:-2] slice appears to drop the trailing ",'"
        # left by a final newline — breaks if the file has no trailing
        # newline; confirm expected file format.
        intData = int_file.read().replace("\n", "','")[:-2]
        logger.debug("Running temmpo style analysis on " + str(as1) + " and " + str(as2))
        jobDesc = as1 + " : " + as2
        jobName = str(s1.id) + "_" + str(s2.id) + "_2019"
        try:
            jCheck = Compare.objects.get(job_name=jobName, job_desc=jobDesc + " : " + str(int_file), user_id=str(request.user.id), job_type='Temmpo')
            # delete entry if not complete and resubmitted
            if jCheck.job_progress != 100:
                jCheck.delete()
                jCheck = False
        except ObjectDoesNotExist:
            jCheck = False
        if jCheck == False:
            # create the Compare record and hand the work to celery
            q = Compare(user_id=str(request.user.id), job_desc=jobDesc + " : " + str(int_file), year_range='1950 - 2019', job_name=jobName, job_start=time.strftime("%Y-%m-%d %H:%M:%S"), job_status='Pending', job_type='Temmpo', job_progress=0)
            q.save()
            j = temmpo_task.delay(q.id, intData)
        return HttpResponseRedirect(reverse('jobs'))
| MRCIEU/melodi | browser/views.py | Python | mit | 51,244 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 12:55:05 2015
@author: mannion2
"""
import scipy as sp
import numpy as np
import random as r
import matplotlib.pyplot as plt
from bisect import bisect_left
check = 1
def Choice(cdf, x):
    """Draw one random sample from the distribution described by *cdf*.

    *cdf* is a monotonically increasing sequence of cumulative probabilities
    and *x* the corresponding sample points. A uniform random value in [0, 1)
    is located in *cdf* with bisection and the matching *x* value is estimated
    by linear interpolation between the two nearest CDF points.

    Returns the interpolated sample value (float).
    """
    choice = r.random()
    pos = bisect_left(cdf, choice)
    # FIX: the three cases are now a proper if/elif/else chain. Previously the
    # pos == 0 case fell through into the comparison branch, overwriting
    # pos_next and indexing cdf[pos - 1] == cdf[-1] (wrap-around to the last
    # element) for a draw below the first CDF value.
    if pos == 0:
        pos_next = 1
    elif pos == len(cdf) or pos == len(cdf) - 1:
        # draw at/above the top of the CDF: clamp to the last interval
        pos = len(cdf) - 1
        pos_next = len(cdf) - 2
    else:
        # pick whichever neighbouring CDF point is closer to the draw
        if abs(choice - cdf[pos - 1]) < abs(choice - cdf[pos + 1]):
            pos_next = pos - 1
        else:
            pos_next = pos + 1
    # linear interpolation between these points to estimate the result
    m = abs(cdf[pos] - cdf[pos_next]) / abs(x[pos] - x[pos_next])
    value = ((choice - cdf[pos]) / m) + x[pos]
    return value
| mannion9/Intro-to-Python | Specific Methods/Normalizing-a-PDF.py | Python | mit | 930 |
"""
test_zipballs.py: tests related to core.zipballs module
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import os
try:
from unittest import mock
except:
import mock
import pytest
from librarian.core import zipballs as mod
@mock.patch.object(mod.metadata, 'process_meta')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
@mock.patch.object(mod.json, 'load')
def test_get_metadata(load, ZipFile, process_meta):
    """get_metadata() must open '<path>/info.json' inside the zipball, parse
    it with json.load and pass the parsed dict through metadata.process_meta,
    returning the processed result."""
    z = ZipFile('foo.zip')
    path = 'foo'
    ret = mod.get_metadata(z, path)
    z.open.assert_called_once_with('foo/info.json')
    zcontext = z.open.return_value.__enter__  # open is a context manager
    load.assert_called_once_with(zcontext.return_value, 'utf8')
    process_meta.assert_called_once_with(load.return_value)
    assert ret == process_meta.return_value
def test_validate_no_path(*ignored):
    """validate() must reject an empty or missing path."""
    for bad_path in ['', None]:
        with pytest.raises(mod.ValidationError):
            mod.validate(bad_path)
@mock.patch.object(mod.os.path, 'exists')
def test_validate_path_does_not_exists(exists):
    """validate() must reject a path that is not on disk."""
    exists.return_value = False
    missing_path = '/var/spool/downloads/foo.txt'
    with pytest.raises(mod.ValidationError):
        mod.validate(missing_path)
@mock.patch.object(mod, 'get_metadata')
def test_validate_wrong_extension(*ignored):
    """validate() must reject files whose extension is not .zip."""
    non_zip_path = '/var/spool/downloads/foo.txt'
    with pytest.raises(mod.ValidationError):
        mod.validate(non_zip_path)
@mock.patch.object(mod, 'get_metadata')
def test_validate_zipball_wrong_name(*ignored):
    """validate() must reject zipballs whose basename is not an MD5 hash."""
    badly_named = '/var/spool/downloads/content/foo.zip'
    with pytest.raises(mod.ValidationError):
        mod.validate(badly_named)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
def test_validate_zipball_not_zipfile(is_zipfile, *ignored):
    """validate() must reject a path that is not a real zip archive."""
    is_zipfile.return_value = False
    zip_path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    with pytest.raises(mod.ValidationError):
        mod.validate(zip_path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
def test_validate_zipball_has_no_md5_dir(ZipFile, is_zipfile, *ignored):
    """validate() must reject a zipball without a top-level md5 directory."""
    is_zipfile.return_value = True
    # empty archive: no md5-named directory entry at all
    ZipFile.return_value.namelist.return_value = []
    zip_path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    with pytest.raises(mod.ValidationError):
        mod.validate(zip_path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
def test_validate_zipball_contains_non_dir_md5(ZipFile, is_zipfile, *ignored):
    """validate() must reject a zipball whose md5 entry is a file, not a dir."""
    is_zipfile.return_value = True
    # no trailing slash => the md5-named entry is a plain file
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544']
    zip_path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    with pytest.raises(mod.ValidationError):
        mod.validate(zip_path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
def test_validate_zipball_contains_info_json(ZipFile, is_zipfile,
                                             get_metadata):
    """ If zipball doesn't contain info.json, returns False """
    # NOTE(review): the test name says "contains_info_json" but the scenario
    # is the *missing* info.json case (get_metadata raises KeyError, which is
    # what mod.get_metadata does when z.open misses the member).
    is_zipfile.return_value = True
    path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544/']
    get_metadata.side_effect = KeyError()
    with pytest.raises(mod.ValidationError):
        mod.validate(path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
def test_validate_zipball_no_valid_meta(ZipFile, is_zipfile, get_metadata):
    """validate() must reject a zipball whose metadata cannot be parsed."""
    is_zipfile.return_value = True
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544/']
    # unparseable JSON surfaces from get_metadata as ValueError
    get_metadata.side_effect = ValueError()
    zip_path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    with pytest.raises(mod.ValidationError):
        mod.validate(zip_path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
def test_validate_zipball_invalid_meta(ZipFile, is_zipfile, get_metadata):
    """validate() must reject a zipball whose metadata fails key validation."""
    is_zipfile.return_value = True
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544/',
        '202ab62b551f6d7fc002f65652525544/index.html']
    # invalid metadata keys surface as MetadataError
    get_metadata.side_effect = mod.metadata.MetadataError('msg', {})
    zip_path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    with pytest.raises(mod.ValidationError):
        mod.validate(zip_path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
def test_validate_zipball_contains_index_html(ZipFile, is_zipfile,
                                              get_metadata, metadata):
    # NOTE(review): despite the name, the archive listing below has *no*
    # index.html — this exercises the missing-index rejection path.
    # `metadata` is presumably a pytest fixture with valid meta — confirm in
    # the project's conftest.
    is_zipfile.return_value = True
    path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544/']
    get_metadata.return_value = metadata
    with pytest.raises(mod.ValidationError):
        mod.validate(path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
def test_validate_zipball_contains_wrong_index_html(ZipFile, is_zipfile,
                                                    get_metadata, metadata):
    """validate() must reject a zipball whose meta points at a missing index."""
    is_zipfile.return_value = True
    # archive has index.html at the root, but meta says foo/index.html
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544/',
        '202ab62b551f6d7fc002f65652525544/index.html']
    meta = {'index': 'foo/index.html'}
    meta.update(metadata)
    get_metadata.return_value = meta
    zip_path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    with pytest.raises(mod.ValidationError):
        mod.validate(zip_path)
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
@mock.patch.object(mod.os.path, 'exists')
def test_validate_zipball_valid(exists, ZipFile, is_zipfile, get_metadata,
                                metadata):
    """
    A well-formed zipball with valid metadata passes validation and the
    metadata dict is returned
    """
    exists.return_value = True
    is_zipfile.return_value = True
    path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    # Archive contains both the md5 dir and the default index.html.
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544/',
        '202ab62b551f6d7fc002f65652525544/index.html']
    get_metadata.return_value = metadata
    assert mod.validate(path) is get_metadata.return_value
@mock.patch.object(mod, 'get_metadata')
@mock.patch.object(mod.zipfile, 'is_zipfile')
@mock.patch.object(mod.zipfile, 'ZipFile', autospec=True)
@mock.patch.object(mod.os.path, 'exists')
def test_validate_zipball_valid_with_index(exists, ZipFile, is_zipfile,
                                           get_metadata, metadata):
    """
    A custom 'index' path in the metadata is honored when the named file
    actually exists inside the archive
    """
    exists.return_value = True
    is_zipfile.return_value = True
    path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    # The archive carries the index at the custom foo/ location.
    ZipFile.return_value.namelist.return_value = [
        '202ab62b551f6d7fc002f65652525544/',
        '202ab62b551f6d7fc002f65652525544/foo/index.html']
    get_metadata.return_value = {'index': 'foo/index.html'}
    get_metadata.return_value.update(metadata)
    assert mod.validate(path) is get_metadata.return_value
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.os, 'rename')
def test_backup_returns_early_if_nothing_to_do(rename, exists):
    """ backup() is a no-op when the source path does not exist """
    exists.return_value = False
    mod.backup('foo')
    assert rename.called is False
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.os, 'rename')
def test_backup_normalizes(rename, exists):
    """ Backslash-separated input is normalized to the platform separator """
    exists.return_value = True
    mod.backup('\\foo\\bar\\baz')
    normalized = os.sep.join(['', 'foo', 'bar', 'baz'])
    rename.assert_called_once_with(normalized, normalized + '.backup')
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.os, 'rename')
def test_backup(rename, exists):
    """ backup() renames the path, appending a .backup suffix """
    src = 'foo'
    exists.return_value = True
    mod.backup(src)
    rename.assert_called_once_with(src, src + '.backup')
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.os, 'rename')
def test_backup_returns_target_path(rename, exists):
    """ backup() reports where the backed-up file/dir ended up """
    exists.return_value = True
    result = mod.backup('foo')
    assert result == 'foo.backup'
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.os, 'rename')
def test_backup_returns_none_if_no_backup_is_done(rename, exists):
    """ None is returned when the source path does not exist """
    exists.return_value = False
    assert mod.backup('foo') is None
@mock.patch.object(mod.shutil, 'rmtree')
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.shutil, 'move')
@mock.patch.object(mod, 'backup')
@mock.patch.object(mod.zipfile, 'ZipFile')
@mock.patch.object(mod.content, 'to_path')
def test_extract_success(to_path, ZipFile, backup, move, exists, rmtree):
    """
    extract() unzips into the target dir, backs up any pre-existing content
    dir, moves the extracted tree into place, then discards the backup
    """
    path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    target = '/srv/zipballs'
    # Where the zip's top-level md5 directory lands right after extraction.
    extract_path = '/srv/zipballs/202ab62b551f6d7fc002f65652525544'
    # Final nested content path derived from the md5 by content.to_path().
    content_path = '/srv/zipballs/202/ab6/2b5/51f/6d7/fc0/02f/656/525/255/44'
    to_path.return_value = content_path
    backup.return_value = '/path/to/cont.backup'
    exists.return_value = True
    assert mod.extract(path, target) == content_path
    ZipFile.assert_called_once_with(path)
    ZipFile.return_value.extractall.assert_called_once_with(target)
    backup.assert_called_once_with(content_path)
    move.assert_called_once_with(extract_path, content_path)
    # The old content was backed up, then the backup removed once the
    # extracted tree was safely in place.
    exists.assert_called_once_with(backup.return_value)
    rmtree.assert_called_once_with(backup.return_value)
@mock.patch.object(mod.shutil, 'rmtree')
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.shutil, 'move')
@mock.patch.object(mod, 'backup')
@mock.patch.object(mod.zipfile, 'ZipFile')
@mock.patch.object(mod.content, 'to_path')
def test_extract_fail(to_path, ZipFile, backup, move, exists, rmtree):
    """
    When extraction blows up, extract() cleans up the partially extracted
    tree, propagates the error, and never touches the existing content
    """
    path = '/var/spool/downloads/content/202ab62b551f6d7fc002f65652525544.zip'
    target = '/srv/zipballs'
    extract_path = '/srv/zipballs/202ab62b551f6d7fc002f65652525544'
    content_path = '/srv/zipballs/202/ab6/2b5/51f/6d7/fc0/02f/656/525/255/44'
    to_path.return_value = content_path
    # Simulate a failure in the middle of extraction.
    ZipFile.return_value.extractall.side_effect = OSError()
    exists.return_value = True
    with pytest.raises(OSError):
        mod.extract(path, target)
    ZipFile.assert_called_once_with(path)
    ZipFile.return_value.extractall.assert_called_once_with(target)
    # The half-written extraction dir is removed...
    exists.assert_called_once_with(extract_path)
    rmtree.assert_called_once_with(extract_path)
    # ...and the live content dir is left completely untouched.
    assert not backup.called
    assert not move.called
def test_get_zip_path():
    """ get_zip_path() builds <basedir><md5>.zip for valid, existing md5s """
    content_hash = '202ab62b551f6d7fc002f65652525544'
    root = '/content/path/'
    with mock.patch('os.path.exists') as exists:
        exists.return_value = True
        expected = '{0}{1}.zip'.format(root, content_hash)
        assert mod.get_zip_path(content_hash, root) == expected
        # A name that is not a valid md5 yields no path at all.
        assert mod.get_zip_path('invalid', root) is None
@mock.patch.object(mod.content, 'filewalk')
@mock.patch.object(mod.zipfile, 'ZipFile')
def test_create(ZipFile, filewalk):
    """
    create() zips every file under the content dir, flattening the nested
    md5 path into a single <md5>/ prefix inside the archive
    """
    md5 = '202ab62b551f6d7fc002f65652525544'
    basedir = '/content/path/'
    path1 = basedir + '202/ab6/2b5/51f/6d7/fc0/02f/656/525/255/44/index.html'
    path2 = basedir + '202/ab6/2b5/51f/6d7/fc0/02f/656/525/255/44/s/img.jpg'
    filewalk.return_value = [path1, path2]
    # ZipFile is used as a context manager; route __enter__ to our mock so
    # the writes can be observed.
    mocked_zip_obj = mock.Mock()
    ctx_manager = mock.MagicMock()
    ctx_manager.__enter__.return_value = mocked_zip_obj
    ZipFile.return_value = ctx_manager
    mod.create(md5, basedir)
    # Each source file must be stored under the flat in-archive name.
    mocked_zip_obj.write.assert_has_calls([
        mock.call(path1, '202ab62b551f6d7fc002f65652525544/index.html'),
        mock.call(path2, '202ab62b551f6d7fc002f65652525544/s/img.jpg')
    ])
def test_get_md5_from_path():
    """ The md5 is the zipball's file name without directory or extension """
    expected = 'mymd5'
    assert mod.get_md5_from_path('/path/to/mymd5.zip') == expected
@mock.patch.object(mod.zipfile, 'ZipFile')
@mock.patch('__builtin__.open')
def test_get_file_error_opening(file_open, ZipFile):
    """
    An IOError while opening the zipball is reported as ValidationError
    """
    file_open.side_effect = IOError()
    with pytest.raises(mod.ValidationError):
        mod.get_file('test/file.zip', 'image.jpg')
    # The archive is never parsed when the file cannot even be opened.
    assert not ZipFile.called
@mock.patch.object(mod.zipfile, 'ZipFile')
@mock.patch('__builtin__.open')
def test_get_file_invalid_zip(file_open, ZipFile):
    """
    A corrupt archive (BadZipfile) is reported as ValidationError
    """
    ZipFile.side_effect = mod.zipfile.BadZipfile()
    with pytest.raises(mod.ValidationError):
        mod.get_file('test/file.zip', 'image.jpg')
    # get_file() hands the opened file object to ZipFile, not the path.
    ZipFile.assert_called_once_with(file_open.return_value)
@mock.patch.object(mod.zipfile, 'ZipFile')
@mock.patch('__builtin__.open')
def test_get_file_read(file_open, ZipFile):
    """ With no_read=False the member's content is returned and both the
    member handle and the raw zipball handle are closed """
    member = mock.Mock(**{'read.return_value': 'some content'})
    ZipFile.return_value = mock.Mock(**{'open.return_value': member})
    raw_file = file_open.return_value
    result = mod.get_file('test/file.zip', 'file.ext', no_read=False)
    assert result == 'some content'
    assert raw_file.close.called
    assert member.close.called
    assert member.read.called
@mock.patch.object(mod.zipfile, 'ZipFile')
@mock.patch('__builtin__.open')
def test_get_file_no_read(file_open, ZipFile):
    """ With no_read=True the open member handle is returned untouched and
    nothing is closed on the caller's behalf """
    member = mock.Mock()
    ZipFile.return_value = mock.Mock(**{'open.return_value': member})
    raw_file = file_open.return_value
    result = mod.get_file('test/file.zip', 'file.ext', no_read=True)
    assert result is member
    assert raw_file.close.called is False
    assert member.close.called is False
    assert member.read.called is False
| karanisverma/feature_langpop | tests/core/test_zipballs.py | Python | gpl-3.0 | 14,877 |
# -*- coding: latin-1 -*-
# Author: adaur <adaur.underground@gmail.com>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import cookielib
import re
import urllib
from requests.utils import dict_from_cookiejar
import sickrage
from sickrage.core.caches.tv_cache import TVCache
from sickrage.core.helpers import bs4_parser
from sickrage.providers import TorrentProvider
class XthorProvider(TorrentProvider):
    """Torrent provider for the Xthor private tracker."""

    def __init__(self):
        super(XthorProvider, self).__init__("Xthor", "http://xthor.bz", True)

        self.urls.update({
            'search': "{base_url}/browse.php?search=%s%s".format(**self.urls)
        })

        self.cj = cookielib.CookieJar()

        # Extra query-string arguments appended to every search request.
        self.categories = "&searchin=title&incldead=0"

        self.username = None
        self.password = None

        self.cache = TVCache(self, min_time=10)

    def login(self):
        """Authenticate against the tracker.

        :return: True when an authenticated session is (already) available,
            False otherwise.
        """
        # An existing session cookie means we are already logged in.
        if any(dict_from_cookiejar(sickrage.app.wsession.cookies).values()):
            return True

        login_params = {'username': self.username,
                        'password': self.password,
                        'submitme': 'X'}

        try:
            response = sickrage.app.wsession.post(self.urls['base_url'] + '/takelogin.php', data=login_params,
                                                  timeout=30).text
        except Exception:
            # BUGFIX: the message previously called str.format() on a string
            # with no placeholder, silently dropping the provider name.
            sickrage.app.log.warning("Unable to connect to provider {}".format(self.name))
            return False

        # A successful login lands on a page that links to donate.php.
        if not re.search('donate.php', response):
            # BUGFIX: same missing-placeholder problem as above.
            sickrage.app.log.warning(
                "Invalid username or password for provider {}. Check your settings".format(self.name))
            return False

        return True

    def search(self, search_params, age=0, ep_obj=None):
        """Run the given searches against the tracker.

        :param search_params: dict mapping search mode (e.g. 'RSS') to a
            list of search strings
        :param age: unused, kept for interface compatibility
        :param ep_obj: unused, kept for interface compatibility
        :return: list of result dicts (see parse())
        """
        results = []

        # check for auth
        if not self.login():
            return results

        for mode in search_params:
            sickrage.app.log.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:
                if mode != 'RSS':
                    sickrage.app.log.debug("Search string: %s " % search_string)

                searchURL = self.urls['search'] % (urllib.quote(search_string), self.categories)

                try:
                    data = sickrage.app.wsession.get(searchURL).text
                    results += self.parse(data, mode)
                except Exception:
                    sickrage.app.log.debug("No data returned from provider")

        return results

    def parse(self, data, mode):
        """
        Parse search results from data

        :param data: response data
        :param mode: search mode
        :return: search results
        """
        results = []

        with bs4_parser(data) as html:
            resultsTable = html.find("table", {"class": "table2 table-bordered2"})
            if not resultsTable:
                return results

            for row in resultsTable.findAll("tr"):
                try:
                    link = row.find("a", href=re.compile("details.php"))
                    if not link:
                        continue

                    title = link.text
                    download_url = self.urls['base_url'] + '/' + \
                                   row.find("a", href=re.compile("download.php"))['href']

                    # FIXME: size/seeders/leechers are not scraped from the
                    # result rows yet; placeholders keep the result usable.
                    size = -1
                    seeders = 1
                    leechers = 0

                    if not all([title, download_url]):
                        continue

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders,
                            'leechers': leechers, 'hash': ''}

                    if mode != 'RSS':
                        sickrage.app.log.debug("Found result: {}".format(title))

                    results.append(item)
                except Exception:
                    sickrage.app.log.error("Failed parsing provider.")

        return results
from django.contrib.auth import signals
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
from django.test.client import RequestFactory
from django.test import override_settings
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SignalTestCase(TestCase):
    """Tests for the auth signals: user_logged_in, user_logged_out and
    user_login_failed."""
    urls = 'django.contrib.auth.tests.urls'
    fixtures = ['authtestdata.json']
    def listener_login(self, user, **kwargs):
        # Records every user reported by the user_logged_in signal.
        self.logged_in.append(user)
    def listener_logout(self, user, **kwargs):
        # Records every user reported by the user_logged_out signal.
        self.logged_out.append(user)
    def listener_login_failed(self, sender, credentials, **kwargs):
        # Records the credentials dict reported by user_login_failed.
        self.login_failed.append(credentials)
    def setUp(self):
        """Set up the listeners and reset the logged in/logged out counters"""
        self.logged_in = []
        self.logged_out = []
        self.login_failed = []
        signals.user_logged_in.connect(self.listener_login)
        signals.user_logged_out.connect(self.listener_logout)
        signals.user_login_failed.connect(self.listener_login_failed)
    def tearDown(self):
        """Disconnect the listeners"""
        signals.user_logged_in.disconnect(self.listener_login)
        signals.user_logged_out.disconnect(self.listener_logout)
        signals.user_login_failed.disconnect(self.listener_login_failed)
    def test_login(self):
        """A failed login fires user_login_failed with cleansed credentials;
        a successful one fires user_logged_in."""
        # Only a successful login will trigger the success signal.
        self.client.login(username='testclient', password='bad')
        self.assertEqual(len(self.logged_in), 0)
        self.assertEqual(len(self.login_failed), 1)
        self.assertEqual(self.login_failed[0]['username'], 'testclient')
        # verify the password is cleansed
        self.assertTrue('***' in self.login_failed[0]['password'])
        # Like this:
        self.client.login(username='testclient', password='password')
        self.assertEqual(len(self.logged_in), 1)
        self.assertEqual(self.logged_in[0].username, 'testclient')
        # Ensure there were no more failures.
        self.assertEqual(len(self.login_failed), 1)
    def test_logout_anonymous(self):
        # The log_out function will still trigger the signal for anonymous
        # users.
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        self.assertEqual(self.logged_out[0], None)
    def test_logout(self):
        """Logging out a real user reports that user via user_logged_out."""
        self.client.login(username='testclient', password='password')
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        self.assertEqual(self.logged_out[0].username, 'testclient')
    def test_update_last_login(self):
        """Ensure that only `last_login` is updated in `update_last_login`"""
        user = User.objects.get(pk=3)
        old_last_login = user.last_login
        user.username = "This username shouldn't get saved"
        request = RequestFactory().get('/login')
        signals.user_logged_in.send(sender=user.__class__, request=request,
                                    user=user)
        # Reload from the DB: username unchanged, last_login bumped.
        user = User.objects.get(pk=3)
        self.assertEqual(user.username, 'staff')
        self.assertNotEqual(user.last_login, old_last_login)
| 912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/auth/tests/test_signals.py | Python | gpl-2.0 | 3,272 |
import sys
from PySide import QtGui
from view.main_window import KMainWindow
def main():
    """Create the Qt application, apply a widget style, and show the
    main window, then hand control to the Qt event loop."""
    app = QtGui.QApplication(sys.argv)

    # Available styles include "cleanlooks", "plastique", "motif",
    # "Windows", "CDE", "WindowsXP" and "WindowsVista"; the complete
    # list comes from QtGui.QStyleFactory.keys(). Remove this call to
    # keep the platform default.
    app.setStyle(QtGui.QStyleFactory.create("Cleanlooks"))

    window = KMainWindow("My Title")
    window.show()

    # Propagate the event loop's exit status as the process exit code.
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| katerina7479/kadre | main.py | Python | mit | 674 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: make Vote.election optional by allowing
    # blank/null values on its ForeignKey to stats.Election.
    dependencies = [
        ('stats', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='vote',
            name='election',
            field=models.ForeignKey(blank=True, to='stats.Election', null=True),
        ),
    ]
| lordzfc/wyborySam2014 | stats/migrations/0002_auto_20150919_1109.py | Python | mit | 419 |
from Screens.Screen import Screen
from Components.Sources.CanvasSource import CanvasSource
from Components.ActionMap import ActionMap, NumberActionMap
from Tools.Directories import fileExists
from enigma import gFont, getDesktop, gMainDC, eSize, RT_HALIGN_RIGHT, RT_WRAP
def RGB(r, g, b):
	"""Pack 8-bit red/green/blue components into a single 0xRRGGBB int."""
	return b | (g << 8) | (r << 16)
class OverscanTestScreen(Screen):
	"""Full-screen overscan test pattern.

	Optionally switches the framebuffer to the requested resolution and
	restores the original desktop resolution when the screen closes.
	Number keys / OK / EXIT close the screen with a value telling the
	caller what to do next.
	"""
	skin = """
	<screen position="fill">
		<ePixmap pixmap="skin_default/overscan.png" position="0,0" size="1920,1080" zPosition="1" alphatest="on" />
	</screen>"""
	def __init__(self, session, xres=1280, yres=720):
		Screen.__init__(self, session)
		# Remember the current desktop resolution so it can be restored.
		self.xres, self.yres = getDesktop(0).size().width(), getDesktop(0).size().height()
		if (self.xres, self.yres) != (xres, yres):
			# Switch only when needed; register the restore hook then.
			gMainDC.getInstance().setResolution(xres, yres)
			getDesktop(0).resize(eSize(xres, yres))
			self.onClose.append(self.__close)
		self["actions"] = NumberActionMap(["InputActions", "OkCancelActions"],
		{
			"1": self.keyNumber,
			"2": self.keyNumber,
			"3": self.keyNumber,
			"4": self.keyNumber,
			"5": self.keyNumber,
			"7": self.keyNumber,
			"ok": self.ok,
			"cancel": self.cancel
		})
	def __close(self):
		# Restore the resolution captured in __init__.
		gMainDC.getInstance().setResolution(self.xres, self.yres)
		getDesktop(0).resize(eSize(self.xres, self.yres))
	def ok(self):
		# Close with True: the caller advances to the next test picture.
		self.close(True)
	def cancel(self):
		self.close(False)
	def keyNumber(self, key):
		# Close with the pressed digit so the caller can jump directly.
		self.close(key)
class FullHDTestScreen(OverscanTestScreen):
	"""Full-HD (1920x1080) variant of the overscan test pattern.

	Reuses OverscanTestScreen's resolution switching but shows a
	different test image and maps keys 1-6 instead of 1-5/7.
	"""
	skin = """
	<screen position="fill">
		<ePixmap pixmap="skin_default/testscreen.png" position="0,0" size="1920,1080" zPosition="1" alphatest="on" />
	</screen>"""
	def __init__(self, session):
		OverscanTestScreen.__init__(self, session, 1920, 1080)
		self["actions"] = NumberActionMap(["InputActions", "OkCancelActions"],
		{
			"1": self.keyNumber,
			"2": self.keyNumber,
			"3": self.keyNumber,
			"4": self.keyNumber,
			"5": self.keyNumber,
			"6": self.keyNumber,
			"ok": self.ok,
			"cancel": self.cancel
		})
class VideoFinetune(Screen):
	"""Interactive test-pattern screen for fine-tuning TV picture settings.

	Number keys 1-7 jump directly to a test picture, OK advances to the
	picture registered as self.next, EXIT leaves the screen.
	"""
	skin = """
	<screen position="fill">
		<widget source="Canvas" render="Canvas" position="fill" />
	</screen>"""
	def __init__(self, session):
		Screen.__init__(self, session)
		self["Canvas"] = CanvasSource()
		# The eight fully saturated basic colors used by the color test.
		self.basic_colors = [RGB(255, 255, 255), RGB(255, 255, 0), RGB(0, 255, 255), RGB(0, 255, 0), RGB(255, 0, 255), RGB(255, 0, 0), RGB(0, 0, 255), RGB(0, 0, 0)]
		if fileExists("/proc/stb/fb/dst_left"):
			# Save the framebuffer destination window so __close can restore
			# it, then force a full-screen destination for the test patterns.
			# NOTE(review): these open() handles are never explicitly closed;
			# CPython closes them at garbage collection, but a 'with' block
			# would be cleaner.
			self.left = open("/proc/stb/fb/dst_left", "r").read()
			self.width = open("/proc/stb/fb/dst_width", "r").read()
			self.top = open("/proc/stb/fb/dst_top", "r").read()
			self.height = open("/proc/stb/fb/dst_height", "r").read()
			if self.left != "00000000" or self.top != "00000000" or self.width != "000002d0" or self.height != "0000000240":
				# NOTE(review): "0000000240" is ten hex digits while all the
				# other values are eight — possibly a typo for "00000240";
				# confirm against the driver's expected sysfs format.
				open("/proc/stb/fb/dst_left", "w").write("00000000")
				open("/proc/stb/fb/dst_width", "w").write("000002d0")
				open("/proc/stb/fb/dst_top", "w").write("00000000")
				open("/proc/stb/fb/dst_height", "w").write("0000000240")
				self.onClose.append(self.__close)
		self["actions"] = NumberActionMap(["InputActions", "OkCancelActions"],
		{
			"1": self.keyNumber,
			"2": self.keyNumber,
			"3": self.keyNumber,
			"4": self.keyNumber,
			"5": self.keyNumber,
			"6": self.keyNumber,
			"7": self.keyNumber,
			"ok": self.callNext,
			"cancel": self.close,
		})
		# Start with the first test picture.
		self.testpic_brightness()
	def __close(self):
		# Restore the framebuffer destination window saved in __init__.
		open("/proc/stb/fb/dst_left", "w").write(self.left)
		open("/proc/stb/fb/dst_width", "w").write(self.width)
		open("/proc/stb/fb/dst_top", "w").write(self.top)
		open("/proc/stb/fb/dst_height", "w").write(self.height)
	def keyNumber(self, key):
		"""Jump straight to the test picture selected with number key 1-7."""
		(self.testpic_brightness, self.testpic_contrast, self.testpic_colors, self.testpic_filter, self.testpic_gamma, self.testpic_overscan, self.testpic_fullhd)[key-1]()
	def callNext(self):
		# OK advances to whatever picture the current one registered as next.
		if self.next:
			self.next()
	def bbox(self, x, y, width, height, col, xx, yy):
		"""Draw four (xx by yy) corner marks of color col around the given
		rectangle."""
		c = self["Canvas"]
		c.fill(x, y, xx, yy, col)
		c.fill(x + width - xx, y, xx, yy, col)
		c.fill(x, y + height - yy, xx, yy, col)
		c.fill(x + width - xx, y + height - yy, xx, yy, col)
	def testpic_brightness(self):
		"""Test picture 1: a 15-step near-black grayscale staircase plus
		instructions for adjusting the TV's brightness."""
		self.next = self.testpic_contrast
		self.show()
		c = self["Canvas"]
		xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
		bbw, bbh = xres / 192, yres / 192  # NOTE(review): computed but unused here
		c.fill(0, 0, xres, yres, RGB(0,0,0))
		for i in range(15):
			# Shades from 0 up to 116 — the region around video black.
			col = i * 116 / 14
			height = yres / 3
			eh = height / 8
			offset = yres/6 + eh * i
			x = xres * 2 / 3
			width = yres / 6
			c.fill(x, offset, width, eh, RGB(col, col, col))
			if col == 0 or col == 16 or col == 116:
				# Mark the reference shades with a thin white line.
				c.fill(x, offset, width, 2, RGB(255, 255, 255))
			if i < 2:
				# Number the two darkest patches referred to in the text.
				c.writeText(x + width, offset, width, eh, RGB(255, 255, 255), RGB(0,0,0), gFont("Regular", 20), "%d." % (i+1))
		c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,255,255), RGB(0,0,0), gFont("Regular", 40),
			_("Brightness"))
		c.writeText(xres / 10, yres / 6, xres / 2, yres * 4 / 6, RGB(255,255,255), RGB(0,0,0), gFont("Regular", 20),
			_("If your TV has a brightness or contrast enhancement, disable it. If there is something called \"dynamic\", "
				"set it to standard. Adjust the backlight level to a value suiting your taste. "
				"Turn down contrast on your TV as much as possible.\nThen turn the brightness setting as "
				"low as possible, but make sure that the two lowermost shades of gray stay distinguishable.\n"
				"Do not care about the bright shades now. They will be set up in the next step.\n"
				"If you are happy with the result, press OK."),
			RT_WRAP)
		c.flush()
	def testpic_contrast(self):
		"""Test picture 2: a 15-step near-white staircase on a white
		background for adjusting the TV's contrast."""
		self.next = self.testpic_colors
		self.show()
		c = self["Canvas"]
		xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
		bbw, bbh = xres / 192, yres / 192  # NOTE(review): recomputed below; unused
		c.fill(0, 0, xres, yres, RGB(0,0,0))
		bbw = xres / 192
		bbh = yres / 192
		c.fill(0, 0, xres, yres, RGB(255,255,255))
		for i in range(15):
			# Shades from 185 up to 255 — the region around video white.
			col = 185 + i * 5
			height = yres / 3
			eh = height / 8
			offset = yres/6 + eh * i
			x = xres * 2 / 3
			width = yres / 6
			c.fill(x, offset, width, eh, RGB(col, col, col))
			if col == 185 or col == 235 or col == 255:
				# Mark the reference shades with a thin black line.
				c.fill(x, offset, width, 2, RGB(0,0,0))
			if i >= 13:
				# Number the two brightest patches referred to in the text.
				c.writeText(x + width, offset, width, eh, RGB(0, 0, 0), RGB(255, 255, 255), gFont("Regular", 20), "%d." % (i-13+1))
		c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,0,0), RGB(255,255,255), gFont("Regular", 40),
			_("Contrast"))
		c.writeText(xres / 10, yres / 6, xres / 2, yres * 4 / 6, RGB(0,0,0), RGB(255,255,255), gFont("Regular", 20),
			_("Now, use the contrast setting to turn up the brightness of the background as much as possible, "
				"but make sure that you can still see the difference between the two brightest levels of shades."
				"If you have done that, press OK."),
			RT_WRAP)
		c.flush()
	def testpic_colors(self):
		"""Test picture 3: red/green/blue/gray gradients plus the eight
		basic colors for adjusting color saturation."""
		self.next = self.testpic_filter
		self.show()
		c = self["Canvas"]
		xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
		bbw = xres / 192
		bbh = yres / 192
		c.fill(0, 0, xres, yres, RGB(255,255,255))
		for i in range(33):
			# 33-step gradient: full color fading to white, then to black.
			col = i * 255 / 32;
			width = xres - xres/5;
			ew = width / 33;
			offset = xres/10 + ew * i;
			y = yres * 2 / 3;
			height = yres / 20;
			o = yres / 60;
			if i < 16:
				c1 = 0xFF;
				c2 = 0xFF - (0xFF * i / 16);
			else:
				c1 = 0xFF - (0xFF * (i - 16) / 16);
				c2 = 0;
			c.fill(offset, y, ew, height, RGB(c1, c2, c2))
			c.fill(offset, y + (height + o) * 1, ew, height, RGB(c2, c1, c2))
			c.fill(offset, y + (height + o) * 2, ew, height, RGB(c2, c2, c1))
			c.fill(offset, y + (height + o) * 3, ew, height, RGB(col, col, col))
			if i == 0:
				# Corner marks on the first cell of each color bar.
				self.bbox(offset, y, ew, height, RGB(0,0,0), bbw, bbh);
				self.bbox(offset, y + (height + o) * 1, ew, height, RGB(0,0,0), bbw, bbh);
				self.bbox(offset, y + (height + o) * 2, ew, height, RGB(0,0,0), bbw, bbh);
		for i in range(8):
			# Vertical strip of the eight fully saturated basic colors.
			height = yres / 3;
			eh = height / 8;
			offset = yres/6 + eh * i;
			x = xres * 2 / 3;
			width = yres / 6;
			c.fill(x, offset, width, eh, self.basic_colors[i])
			if i == 0:
				self.bbox(x, offset, width, eh, RGB(0,0,0), bbw, bbh)
		# NOTE(review): unlike "Brightness"/"Contrast" above, this title is
		# not wrapped in the _() translation call — probably a missing
		# gettext marker; confirm before changing.
		c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,0,0), RGB(255,255,255), gFont("Regular", 40),
			("Color"))
		c.writeText(xres / 10, yres / 6, xres / 2, yres * 4 / 6, RGB(0,0,0), RGB(255,255,255), gFont("Regular", 20),
			_("Adjust the color settings so that all the color shades are distinguishable, but appear as saturated as possible. "
				"If you are happy with the result, press OK to close the video fine-tuning, or use the number keys to select other test screens."),
			RT_WRAP)
		c.flush()
	def testpic_filter(self):
		"""Test picture 4: vertical line gratings at 1-, 2- and 4-pixel
		pitch to reveal scaler/filter artifacts."""
		self.next = self.testpic_gamma
		self.show()
		c = self["Canvas"]
		xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
		c.fill(0, 0, xres, yres, RGB(64, 64, 64))
		width = xres - xres/5
		offset = xres/10
		yb = yres * 2 / 3
		height = yres / 20
		o = yres / 60
		border = xres / 60
		g1 = 255
		g2 = 128
		# Light frame behind the three gratings.
		c.fill(offset - border, yb - border, border * 2 + width, border * 2 + (height * 3 + o * 2), RGB(g1, g1, g1))
		for x in xrange(0, width, 2):
			c.fill(offset + x, yb, 1, height, RGB(g2,g2,g2))
		for x in xrange(0, width, 4):
			c.fill(offset + x, yb + (o + height), 2, height, RGB(g2,g2,g2))
		for x in xrange(0, width, 8):
			c.fill(offset + x, yb + (o + height) * 2, 4, height, RGB(g2,g2,g2))
		c.flush()
	def testpic_gamma(self):
		"""Test picture 5: a 50%-average line raster next to solid patches
		labeled with the gamma value at which they would match."""
		self.next = self.testpic_overscan
		self.show()
		c = self["Canvas"]
		xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
		c.fill(0, 0, xres, yres, RGB(0, 0, 0))
		width = xres - xres/5
		offset_x = xres/10
		height = yres - yres/5
		offset_y = yres/10
		# Left half: alternating white lines, i.e. 50% average brightness.
		for y in xrange(0, height, 4):
			c.fill(offset_x, offset_y + y, width/2, 2, RGB(255,255,255))
		l = 0
		fnt = gFont("Regular", height / 14)
		import math
		for i in xrange(1, 15):
			y = i * height / 14
			h = y - l
			# gamma runs from 0.8 to 3.4 in steps of 0.2.
			gamma = 0.6 + i * 0.2
			# Gray level that a display with this gamma renders at 50%.
			col = int(math.pow(.5, 1.0/gamma) * 256.0)
			c.fill(offset_x + width/2, offset_y + l, width/2, h, RGB(col,col,col))
			c.writeText(offset_x + width/2, offset_y + l, width/2, h, RGB(0,0,0), RGB(col,col,col), fnt, "%1.2f" % gamma, RT_WRAP|RT_HALIGN_RIGHT)
			l = y
		c.flush()
	def testpic_overscan(self):
		"""Test picture 6: hand off to the full-screen overscan screen."""
		self.next = self.testpic_fullhd
		self.hide()
		self.session.openWithCallback(self.testpicCallback, OverscanTestScreen)
	def testpic_fullhd(self):
		"""Test picture 7: hand off to the Full-HD test screen."""
		self.next = self.testpic_brightness
		self.hide()
		self.session.openWithCallback(self.testpicCallback, FullHDTestScreen)
	def testpicCallback(self, key):
		# Child screens close with True (OK -> advance), a digit (jump to
		# that picture) or a falsy value (EXIT -> leave fine-tuning).
		if key:
			if key == True:
				self.next()
			else:
				self.keyNumber(key)
		else:
			self.close()
| kingvuplus/rr | lib/python/Plugins/SystemPlugins/VideoTune/VideoFinetune.py | Python | gpl-2.0 | 10,747 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.