| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
""" Cisco_IOS_XR_parser_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR parser package configuration.
This module contains definitions
for the following management objects\:
parser\: Parser configuration
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Parser(object):
"""
Parser configuration
.. attribute:: alias
Alias for command mapping
**type**\: :py:class:`Alias <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Alias>`
.. attribute:: configuration
cli configuration services
**type**\: :py:class:`Configuration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Configuration>`
.. attribute:: history
cli commands history
**type**\: :py:class:`History <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.History>`
.. attribute:: indentation
indentation tracking
**type**\: :py:class:`Indentation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Indentation>`
.. attribute:: submode_exit
Exit submode when only '!' seen in interactive mode
**type**\: :py:class:`SubmodeExit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.SubmodeExit>`
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.alias = Parser.Alias()
self.alias.parent = self
self.configuration = Parser.Configuration()
self.configuration.parent = self
self.history = Parser.History()
self.history.parent = self
self.indentation = Parser.Indentation()
self.indentation.parent = self
self.submode_exit = Parser.SubmodeExit()
self.submode_exit.parent = self
class Indentation(object):
"""
indentation tracking
.. attribute:: indentation_disable
disable the indentation
**type**\: bool
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.indentation_disable = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:indentation'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.indentation_disable is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Indentation']['meta_info']
class Alias(object):
"""
Alias for command mapping
.. attribute:: alls
Table of all aliases configured
**type**\: :py:class:`Alls <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Alias.Alls>`
.. attribute:: configurations
Configuration command alias
**type**\: :py:class:`Configurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Alias.Configurations>`
.. attribute:: execs
Exec command alias
**type**\: :py:class:`Execs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Alias.Execs>`
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.alls = Parser.Alias.Alls()
self.alls.parent = self
self.configurations = Parser.Alias.Configurations()
self.configurations.parent = self
self.execs = Parser.Alias.Execs()
self.execs.parent = self
class Execs(object):
"""
Exec command alias
.. attribute:: exec_
Exec alias name
**type**\: list of :py:class:`Exec <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Alias.Execs.Exec>`
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.exec_ = YList()
self.exec_.parent = self
self.exec_.name = 'exec_'
class Exec(object):
"""
Exec alias name
.. attribute:: identifier <key>
Exec Alias name
**type**\: str
**range:** 0..30
.. attribute:: identifier_xr
Aliased exec command
**type**\: str
**mandatory**\: True
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.identifier = None
self.identifier_xr = None
@property
def _common_path(self):
if self.identifier is None:
raise YPYModelError('Key property identifier is None')
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:alias/Cisco-IOS-XR-parser-cfg:execs/Cisco-IOS-XR-parser-cfg:exec[Cisco-IOS-XR-parser-cfg:identifier = ' + str(self.identifier) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.identifier is not None:
return True
if self.identifier_xr is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Alias.Execs.Exec']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:alias/Cisco-IOS-XR-parser-cfg:execs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.exec_ is not None:
for child_ref in self.exec_:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Alias.Execs']['meta_info']
class Configurations(object):
"""
Configuration command alias
.. attribute:: configuration
Configuration Alias name
**type**\: list of :py:class:`Configuration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Alias.Configurations.Configuration>`
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.configuration = YList()
self.configuration.parent = self
self.configuration.name = 'configuration'
class Configuration(object):
"""
Configuration Alias name
.. attribute:: identifier <key>
Configuration alias name
**type**\: str
**range:** 0..30
.. attribute:: identifier_xr
Aliased config command
**type**\: str
**mandatory**\: True
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.identifier = None
self.identifier_xr = None
@property
def _common_path(self):
if self.identifier is None:
raise YPYModelError('Key property identifier is None')
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:alias/Cisco-IOS-XR-parser-cfg:configurations/Cisco-IOS-XR-parser-cfg:configuration[Cisco-IOS-XR-parser-cfg:identifier = ' + str(self.identifier) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.identifier is not None:
return True
if self.identifier_xr is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Alias.Configurations.Configuration']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:alias/Cisco-IOS-XR-parser-cfg:configurations'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.configuration is not None:
for child_ref in self.configuration:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Alias.Configurations']['meta_info']
class Alls(object):
"""
Table of all aliases configured
.. attribute:: all
Alias name to command mapping
**type**\: list of :py:class:`All <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Alias.Alls.All>`
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.all = YList()
self.all.parent = self
self.all.name = 'all'
class All(object):
"""
Alias name to command mapping
.. attribute:: identifier <key>
Alias name
**type**\: str
**range:** 0..30
.. attribute:: identifier_xr
The actual command
**type**\: str
**mandatory**\: True
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.identifier = None
self.identifier_xr = None
@property
def _common_path(self):
if self.identifier is None:
raise YPYModelError('Key property identifier is None')
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:alias/Cisco-IOS-XR-parser-cfg:alls/Cisco-IOS-XR-parser-cfg:all[Cisco-IOS-XR-parser-cfg:identifier = ' + str(self.identifier) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.identifier is not None:
return True
if self.identifier_xr is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Alias.Alls.All']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:alias/Cisco-IOS-XR-parser-cfg:alls'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.all is not None:
for child_ref in self.all:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Alias.Alls']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:alias'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.alls is not None and self.alls._has_data():
return True
if self.configurations is not None and self.configurations._has_data():
return True
if self.execs is not None and self.execs._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Alias']['meta_info']
class History(object):
"""
cli commands history
.. attribute:: size
maximum number of commands in history
**type**\: int
**range:** 1000..5000
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.size = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:history'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.size is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.History']['meta_info']
class Configuration(object):
"""
cli configuration services
.. attribute:: disable
disable for read\-only access users
**type**\: :py:class:`Disable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_parser_cfg.Parser.Configuration.Disable>`
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.disable = Parser.Configuration.Disable()
self.disable.parent = self
class Disable(object):
"""
disable for read\-only access users
.. attribute:: usergroup
Disable config mode for usergroup
**type**\: str
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.usergroup = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:configuration/Cisco-IOS-XR-parser-cfg:disable'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.usergroup is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Configuration.Disable']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:configuration'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.disable is not None and self.disable._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.Configuration']['meta_info']
class SubmodeExit(object):
"""
Exit submode when only '!' seen in interactive
mode
.. attribute:: enable
Enable the feature
**type**\: bool
"""
_prefix = 'parser-cfg'
_revision = '2015-06-02'
def __init__(self):
self.parent = None
self.enable = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser/Cisco-IOS-XR-parser-cfg:submode-exit'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.enable is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser.SubmodeExit']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-parser-cfg:parser'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.alias is not None and self.alias._has_data():
return True
if self.configuration is not None and self.configuration._has_data():
return True
if self.history is not None and self.history._has_data():
return True
if self.indentation is not None and self.indentation._has_data():
return True
if self.submode_exit is not None and self.submode_exit._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_parser_cfg as meta
return meta._meta_table['Parser']['meta_info']
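# --- Illustrative usage sketch (not part of the generated module) ---
# A minimal, hedged example of how this generated Parser model might be used
# with ydk-py's CRUD service to configure an exec alias. The device address
# and credentials are placeholders, and the service/provider names assume the
# classic ydk-py 0.4-era API (ydk.services.CRUDService,
# ydk.providers.NetconfServiceProvider); the helper name is hypothetical.
def _example_configure_exec_alias():
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(address='192.0.2.1',   # placeholder router
                                      port=830,
                                      username='admin',      # placeholder credentials
                                      password='admin',
                                      protocol='ssh')
    parser_cfg = Parser()                        # top-level container from this module
    alias = Parser.Alias.Execs.Exec()
    alias.identifier = 'sri'                     # alias name (key, 0..30 chars)
    alias.identifier_xr = 'show running-config interface'  # aliased exec command
    parser_cfg.alias.execs.exec_.append(alias)
    CRUDService().create(provider, parser_cfg)   # push the configuration to the device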
| abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_parser_cfg.py | Python | apache-2.0 | 21,226 |
#!/usr/bin/env python
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import (print_function, unicode_literals)
import sys, os
import json
import argparse
import re
from collections import OrderedDict
import dxpy
from dxpy.templating.utils import (print_intro, get_name, get_version, get_metadata, Completer, get_ordinal_str,
prompt_for_var, prompt_for_yn, use_completer, get_language, language_options,
get_pattern, get_timeout, fill_in_name_and_ver, clean, create_files_from_templates)
from dxpy.utils.printing import fill, BOLD, UNDERLINE, DNANEXUS_LOGO, ENDC
from dxpy.app_categories import APP_CATEGORIES
from dxpy.utils.completer import InstanceTypesCompleter
from dxpy.utils.pretty_print import format_table
from dxpy.compat import wrap_stdio_in_codecs
wrap_stdio_in_codecs()
try:
import colorama
colorama.init()
except:
pass
IO_NAME_PATTERN = re.compile('^[a-zA-Z_][0-9a-zA-Z_]*$')
API_VERSION = '1.0.0'
parser = argparse.ArgumentParser(description="Create a source code directory for a DNAnexus app. You will be prompted for various metadata for the app as well as for its input and output specifications.")
parser.add_argument('--json-file', help='Use the metadata and IO spec found in the given file')
parser.add_argument('--language', help='Programming language of your app')
parser.add_argument('--template',
choices=["basic", "parallelized", "scatter-process-gather"], default='basic',
help='Execution pattern of your app')
parser.add_argument('name', help='Name of your app', nargs='?')
args = parser.parse_args()
if args.json_file is not None and not os.path.exists(args.json_file):
parser.error('File not found: ' + args.json_file)
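# --- Illustrative helper (not part of the original script) ---
# A minimal sketch of what IO_NAME_PATTERN (defined above) accepts: parameter
# names may use only underscore "_", ASCII letters, and digits, and may not
# start with a digit -- the same rule main() enforces when prompting for
# input/output names. The helper name is hypothetical.
def _is_valid_io_name(candidate):
    """Return True if `candidate` is an acceptable input/output parameter name."""
    return bool(IO_NAME_PATTERN.match(candidate))
# e.g. _is_valid_io_name('reads_fastq') -> True
#      _is_valid_io_name('2nd_reads')   -> False (starts with a digit)
#      _is_valid_io_name('reads-fastq') -> False (hyphen not allowed)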
def main(**kwargs):
"""
Entry point for dx-app-wizard.
Note that this function is not meant to be used as a subroutine in your program.
"""
manifest = []
print_intro(API_VERSION)
if args.json_file is not None:
with open(args.json_file, 'r') as json_file:
app_json = json.loads(json_file.read())
# Re-confirm the name
name = get_name(default=args.name or app_json.get('name'))
app_json['name'] = name
version = get_version(default=app_json.get('version'))
app_json['version'] = version
try:
os.mkdir(app_json['name'])
except:
sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % app_json['name']) + '\n')
sys.exit(1)
else:
##################
# BASIC METADATA #
##################
name = get_name(default=args.name)
try:
os.mkdir(name)
except:
sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % name) + '\n')
sys.exit(1)
title, summary = get_metadata(API_VERSION)
version = get_version()
app_json = OrderedDict()
app_json["name"] = name
app_json["title"] = title or name
app_json['summary'] = summary or name
app_json["dxapi"] = API_VERSION
app_json["version"] = version
############
# IO SPECS #
############
class_completer = Completer(['int', 'float', 'string', 'boolean', 'hash',
'array:int', 'array:float', 'array:string', 'array:boolean',
'record', 'file', 'applet',
'array:record', 'array:file', 'array:applet'])
bool_completer = Completer(['true', 'false'])
print('')
print(BOLD() + 'Input Specification' + ENDC())
print('')
input_spec = True
input_names = []
printed_classes = False
if input_spec:
app_json['inputSpec'] = []
print(fill('You will now be prompted for each input parameter to your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
while True:
print('')
ordinal = get_ordinal_str(len(app_json['inputSpec']) + 1)
input_name = prompt_for_var(ordinal + ' input name (<ENTER> to finish)', allow_empty=True)
if input_name == '':
break
if input_name in input_names:
print(fill('Error: Cannot use the same input parameter name twice. Please choose again.'))
continue
if not IO_NAME_PATTERN.match(input_name):
print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
continue
input_names.append(input_name)
input_label = prompt_for_var('Label (optional human-readable name)', '')
use_completer(class_completer)
if not printed_classes:
print('Your input parameter must be of one of the following classes:')
print('''applet array:file array:record file int
array:applet array:float array:string float record
array:boolean array:int boolean hash string
''')
printed_classes = True
while True:
input_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
if input_class in class_completer.choices:
break
else:
print(fill('Not a recognized class; please choose again.'))
use_completer()
optional = prompt_for_yn('This is an optional parameter')
default_val = None
if optional and input_class in ['int', 'float', 'string', 'boolean']:
default_val = prompt_for_yn('A default value should be provided')
if default_val:
while True:
if input_class == 'boolean':
use_completer(bool_completer)
default_val = prompt_for_var(' Default value', choices=['true', 'false'])
use_completer()
elif input_class == 'string':
default_val = prompt_for_var(' Default value', allow_empty=True)
else:
default_val = prompt_for_var(' Default value')
try:
if input_class == 'boolean':
default_val = (default_val == 'true')
elif input_class == 'int':
default_val = int(default_val)
elif input_class == 'float':
default_val = float(default_val)
break
except:
print('Not a valid default value for the given class ' + input_class)
else:
default_val = None
# Fill in the input parameter's JSON
parameter_json = OrderedDict()
parameter_json["name"] = input_name
if input_label != '':
parameter_json['label'] = input_label
parameter_json["class"] = input_class
parameter_json["optional"] = optional
if default_val is not None:
parameter_json['default'] = default_val
# Fill in patterns and blank help string
if input_class == 'file' or input_class == 'array:file':
parameter_json["patterns"] = ["*"]
parameter_json["help"] = ""
app_json['inputSpec'].append(parameter_json)
print('')
print(BOLD() + 'Output Specification' + ENDC())
print('')
output_spec = True
output_names = []
if output_spec:
app_json['outputSpec'] = []
print(fill('You will now be prompted for each output parameter of your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
while True:
print('')
ordinal = get_ordinal_str(len(app_json['outputSpec']) + 1)
output_name = prompt_for_var(ordinal + ' output name (<ENTER> to finish)', allow_empty=True)
if output_name == '':
break
if output_name in output_names:
print(fill('Error: Cannot use the same output parameter name twice. Please choose again.'))
continue
if not IO_NAME_PATTERN.match(output_name):
print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
continue
output_names.append(output_name)
output_label = prompt_for_var('Label (optional human-readable name)', '')
use_completer(class_completer)
if not printed_classes:
print('Your output parameter must be of one of the following classes:')
print('''applet array:file array:record file int
array:applet array:float array:string float record
array:boolean array:int boolean hash string''')
printed_classes = True
while True:
output_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
if output_class in class_completer.choices:
break
else:
print(fill('Not a recognized class; please choose again.'))
use_completer()
# Fill in the output parameter's JSON
parameter_json = OrderedDict()
parameter_json["name"] = output_name
if output_label != '':
parameter_json['label'] = output_label
parameter_json["class"] = output_class
# Fill in patterns and blank help string
if output_class == 'file' or output_class == 'array:file':
parameter_json["patterns"] = ["*"]
parameter_json["help"] = ""
app_json['outputSpec'].append(parameter_json)
required_file_input_names = []
optional_file_input_names = []
required_file_array_input_names = []
optional_file_array_input_names = []
file_output_names = []
if 'inputSpec' in app_json:
for param in app_json['inputSpec']:
may_be_missing = param['optional'] and "default" not in param
if param['class'] == 'file':
param_list = optional_file_input_names if may_be_missing else required_file_input_names
elif param['class'] == 'array:file':
param_list = optional_file_array_input_names if may_be_missing else required_file_array_input_names
else:
param_list = None
if param_list is not None:
param_list.append(param['name'])
if 'outputSpec' in app_json:
file_output_names = [param['name'] for param in app_json['outputSpec'] if param['class'] == 'file']
##################
# TIMEOUT POLICY #
##################
print('')
print(BOLD() + 'Timeout Policy' + ENDC())
app_json["runSpec"] = OrderedDict({})
app_json['runSpec'].setdefault('timeoutPolicy', {})
timeout, timeout_units = get_timeout(default=app_json['runSpec']['timeoutPolicy'].get('*'))
app_json['runSpec']['timeoutPolicy'].setdefault('*', {})
app_json['runSpec']['timeoutPolicy']['*'].setdefault(timeout_units, timeout)
########################
# LANGUAGE AND PATTERN #
########################
print('')
print(BOLD() + 'Template Options' + ENDC())
# Prompt for programming language if not specified
language = args.language if args.language is not None else get_language()
interpreter = language_options[language].get_interpreter()
app_json["runSpec"]["interpreter"] = interpreter
# Prompt for the execution pattern only if the requested template is not available for the chosen language
template_dir = os.path.join(os.path.dirname(dxpy.__file__), 'templating', 'templates', language_options[language].get_path())
if not os.path.isdir(os.path.join(template_dir, args.template)):
print(fill('The execution pattern "' + args.template + '" is not available for your programming language.'))
pattern = get_pattern(template_dir)
else:
pattern = args.template
template_dir = os.path.join(template_dir, pattern)
with open(os.path.join(template_dir, 'dxapp.json'), 'r') as template_app_json_file:
file_text = fill_in_name_and_ver(template_app_json_file.read(), name, version)
template_app_json = json.loads(file_text)
for key in template_app_json['runSpec']:
app_json['runSpec'][key] = template_app_json['runSpec'][key]
if (language == args.language) and (pattern == args.template):
print('All template options are supplied in the arguments.')
##########################
# APP ACCESS PERMISSIONS #
##########################
print('')
print(BOLD('Access Permissions'))
print(fill('''If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ''' +
BOLD('https://wiki.dnanexus.com/App-Permissions') + '.'))
print('')
print(fill(UNDERLINE('Access to the Internet') + ' (other than accessing the DNAnexus API).'))
if prompt_for_yn("Will this app need access to the Internet?", default=False):
app_json.setdefault('access', {})
app_json['access']['network'] = ['*']
print(fill('App has full access to the Internet. To narrow access to specific sites, edit the ' +
UNDERLINE('access.network') + ' field of dxapp.json once we generate the app.'))
print('')
print(fill(UNDERLINE('Direct access to the parent project') + '''. This is not needed if your app specifies outputs,
which will be copied into the project after it's done running.'''))
if prompt_for_yn("Will this app need access to the parent project?", default=False):
app_json.setdefault('access', {})
app_json['access']['project'] = 'CONTRIBUTE'
print(fill('App has CONTRIBUTE access to the parent project. To change the access level or request access to ' +
'other projects, edit the ' + UNDERLINE('access.project') + ' and ' + UNDERLINE('access.allProjects') +
' fields of dxapp.json once we generate the app.'))
#######################
# SYSTEM REQUIREMENTS #
#######################
print('')
print(BOLD('System Requirements'))
print('')
print(BOLD('Common instance types:'))
print(format_table(InstanceTypesCompleter.preferred_instance_types.values(),
column_names=InstanceTypesCompleter.instance_types.values()[0]._fields))
print(fill(BOLD('Default instance type:') + ' The instance type you select here will apply to all entry points in ' +
'your app unless you override it. See ' +
BOLD('https://wiki.dnanexus.com/API-Specification-v1.0.0/Instance-Types') + ' for more information.'))
use_completer(InstanceTypesCompleter())
instance_type = prompt_for_var('Choose an instance type for your app',
default=InstanceTypesCompleter.default_instance_type.Name,
choices=list(InstanceTypesCompleter.instance_types))
app_json['runSpec'].setdefault('systemRequirements', {})
app_json['runSpec']['systemRequirements'].setdefault('*', {})
app_json['runSpec']['systemRequirements']['*']['instanceType'] = instance_type
######################
# HARDCODED DEFAULTS #
######################
# Default of no other authorizedUsers
# app_json['authorizedUsers'] = []
# print('\n' + BOLD('Linux version: '))
app_json['runSpec']['distribution'] = 'Ubuntu'
if any(instance_type.startswith(prefix) for prefix in ('mem1_hdd2', 'mem2_hdd2', 'mem3_hdd2')):
print(fill('Your app will run on Ubuntu 12.04. To use Ubuntu 14.04, select from the list of common instance ' +
'types above.'))
app_json['runSpec']['release'] = '12.04'
else:
app_json['runSpec']['release'] = '14.04'
print(fill('Your app has been configured to run on Ubuntu 14.04. To use Ubuntu 12.04, edit the ' +
BOLD('runSpec.release') + ' field of your dxapp.json.'))
#################
# WRITING FILES #
#################
print('')
print(BOLD() + '*** Generating ' + DNANEXUS_LOGO() + BOLD() + ' App Template... ***' + ENDC())
with open(os.path.join(name, 'dxapp.json'), 'w') as prog_file:
prog_file.write(clean(json.dumps(app_json, indent=2)) + '\n')
manifest.append(os.path.join(name, 'dxapp.json'))
print('')
print(fill('''Your app specification has been written to the
dxapp.json file. You can specify more app options by editing this file
directly (see https://wiki.dnanexus.com/Developer-Portal for complete
documentation).''' + (''' Note that without an input and output specification,
your app can only be built as an APPLET on the system. To publish it to
the DNAnexus community, you must first specify your inputs and outputs.
''' if not ('inputSpec' in app_json and 'outputSpec' in app_json) else "")))
print('')
for subdir in 'src', 'test', 'resources':
try:
os.mkdir(os.path.join(name, subdir))
manifest.append(os.path.join(name, subdir, ''))
except:
sys.stderr.write("Unable to create subdirectory %s/%s" % (name, subdir))
sys.exit(1)
entry_points = ['main']
if pattern == 'parallelized':
entry_points = ['main', 'process', 'postprocess']
elif pattern == 'scatter-process-gather':
entry_points = ['main', 'scatter', 'map', 'process', 'postprocess']
manifest += create_files_from_templates(template_dir, app_json, language,
required_file_input_names, optional_file_input_names,
required_file_array_input_names, optional_file_array_input_names,
file_output_names, pattern,
description='<!-- Insert a description of your app here -->',
entry_points=entry_points)
print("Created files:")
for filename in sorted(manifest):
print("\t", filename)
print("\n" + fill('''App directory created! See
https://wiki.dnanexus.com/Developer-Portal for tutorials on how to modify these files,
or run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.'''.format(n=name)) + "\n")
print(fill('''Running the DNAnexus build utility will create an executable on the DNAnexus platform. Any files found in the ''' +
BOLD() + 'resources' + ENDC() +
''' directory will be uploaded so that they will be present in the root directory when the executable is run.'''))
if __name__ == '__main__':
main()
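# --- Illustrative sketch (not part of the original script) ---
# Mirrors the inputSpec classification performed inside main() above:
# file/array:file parameters are split into "required" vs. "optional" buckets,
# where "optional" means optional *and* without a default value. The function
# name is hypothetical; it is shown only to document that logic.
def _classify_file_inputs(input_spec):
    """Return (required_files, optional_files, required_file_arrays, optional_file_arrays)."""
    required_files, optional_files = [], []
    required_file_arrays, optional_file_arrays = [], []
    for param in input_spec:
        may_be_missing = param['optional'] and 'default' not in param
        if param['class'] == 'file':
            (optional_files if may_be_missing else required_files).append(param['name'])
        elif param['class'] == 'array:file':
            (optional_file_arrays if may_be_missing else required_file_arrays).append(param['name'])
    return required_files, optional_files, required_file_arrays, optional_file_arrays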
| jhuttner/dx-toolkit | src/python/dxpy/scripts/dx_app_wizard.py | Python | apache-2.0 | 20,783 |
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import quote
from swift.common.utils import public
from swift.common.middleware.s3api.controllers.base import Controller
from swift.common.middleware.s3api.s3response import HTTPOk
from swift.common.middleware.s3api.etree import tostring
class S3AclController(Controller):
"""
Handles the following APIs:
* GET Bucket acl
* PUT Bucket acl
* GET Object acl
* PUT Object acl
Those APIs are logged as ACL operations in the S3 server log.
"""
@public
def GET(self, req):
"""
Handles GET Bucket acl and GET Object acl.
"""
resp = req.get_response(self.app, method='HEAD')
acl = resp.object_acl if req.is_object_request else resp.bucket_acl
resp = HTTPOk()
resp.body = tostring(acl.elem())
return resp
@public
def PUT(self, req):
"""
Handles PUT Bucket acl and PUT Object acl.
"""
if req.is_object_request:
headers = {}
src_path = '/%s/%s' % (req.container_name, req.object_name)
# 'object-sysmeta' can be updated by the 'Copy' method,
# but not by the 'POST' method.
# So headers['X-Copy-From'] is added here to make this a copy request.
headers['X-Copy-From'] = quote(src_path)
headers['Content-Length'] = 0
req.get_response(self.app, 'PUT', headers=headers)
else:
req.get_response(self.app, 'POST')
return HTTPOk()
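# --- Illustrative sketch (not part of the original middleware) ---
# Shows the headers the PUT handler above constructs for an object ACL update:
# the object is copied onto itself (X-Copy-From) because object sysmeta can be
# updated by a COPY but not by a POST. The function name is hypothetical and
# this is purely a mirror of the logic above for documentation purposes.
def _example_object_acl_put_headers(container_name, object_name):
    """Return the headers PUT() would send for an object ACL request."""
    src_path = '/%s/%s' % (container_name, object_name)
    return {'X-Copy-From': quote(src_path), 'Content-Length': 0}
# e.g. _example_object_acl_put_headers('bucket', 'key with spaces')
#      -> {'X-Copy-From': '/bucket/key%20with%20spaces', 'Content-Length': 0}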
| openstack/swift | swift/common/middleware/s3api/controllers/s3_acl.py | Python | apache-2.0 | 2,097 |
"""
Support for TPLink lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.tplink/
"""
import logging
import colorsys
import time
from homeassistant.const import (CONF_HOST, CONF_NAME)
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_KELVIN, ATTR_RGB_COLOR,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR)
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired)
from typing import Tuple
REQUIREMENTS = ['pyHS100==0.3.0']
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_CONSUMPTION = 'current_consumption'
ATTR_DAILY_CONSUMPTION = 'daily_consumption'
ATTR_MONTHLY_CONSUMPTION = 'monthly_consumption'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Initialise pyLB100 SmartBulb."""
from pyHS100 import SmartBulb
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
add_devices([TPLinkSmartBulb(SmartBulb(host), name)], True)
def brightness_to_percentage(byt):
"""Convert brightness from absolute 0..255 to percentage."""
return int((byt*100.0)/255.0)
def brightness_from_percentage(percent):
"""Convert percentage to absolute value 0..255."""
return (percent*255.0)/100.0
# Travis-CI runs too old astroid https://github.com/PyCQA/pylint/issues/1212
# pylint: disable=invalid-sequence-index
def rgb_to_hsv(rgb: Tuple[float, float, float]) -> Tuple[int, int, int]:
"""Convert RGB tuple (values 0-255) to HSV (degrees, %, %)."""
hue, sat, value = colorsys.rgb_to_hsv(rgb[0]/255, rgb[1]/255, rgb[2]/255)
return int(hue * 360), int(sat * 100), int(value * 100)
# Travis-CI runs too old astroid https://github.com/PyCQA/pylint/issues/1212
# pylint: disable=invalid-sequence-index
def hsv_to_rgb(hsv: Tuple[float, float, float]) -> Tuple[int, int, int]:
"""Convert HSV tuple (degrees, %, %) to RGB (values 0-255)."""
red, green, blue = colorsys.hsv_to_rgb(hsv[0]/360, hsv[1]/100, hsv[2]/100)
return int(red * 255), int(green * 255), int(blue * 255)
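# --- Illustrative sketch (not part of the original platform) ---
# Exercises the conversion helpers defined above. Pure Python, no bulb
# required; the values are only examples and the function name is hypothetical.
def _demo_color_helpers():
    """Round-trip a colour and a brightness value through the helpers above."""
    assert brightness_to_percentage(255) == 100
    assert brightness_from_percentage(100) == 255.0
    hsv = rgb_to_hsv((255, 0, 0))        # pure red
    assert hsv == (0, 100, 100)          # 0 degrees hue, full saturation/value
    assert hsv_to_rgb(hsv) == (255, 0, 0)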
class TPLinkSmartBulb(Light):
"""Representation of a TPLink Smart Bulb."""
def __init__(self, smartbulb: 'SmartBulb', name):
"""Initialize the bulb."""
self.smartbulb = smartbulb
self._name = None
if name is not None:
self._name = name
self._state = None
self._available = True
self._color_temp = None
self._brightness = None
self._rgb = None
self._supported_features = 0
self._emeter_params = {}
@property
def name(self):
"""Return the name of the Smart Bulb, if any."""
return self._name
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def turn_on(self, **kwargs):
"""Turn the light on."""
self.smartbulb.state = self.smartbulb.BULB_STATE_ON
if ATTR_COLOR_TEMP in kwargs:
self.smartbulb.color_temp = \
mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
if ATTR_KELVIN in kwargs:
self.smartbulb.color_temp = kwargs[ATTR_KELVIN]
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
self.smartbulb.brightness = brightness_to_percentage(brightness)
if ATTR_RGB_COLOR in kwargs:
rgb = kwargs.get(ATTR_RGB_COLOR)
self.smartbulb.hsv = rgb_to_hsv(rgb)
def turn_off(self):
"""Turn the light off."""
self.smartbulb.state = self.smartbulb.BULB_STATE_OFF
@property
def color_temp(self):
"""Return the color temperature of this light in mireds for HA."""
return self._color_temp
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def rgb_color(self):
"""Return the color in RGB."""
return self._rgb
@property
def is_on(self):
"""Return True if device is on."""
return self._state
def update(self):
"""Update the TP-Link Bulb's state."""
from pyHS100 import SmartDeviceException
try:
self._available = True
if self._supported_features == 0:
self.get_features()
self._state = (
self.smartbulb.state == self.smartbulb.BULB_STATE_ON)
if self._name is None:
self._name = self.smartbulb.alias
if self._supported_features & SUPPORT_BRIGHTNESS:
self._brightness = brightness_from_percentage(
self.smartbulb.brightness)
if self._supported_features & SUPPORT_COLOR_TEMP:
if (self.smartbulb.color_temp is not None and
self.smartbulb.color_temp != 0):
self._color_temp = kelvin_to_mired(
self.smartbulb.color_temp)
if self._supported_features & SUPPORT_RGB_COLOR:
self._rgb = hsv_to_rgb(self.smartbulb.hsv)
if self.smartbulb.has_emeter:
self._emeter_params[ATTR_CURRENT_CONSUMPTION] \
= "%.1f W" % self.smartbulb.current_consumption()
daily_statistics = self.smartbulb.get_emeter_daily()
monthly_statistics = self.smartbulb.get_emeter_monthly()
try:
self._emeter_params[ATTR_DAILY_CONSUMPTION] \
= "%.2f kW" % daily_statistics[int(
time.strftime("%d"))]
self._emeter_params[ATTR_MONTHLY_CONSUMPTION] \
= "%.2f kW" % monthly_statistics[int(
time.strftime("%m"))]
except KeyError:
# device returned no daily/monthly history
pass
except (SmartDeviceException, OSError) as ex:
_LOGGER.warning("Could not read state for %s: %s", self._name, ex)
self._available = False
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def get_features(self):
"""Determine all supported features in one go."""
if self.smartbulb.is_dimmable:
self._supported_features += SUPPORT_BRIGHTNESS
if self.smartbulb.is_variable_color_temp:
self._supported_features += SUPPORT_COLOR_TEMP
if self.smartbulb.is_color:
self._supported_features += SUPPORT_RGB_COLOR
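# --- Illustrative sketch (not part of the original platform) ---
# get_features() above accumulates Home Assistant SUPPORT_* bit flags; callers
# test capabilities with bitwise AND, as update() does. The stub below only
# demonstrates the flag arithmetic (using |= instead of the original +=, which
# is equivalent for distinct one-shot flags); no real bulb is involved and the
# function name is hypothetical.
def _demo_supported_features(is_dimmable=True, is_variable_color_temp=False,
                             is_color=True):
    """Return the feature bitmask a bulb with the given capabilities would report."""
    features = 0
    if is_dimmable:
        features |= SUPPORT_BRIGHTNESS
    if is_variable_color_temp:
        features |= SUPPORT_COLOR_TEMP
    if is_color:
        features |= SUPPORT_RGB_COLOR
    return features
# e.g. bool(_demo_supported_features() & SUPPORT_BRIGHTNESS) -> True
#      bool(_demo_supported_features() & SUPPORT_COLOR_TEMP) -> False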
| ewandor/home-assistant | homeassistant/components/light/tplink.py | Python | apache-2.0 | 6,927 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/static-lsps/static-lsp/transit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for transit LSPs
"""
__slots__ = (
"_path_helper", "_extmethods", "__next_hop", "__incoming_label", "__push_label"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__next_hop = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=False,
)
self.__incoming_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="incoming-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
self.__push_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="push-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"lsps",
"static-lsps",
"static-lsp",
"transit",
"state",
]
def _get_next_hop(self):
"""
Getter method for next_hop, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/next_hop (inet:ip-address)
YANG Description: next hop IP address for the LSP
"""
return self.__next_hop
def _set_next_hop(self, v, load=False):
"""
Setter method for next_hop, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/next_hop (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop() directly.
YANG Description: next hop IP address for the LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """next_hop must be of a type compatible with inet:ip-address""",
"defined-type": "inet:ip-address",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-address', is_config=False)""",
}
)
self.__next_hop = t
if hasattr(self, "_set"):
self._set()
def _unset_next_hop(self):
self.__next_hop = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=False,
)
def _get_incoming_label(self):
"""
Getter method for incoming_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/incoming_label (oc-mplst:mpls-label)
YANG Description: label value on the incoming packet
"""
return self.__incoming_label
def _set_incoming_label(self, v, load=False):
"""
Setter method for incoming_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/incoming_label (oc-mplst:mpls-label)
If this variable is read-only (config: false) in the
source YANG file, then _set_incoming_label is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_incoming_label() directly.
YANG Description: label value on the incoming packet
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="incoming-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """incoming_label must be of a type compatible with oc-mplst:mpls-label""",
"defined-type": "oc-mplst:mpls-label",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="incoming-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
}
)
self.__incoming_label = t
if hasattr(self, "_set"):
self._set()
def _unset_incoming_label(self):
self.__incoming_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="incoming-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
def _get_push_label(self):
"""
Getter method for push_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/push_label (oc-mplst:mpls-label)
YANG Description: label value to push at the current hop for the
LSP
"""
return self.__push_label
def _set_push_label(self, v, load=False):
"""
Setter method for push_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/push_label (oc-mplst:mpls-label)
If this variable is read-only (config: false) in the
source YANG file, then _set_push_label is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_push_label() directly.
YANG Description: label value to push at the current hop for the
LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="push-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """push_label must be of a type compatible with oc-mplst:mpls-label""",
"defined-type": "oc-mplst:mpls-label",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="push-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
}
)
self.__push_label = t
if hasattr(self, "_set"):
self._set()
def _unset_push_label(self):
self.__push_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="push-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
next_hop = __builtin__.property(_get_next_hop)
incoming_label = __builtin__.property(_get_incoming_label)
push_label = __builtin__.property(_get_push_label)
_pyangbind_elements = OrderedDict(
[
("next_hop", next_hop),
("incoming_label", incoming_label),
("push_label", push_label),
]
)
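# --- Illustrative usage sketch (not part of the generated bindings) ---
# A minimal, hedged example for the transit/state container generated above.
# The leaves are config:false, so (per the setter docstrings) a backend would
# populate them via the private _set_* methods; the public attributes are
# read-only properties. `state_cls` is bound at definition time so it refers
# to the class defined directly above, and the function name is hypothetical.
def _example_populate_transit_state(state_cls=state):
    s = state_cls()
    s._set_next_hop('192.0.2.1')          # must match the inet:ip-address patterns
    s._set_incoming_label(16)             # 16..1048575, or a well-known label name
    s._set_push_label('IMPLICIT_NULL')    # enumerated mpls-label value
    return s.next_hop, s.incoming_label, s.push_label, s._path()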
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/static-lsps/static-lsp/transit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for transit LSPs
"""
__slots__ = (
"_path_helper", "_extmethods", "__next_hop", "__incoming_label", "__push_label"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__next_hop = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=False,
)
self.__incoming_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="incoming-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
self.__push_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="push-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"lsps",
"static-lsps",
"static-lsp",
"transit",
"state",
]
def _get_next_hop(self):
"""
Getter method for next_hop, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/next_hop (inet:ip-address)
YANG Description: next hop IP address for the LSP
"""
return self.__next_hop
def _set_next_hop(self, v, load=False):
"""
Setter method for next_hop, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/next_hop (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop() directly.
YANG Description: next hop IP address for the LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """next_hop must be of a type compatible with inet:ip-address""",
"defined-type": "inet:ip-address",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-address', is_config=False)""",
}
)
self.__next_hop = t
if hasattr(self, "_set"):
self._set()
def _unset_next_hop(self):
self.__next_hop = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="next-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=False,
)
def _get_incoming_label(self):
"""
Getter method for incoming_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/incoming_label (oc-mplst:mpls-label)
YANG Description: label value on the incoming packet
"""
return self.__incoming_label
def _set_incoming_label(self, v, load=False):
"""
Setter method for incoming_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/incoming_label (oc-mplst:mpls-label)
If this variable is read-only (config: false) in the
source YANG file, then _set_incoming_label is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_incoming_label() directly.
YANG Description: label value on the incoming packet
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="incoming-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """incoming_label must be of a type compatible with oc-mplst:mpls-label""",
"defined-type": "oc-mplst:mpls-label",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="incoming-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
}
)
self.__incoming_label = t
if hasattr(self, "_set"):
self._set()
def _unset_incoming_label(self):
self.__incoming_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="incoming-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
def _get_push_label(self):
"""
Getter method for push_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/push_label (oc-mplst:mpls-label)
YANG Description: label value to push at the current hop for the
LSP
"""
return self.__push_label
def _set_push_label(self, v, load=False):
"""
Setter method for push_label, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/push_label (oc-mplst:mpls-label)
If this variable is read-only (config: false) in the
source YANG file, then _set_push_label is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_push_label() directly.
YANG Description: label value to push at the current hop for the
LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="push-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """push_label must be of a type compatible with oc-mplst:mpls-label""",
"defined-type": "oc-mplst:mpls-label",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),], is_leaf=True, yang_name="push-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
}
)
self.__push_label = t
if hasattr(self, "_set"):
self._set()
def _unset_push_label(self):
self.__push_label = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
],
is_leaf=True,
yang_name="push-label",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
next_hop = __builtin__.property(_get_next_hop)
incoming_label = __builtin__.property(_get_incoming_label)
push_label = __builtin__.property(_get_push_label)
_pyangbind_elements = OrderedDict(
[
("next_hop", next_hop),
("incoming_label", incoming_label),
("push_label", push_label),
]
)
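# --- Editor's note: a minimal, hedged usage sketch, not part of the generated
# bindings. The _set_* docstrings above state that these config-false leaves
# are populated by backends calling the private setters directly; the example
# assumes the module-level `state` class defined above and uses illustrative
# values only.
if __name__ == "__main__":
    oper = state()
    oper._set_next_hop("192.0.2.1")        # IPv4 or IPv6 textual address
    oper._set_incoming_label(100004)       # numeric label in the 16..1048575 range
    oper._set_push_label("IMPLICIT_NULL")  # or a numeric label value
    print("%s %s %s" % (oper.next_hop, oper.incoming_label, oper.push_label))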
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/static_lsps/static_lsp/transit/state/__init__.py | Python | apache-2.0 | 43,391 |
from control import BaseControl
from Tkinter import Menu as TkMenu, BooleanVar
class MenuItem(object):
def __init__(self, menu, title, **kwargs):
self._menu = menu
if kwargs.get('checkbox'):
# Checkbox menu item
self._checkvar = BooleanVar()
self._checkvar.set(kwargs.get('checked', False))
self._menu._ctrl.add_checkbutton(label=title, command=self._on_check, variable=self._checkvar)
else:
# NOTE: For whatever reason, lambdas do not work in this case...
self._menu._ctrl.add_command(label=title, command=self._on_click)
if kwargs.get('on_click'):
self.on_click = kwargs['on_click']
def _on_check(self):
"""Handler for keeping on_click virtual with checkbox option"""
self.on_click(self._checkvar.get())
def _on_click(self):
"""Handler for keeping on_click virtual"""
self.on_click()
def on_click(self):
pass
def __exit__(self, *args):
pass
def __enter__(self, *args):
return self
class BaseMenu(BaseControl):
def __init__(self, parent_hnd, **kwargs):
BaseControl.__init__(self, TkMenu(parent_hnd))
if kwargs.get('tearoff'):
self.tearoff = kwargs['tearoff']
else:
self.tearoff = False
@property
def tearoff(self):
return self._control_get('tearoff') == 1
@tearoff.setter
def tearoff(self, value):
self._control_set('tearoff', 1 if value else 0)
def create(self, title, **kwargs):
return MenuItem(self, title, **kwargs)
def separator(self):
self._ctrl.add_separator()
class Menu(BaseMenu):
def __init__(self, menubar, title, **kwargs):
BaseMenu.__init__(self, menubar._ctrl, **kwargs)
self._mainmenu = menubar
menubar._ctrl.add_cascade(label=title, menu=self._ctrl)
def __exit__(self, *args):
pass
def __enter__(self, *args):
return self
class MainMenu(BaseControl):
def __init__(self, parent):
BaseControl.__init__(self, TkMenu(parent._frame))
parent._control_set('menu', self._ctrl)
def create(self, title, **kwargs):
return Menu(self, title, **kwargs)
class PopupMenu(BaseMenu):
def __init__(self, parent, **kwargs):
BaseMenu.__init__(self, parent._frame, **kwargs)
def popup(self, x, y):
try:
self._ctrl.tk_popup(x, y, 0)
finally:
self._ctrl.grab_release()
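# --- Editor's note: a hedged usage sketch, not from the tkplus sources. It
# assumes a tkplus window object `win` exposing the `_frame`/`_control_set`
# interface that MainMenu expects, and a no-argument `save_document` callback;
# both names are hypothetical and used only for illustration.
#
#   menubar = MainMenu(win)
#   with menubar.create('File') as file_menu:
#       file_menu.create('Save', on_click=save_document)
#       file_menu.separator()
#       file_menu.create('Word wrap', checkbox=True, checked=True,
#                        on_click=lambda checked: print('wrap:', checked))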
| todd-x86/tkplus | menu.py | Python | apache-2.0 | 2,534 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# file: hello.py
print('Hello!Python!')
| virus-warnning/pysamples | beginner/000-hello/hello.py | Python | apache-2.0 | 87 |
from disco.core import result_iterator
from discomll import dataset
from discomll.regression import locally_weighted_linear_regression
training_data = dataset.Data(data_tag=["test:regression_data1", "test:regression_data2"],
data_type="chunk",
id_index=0,
X_indices=[0],
y_index=1)
fitting_data = dataset.Data(data_tag=["test:regression_data_test1", "test:regression_data_test2"],
data_type="chunk",
id_index=0,
X_indices=[0],
y_index=1)
# fit fitting data to training data
results = locally_weighted_linear_regression.fit_predict(training_data, fitting_data, tau=10)
# output results
for k, v in result_iterator(results):
print k, v
| romanorac/discomll | discomll/examples/regression/locally_weighted_linear_regression_ddfs.py | Python | apache-2.0 | 871 |
"""
navdoon.destination.file
------------------------
Define destinations to flush metrics to files
"""
from navdoon.destination.stream import Stream, CsvStream
from navdoon.pystdlib.typing import Any
class TextFile(Stream):
"""Destination to flush metrics to a file"""
def __init__(self, name):
# type: (str) -> None
self._name = None # type: str
self.name = name
file_handle = open(name, 'at')
Stream.__init__(self, file_handle)
def __del__(self):
self.stream.close()
@property
def name(self):
# type: () -> str
return self._name
@name.setter
def name(self, name):
# type: (str) -> None
self._name = name
def __eq__(self, other):
# type: (Any) -> bool
return self._name == other.name and self.pattern == other.pattern
class CsvFile(TextFile):
"""Destination to flush metrics to a CSV file"""
def __init__(self, name):
# type: (str) -> None
TextFile.__init__(self, name)
self.pattern = '"{name}","{value}","{timestamp}"' # type: str
self.append = "\r\n" # type: str
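# --- Editor's note: a hedged usage sketch, not part of navdoon. It assumes the
# Stream base class exposes the destination-style flush() used by navdoon
# destinations and that metrics are (name, value, timestamp) tuples; both
# assumptions are illustrative rather than confirmed by this file.
#
#   dest = CsvFile('/tmp/metrics.csv')
#   dest.flush([('requests.count', 42, 1456000000)])
#   # each metric would be written using the CSV pattern: "name","value","timestamp"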
| farzadghanei/navdoon | navdoon/destination/file.py | Python | apache-2.0 | 1,149 |
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from geo import geo
from routes import Routes
blueprints = (
(geo.create_blueprint(), '/services/geo'),)
base_routes = Routes()
class Eclipse2017GeoApp(Flask):
"""
Eclipse 2017 application.
"""
def __init__(
self, project_id, session_enc_key, google_oauth2_client_id,
google_oauth2_client_secret, debug=False,
blueprints=blueprints, routes=base_routes, geo=geo,
**kwargs):
super(Eclipse2017GeoApp, self).__init__(__name__, **kwargs)
self.config['PROJECT_ID'] = project_id
self.config['SECRET_KEY'] = session_enc_key
self.config['GOOGLE_OAUTH2_CLIENT_ID'] = google_oauth2_client_id
self.config['GOOGLE_OAUTH2_CLIENT_SECRET'] = google_oauth2_client_secret
self.geo = geo
self.debug = debug
routes.register(self, blueprints)
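# --- Editor's note: a hedged construction sketch, not part of the project.
# The credential values are placeholders; a real deployment would supply its
# own configuration.
#
#   app = Eclipse2017GeoApp(
#       project_id='example-project',
#       session_enc_key='change-me',
#       google_oauth2_client_id='example-client-id',
#       google_oauth2_client_secret='example-client-secret',
#       debug=True)
#   app.run(host='0.0.0.0', port=8080)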
| google/eclipse2017 | geo/app/app/backend/eclipse2017_app.py | Python | apache-2.0 | 1,466 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from testtools import skipIf
from heat.common import exception
from heat.common import template_format
from heat.engine import clients
from heat.engine import environment
from heat.engine.hot.template import HOTemplate
from heat.engine import parser
from heat.engine import resources
from heat.engine.resources import instance as instances
from heat.engine import service
from heat.openstack.common.importutils import try_import
from heat.openstack.common.rpc import common as rpc_common
from heat.tests.common import HeatTestCase
from heat.tests import utils
from heat.tests.v1_1 import fakes
test_template_volumeattach = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": "test_KeyName"
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/%s"
}
}
}
}
'''
test_template_ref = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "%s" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_findinmap_valid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2 KeyPair to' + \
'enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_findinmap_invalid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2 KeyPair to enable SSH ' + \
'access to the instances",' + \
''' "Type" : "String"
}
},
"Mappings" : {
"AWSInstanceType2Arch" : {
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"cc1.4xlarge" : { "Arch" : "64HVM" },
"cc2.8xlarge" : { "Arch" : "64HVM" },
"cg1.4xlarge" : { "Arch" : "64HVM" }
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
''' + \
'"ImageId" : { "Fn::FindInMap" : [ "DistroArch2AMI", { "Ref" : ' + \
'"LinuxDistribution" },' + \
'{ "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : ' + \
'"InstanceType" }, "Arch" ] } ] },' + \
'''
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName"}
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_invalid_resources = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AWS CloudFormation Sample Template for xyz.",
"Parameters" : {
"InstanceType" : {
"Description" : "Defined instance type",
"Type" : "String",
"Default" : "node.ee",
"AllowedValues" : ["node.ee", "node.apache", "node.api"],
"ConstraintDescription" : "must be a valid instance type."
}
},
"Resources" : {
"Type" : "AWS::EC2::Instance",
"Metadata" : {
},
"Properties" : {
"ImageId" : { "Ref" : "centos-6.4-20130701-0" },
"InstanceType" : { "Ref" : "InstanceType" }
}
}
}
'''
test_template_invalid_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"UnknownProperty": "unknown"
}
}
}
}
'''
test_template_unimplemented_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SourceDestCheck": "false"
}
}
}
}
'''
test_template_invalid_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Destroy",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_snapshot_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Snapshot",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_volume_snapshot = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"DeletionPolicy": "Snapshot",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
}
}
}
'''
test_unregistered_key = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_image = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_invalid_secgroups = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroups": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_invalid_secgroupids = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroupIds": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_nova_client_exception = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large"
}
}
}
}
'''
test_template_unique_logical_name = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
},
"AName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"AName": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_cfn_parameter_label = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String",
"Label" : "Nova KeyPair Name"
},
},
"Resources" : {
"AName": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_hot_parameter_label = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
label: Nova KeyPair Name
resources:
my_instance:
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
outputs:
instance_ip:
description: The IP address of the deployed instance
value: { get_attr: [my_instance, PublicIp] }
'''
test_template_duplicate_parameters = '''
# This is a hello world HOT template just defining a single compute instance
heat_template_version: 2013-05-23
parameter_groups:
- label: Server Group
description: A group of parameters for the server
parameters:
- InstanceType
- KeyName
- ImageId
- label: Database Group
description: A group of parameters for the database
parameters:
- db_password
- db_port
- InstanceType
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
InstanceType:
type: string
description: Instance type for the instance to be created
default: m1.small
constraints:
- allowed_values: [m1.tiny, m1.small, m1.large]
description: Value must be one of 'm1.tiny', 'm1.small' or 'm1.large'
ImageId:
type: string
description: ID of the image to use for the instance
# parameters below are not used in template, but are for verifying parameter
# validation support in HOT
db_password:
type: string
description: Database password
hidden: true
constraints:
- length: { min: 6, max: 8 }
description: Password length must be between 6 and 8 characters
- allowed_pattern: "[a-zA-Z0-9]+"
description: Password must consist of characters and numbers only
- allowed_pattern: "[A-Z]+[a-zA-Z0-9]*"
description: Password must start with an uppercase character
db_port:
type: number
description: Database port number
default: 50000
constraints:
- range: { min: 40000, max: 60000 }
description: Port number must be between 40000 and 60000
resources:
my_instance:
    # Use an AWS resource type since it exists; there is no need to use a different one here
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
outputs:
instance_ip:
description: The IP address of the deployed instance
value: { get_attr: [my_instance, PublicIp] }
'''
test_template_invalid_parameter_name = '''
# This is a hello world HOT template just defining a single compute instance
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
- label: Server Group
description: A group of parameters for the server
parameters:
- InstanceType
- KeyName
- ImageId
- label: Database Group
description: A group of parameters for the database
parameters:
- db_password
- db_port
- SomethingNotHere
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
InstanceType:
type: string
description: Instance type for the instance to be created
default: m1.small
constraints:
- allowed_values: [m1.tiny, m1.small, m1.large]
description: Value must be one of 'm1.tiny', 'm1.small' or 'm1.large'
ImageId:
type: string
description: ID of the image to use for the instance
# parameters below are not used in template, but are for verifying parameter
# validation support in HOT
db_password:
type: string
description: Database password
hidden: true
constraints:
- length: { min: 6, max: 8 }
description: Password length must be between 6 and 8 characters
- allowed_pattern: "[a-zA-Z0-9]+"
description: Password must consist of characters and numbers only
- allowed_pattern: "[A-Z]+[a-zA-Z0-9]*"
description: Password must start with an uppercase character
db_port:
type: number
description: Database port number
default: 50000
constraints:
- range: { min: 40000, max: 60000 }
description: Port number must be between 40000 and 60000
resources:
my_instance:
    # Use an AWS resource type since it exists; there is no need to use a different one here
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
outputs:
instance_ip:
description: The IP address of the deployed instance
value: { get_attr: [my_instance, PublicIp] }
'''
test_template_hot_no_parameter_label = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
resources:
my_instance:
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
'''
test_template_no_parameters = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
- label: Server Group
description: A group of parameters for the server
- label: Database Group
description: A group of parameters for the database
'''
test_template_default_override = '''
heat_template_version: 2013-05-23
description: create a network
parameters:
net_name:
type: string
default: defaultnet
description: Name of private network to be created
resources:
private_net:
type: OS::Neutron::Net
properties:
name: { get_param: net_name }
'''
test_template_no_default = '''
heat_template_version: 2013-05-23
description: create a network
parameters:
net_name:
type: string
description: Name of private network to be created
resources:
private_net:
type: OS::Neutron::Net
properties:
name: { get_param: net_name }
'''
class validateTest(HeatTestCase):
def setUp(self):
super(validateTest, self).setUp()
resources.initialise()
self.fc = fakes.FakeClient()
resources.initialise()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
def test_validate_volumeattach_valid(self):
t = template_format.parse(test_template_volumeattach % 'vdq')
stack = parser.Stack(self.ctx, 'test_stack', parser.Template(t))
volumeattach = stack['MountPoint']
self.assertIsNone(volumeattach.validate())
def test_validate_volumeattach_invalid(self):
t = template_format.parse(test_template_volumeattach % 'sda')
stack = parser.Stack(self.ctx, 'test_stack', parser.Template(t))
volumeattach = stack['MountPoint']
self.assertRaises(exception.StackValidationFailed,
volumeattach.validate)
def test_validate_ref_valid(self):
t = template_format.parse(test_template_ref % 'WikiDatabase')
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual('test.', res['Description'])
def test_validate_with_environment(self):
test_template = test_template_ref % 'WikiDatabase'
test_template = test_template.replace('AWS::EC2::Instance',
'My::Instance')
t = template_format.parse(test_template)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
params = {'resource_registry': {'My::Instance': 'AWS::EC2::Instance'}}
res = dict(engine.validate_template(None, t, params))
self.assertEqual('test.', res['Description'])
def test_validate_hot_valid(self):
t = template_format.parse(
"""
heat_template_version: 2013-05-23
description: test.
resources:
my_instance:
type: AWS::EC2::Instance
""")
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual('test.', res['Description'])
def test_validate_ref_invalid(self):
t = template_format.parse(test_template_ref % 'WikiDatabasez')
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertNotEqual(res['Description'], 'Successfully validated')
def test_validate_findinmap_valid(self):
t = template_format.parse(test_template_findinmap_valid)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual('test.', res['Description'])
def test_validate_findinmap_invalid(self):
t = template_format.parse(test_template_findinmap_invalid)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertNotEqual(res['Description'], 'Successfully validated')
def test_validate_parameters(self):
t = template_format.parse(test_template_ref % 'WikiDatabase')
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
# Note: the assertion below does not expect a CFN dict of the parameter
# but a dict of the parameters.Schema object.
        # For API CFN backward compatibility, formatting to CFN is done in the
# API layer in heat.engine.api.format_validate_parameter.
expected = {'KeyName': {
'Type': 'String',
'Description': 'Name of an existing EC2KeyPair to enable SSH '
'access to the instances',
'NoEcho': 'false',
'Label': 'KeyName'}}
self.assertEqual(expected, res['Parameters'])
def test_validate_parameters_env_override(self):
t = template_format.parse(test_template_default_override)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
env_params = {'net_name': 'betternetname'}
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, env_params))
self.assertEqual('betternetname',
res['Parameters']['net_name']['Default'])
def test_validate_parameters_env_provided(self):
t = template_format.parse(test_template_no_default)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
env_params = {'net_name': 'betternetname'}
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, env_params))
self.assertEqual('betternetname',
res['Parameters']['net_name']['Default'])
def test_validate_hot_parameter_label(self):
t = template_format.parse(test_template_hot_parameter_label)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
parameters = res['Parameters']
expected = {'KeyName': {
'Type': 'String',
'Description': 'Name of an existing key pair to use for the '
'instance',
'NoEcho': 'false',
'Label': 'Nova KeyPair Name'}}
self.assertEqual(expected, parameters)
def test_validate_hot_no_parameter_label(self):
t = template_format.parse(test_template_hot_no_parameter_label)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
parameters = res['Parameters']
expected = {'KeyName': {
'Type': 'String',
'Description': 'Name of an existing key pair to use for the '
'instance',
'NoEcho': 'false',
'Label': 'KeyName'}}
self.assertEqual(expected, parameters)
def test_validate_cfn_parameter_label(self):
t = template_format.parse(test_template_cfn_parameter_label)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
parameters = res['Parameters']
expected = {'KeyName': {
'Type': 'String',
'Description': 'Name of an existing EC2KeyPair to enable SSH '
'access to the instances',
'NoEcho': 'false',
'Label': 'Nova KeyPair Name'}}
self.assertEqual(expected, parameters)
def test_validate_properties(self):
t = template_format.parse(test_template_invalid_property)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual({'Error': 'Unknown Property UnknownProperty'}, res)
def test_invalid_resources(self):
t = template_format.parse(test_template_invalid_resources)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual({'Error': 'Resources must contain Resource. '
'Found a [string] instead'},
res)
def test_invalid_section_cfn(self):
t = template_format.parse(
"""
{
'AWSTemplateFormatVersion': '2010-09-09',
'Resources': {
'server': {
'Type': 'OS::Nova::Server'
}
},
'Output': {}
}
""")
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
ex = self.assertRaises(rpc_common.ClientException,
engine.validate_template, None, t)
self.assertEqual(ex._exc_info[0], exception.InvalidTemplateSection)
self.assertEqual('The template section is invalid: Output',
str(ex._exc_info[1]))
def test_invalid_section_hot(self):
t = template_format.parse(
"""
heat_template_version: 2013-05-23
resources:
server:
type: OS::Nova::Server
output:
""")
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
ex = self.assertRaises(rpc_common.ClientException,
engine.validate_template, None, t)
self.assertEqual(ex._exc_info[0], exception.InvalidTemplateSection)
self.assertEqual('The template section is invalid: output',
str(ex._exc_info[1]))
def test_unimplemented_property(self):
t = template_format.parse(test_template_unimplemented_property)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual(
{'Error': 'Property SourceDestCheck not implemented yet'},
res)
def test_invalid_deletion_policy(self):
t = template_format.parse(test_template_invalid_deletion_policy)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual({'Error': 'Invalid DeletionPolicy Destroy'}, res)
def test_snapshot_deletion_policy(self):
t = template_format.parse(test_template_snapshot_deletion_policy)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual(
{'Error': 'Snapshot DeletionPolicy not supported'}, res)
@skipIf(try_import('cinderclient.v1.volume_backups') is None,
'unable to import volume_backups')
def test_volume_snapshot_deletion_policy(self):
t = template_format.parse(test_template_volume_snapshot)
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, t, {}))
self.assertEqual({'Description': u'test.', 'Parameters': {}}, res)
def test_validate_template_without_resources(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
''')
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, hot_tpl, {}))
self.assertEqual({'Error': 'At least one Resources member '
'must be defined.'}, res)
def test_validate_template_with_invalid_resource_type(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
Type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, hot_tpl, {}))
self.assertEqual({'Error': 'u\'"Type" is not a valid keyword '
'inside a resource definition\''}, res)
def test_validate_template_with_invalid_resource_properties(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
Properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, hot_tpl, {}))
self.assertEqual({'Error': 'u\'"Properties" is not a valid keyword '
'inside a resource definition\''}, res)
    def test_validate_template_with_invalid_resource_metadata(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
Metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, hot_tpl, {}))
self.assertEqual({'Error': 'u\'"Metadata" is not a valid keyword '
'inside a resource definition\''}, res)
def test_validate_template_with_invalid_resource_depends_on(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
DependsOn: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, hot_tpl, {}))
self.assertEqual({'Error': 'u\'"DependsOn" is not a valid keyword '
'inside a resource definition\''}, res)
    def test_validate_template_with_invalid_resource_deletion_policy(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
DeletionPolicy: dummy
update_policy:
foo: bar
''')
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, hot_tpl, {}))
self.assertEqual({'Error': 'u\'"DeletionPolicy" is not a valid '
'keyword inside a resource definition\''},
res)
def test_validate_template_with_invalid_resource_update_policy(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
UpdatePolicy:
foo: bar
''')
self.m.StubOutWithMock(service.EngineListener, 'start')
service.EngineListener.start().AndReturn(None)
self.m.ReplayAll()
engine = service.EngineService('a', 't')
res = dict(engine.validate_template(None, hot_tpl, {}))
self.assertEqual({'Error': 'u\'"UpdatePolicy" is not a valid '
'keyword inside a resource definition\''},
res)
def test_unregistered_key(self):
t = template_format.parse(test_unregistered_key)
template = parser.Template(t)
params = {'KeyName': 'not_registered'}
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment(params))
self.m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().AndReturn(self.fc)
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
clients.OpenStackClients.nova().AndReturn(self.fc)
self.m.ReplayAll()
resource = stack['Instance']
self.assertRaises(exception.StackValidationFailed, resource.validate)
def test_unregistered_image(self):
t = template_format.parse(test_template_image)
template = parser.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment({'KeyName': 'test'}))
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
clients.OpenStackClients.nova().AndReturn(self.fc)
self.m.ReplayAll()
resource = stack['Instance']
self.assertRaises(exception.StackValidationFailed, resource.validate)
self.m.VerifyAll()
def test_duplicated_image(self):
t = template_format.parse(test_template_image)
template = parser.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment({'KeyName': 'test'}))
image_type = collections.namedtuple("Image", ("id", "name"))
image_list = [image_type(id='768b5464-3df5-4abf-be33-63b60f8b99d0',
name='image_name'),
image_type(id='a57384f5-690f-48e1-bf46-c4291e6c887e',
name='image_name')]
self.m.StubOutWithMock(self.fc.images, 'list')
self.fc.images.list().AndReturn(image_list)
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
clients.OpenStackClients.nova().AndReturn(self.fc)
self.m.ReplayAll()
resource = stack['Instance']
self.assertRaises(exception.StackValidationFailed,
resource.validate)
self.m.VerifyAll()
def test_invalid_security_groups_with_nics(self):
t = template_format.parse(test_template_invalid_secgroups)
template = parser.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment({'KeyName': 'test'}))
image_type = collections.namedtuple("Image", ("id", "name"))
image_list = [image_type(id='768b5464-3df5-4abf-be33-63b60f8b99d0',
name='image_name')]
self.m.StubOutWithMock(self.fc.images, 'list')
self.fc.images.list().AndReturn(image_list)
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
resource = stack['Instance']
self.assertRaises(exception.ResourcePropertyConflict,
resource.validate)
def test_invalid_security_group_ids_with_nics(self):
t = template_format.parse(test_template_invalid_secgroupids)
template = parser.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment({'KeyName': 'test'}))
image_type = collections.namedtuple("Image", ("id", "name"))
image_list = [image_type(id='768b5464-3df5-4abf-be33-63b60f8b99d0',
name='image_name')]
self.m.StubOutWithMock(self.fc.images, 'list')
self.fc.images.list().AndReturn(image_list)
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
resource = stack['Instance']
self.assertRaises(exception.ResourcePropertyConflict,
resource.validate)
def test_client_exception_from_nova_client(self):
t = template_format.parse(test_template_nova_client_exception)
template = parser.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
self.m.StubOutWithMock(self.fc.images, 'list')
self.fc.images.list().AndRaise(
clients.novaclient.exceptions.ClientException(500))
self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertRaises(exception.StackValidationFailed, stack.validate)
self.m.VerifyAll()
def test_validate_unique_logical_name(self):
t = template_format.parse(test_template_unique_logical_name)
template = parser.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment({'AName': 'test',
'KeyName': 'test'}))
self.assertRaises(exception.StackValidationFailed, stack.validate)
def test_validate_duplicate_parameters_in_group(self):
t = template_format.parse(test_template_duplicate_parameters)
template = HOTemplate(t)
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment({
'KeyName': 'test',
'ImageId': 'sometestid',
'db_password': 'Pass123'
}))
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('The InstanceType parameter must be assigned to '
'one Parameter Group only.'), str(exc))
def test_validate_invalid_parameter_in_group(self):
t = template_format.parse(test_template_invalid_parameter_name)
template = HOTemplate(t)
stack = parser.Stack(self.ctx, 'test_stack', template,
environment.Environment({
'KeyName': 'test',
'ImageId': 'sometestid',
'db_password': 'Pass123'}))
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('The Parameter name (SomethingNotHere) does not '
'reference an existing parameter.'), str(exc))
def test_validate_no_parameters_in_group(self):
t = template_format.parse(test_template_no_parameters)
template = HOTemplate(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameters must be provided for each Parameter '
'Group.'), str(exc))
| NeCTAR-RC/heat | heat/tests/test_validate.py | Python | apache-2.0 | 48,353 |
import vtk
class VtkAdaptor:
def __init__(self, **kwargs):
self.point_count = 0
self.face_count = 0
self.points = None
self.polys = None
self.poly_data = None
self.scalars = None
def create_empty_mesh(self):
self.pcoords = vtk.vtkFloatArray()
self.pcoords.SetNumberOfComponents(3)
self.points = vtk.vtkPoints()
self.polys = vtk.vtkCellArray()
self.poly_data = vtk.vtkPolyData()
def initialize_colors(self):
self.scalars = vtk.vtkFloatArray()
self.scalars.SetNumberOfComponents(1)
self.scalars.SetNumberOfTuples(self.face_count)
def create_vert(self, coords):
self.pcoords.InsertNextTuple3(*coords)
index = self.point_count
self.point_count += 1
return index
def create_face(self, verts):
self.polys.InsertNextCell(len(verts), verts)
index = self.face_count
self.face_count += 1
return index
def color_face(self, face, color_index):
self.scalars.SetTuple1(face, color_index)
def finish_mesh(self):
self.points.SetData(self.pcoords)
self.poly_data.SetPoints(self.points)
self.poly_data.SetPolys(self.polys)
if self.scalars:
self.poly_data.GetCellData().SetScalars(self.scalars)
def get_mesh(self):
return self.poly_data
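# Editor's usage sketch (not part of the original adaptor): a minimal
# illustration of the intended call order -- create_empty_mesh(), then
# create_vert()/create_face(), then finish_mesh(). The helper name and the
# triangle coordinates below are illustrative assumptions, not library API.
def _example_build_triangle():
    adaptor = VtkAdaptor()
    adaptor.create_empty_mesh()
    # Three corners of a unit right triangle in the XY plane.
    corners = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
    verts = [adaptor.create_vert(coords) for coords in corners]
    adaptor.create_face(verts)
    adaptor.finish_mesh()
    return adaptor.get_mesh()  # vtkPolyData with a single triangular cell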
|
cwant/tessagon
|
tessagon/adaptors/vtk_adaptor.py
|
Python
|
apache-2.0
| 1,395
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
from dogpile.cache import api
from dogpile.cache import util as dp_util
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from oslo_cache import exception
from oslo_cache._i18n import _, _LW
NO_VALUE = api.NO_VALUE
LOG = log.getLogger(__name__)
class MongoCacheBackend(api.CacheBackend):
"""A MongoDB based caching backend implementing dogpile backend APIs.
Arguments accepted in the arguments dictionary:
:param db_hosts: string (required), hostname or IP address of the
MongoDB server instance. This can be a single MongoDB connection URI,
or a list of MongoDB connection URIs.
:param db_name: string (required), the name of the database to be used.
    :param cache_collection: string (required), the name of the collection
        used to store cached data.
        *Note:* A different collection name can be provided if there is a
        need to create a separate container (i.e. collection) for cache
        data, so region configuration is done per collection.
    The following are optional parameters for MongoDB backend configuration:
:param username: string, the name of the user to authenticate.
:param password: string, the password of the user to authenticate.
:param max_pool_size: integer, the maximum number of connections that the
pool will open simultaneously. By default the pool size is 10.
    :param w: integer, write acknowledgement for the MongoDB client.
        If not provided, no default is set on MongoDB and write
        acknowledgement behavior follows the MongoDB default. This parameter
        name is the same as the one used in the MongoDB docs. The value is
        specified at the collection level, so it applies to
        `cache_collection` db write operations.
If this is a replica set, write operations will block until they have
been replicated to the specified number or tagged set of servers.
Setting w=0 disables write acknowledgement and all other write concern
options.
    :param read_preference: string, the read preference mode for the MongoDB
        client. Expected value is ``primary``, ``primaryPreferred``,
        ``secondary``, ``secondaryPreferred``, or ``nearest``. This
        read_preference is specified at the collection level, so it applies
        to `cache_collection` db read operations.
    :param use_replica: boolean, flag to indicate whether a replica client
        should be used. Default is `False`. A `replicaset_name` value is
        required if `True`.
:param replicaset_name: string, name of replica set.
Becomes required if `use_replica` is `True`
:param son_manipulator: string, name of class with module name which
implements MongoDB SONManipulator.
Default manipulator used is :class:`.BaseTransform`.
        This manipulator is added per database. In multiple cache
        configurations, the manipulator name should be the same if the same
        database name ``db_name`` is used in those configurations.
        SONManipulator is used to manipulate custom data types as they are
        saved to or retrieved from MongoDB. A custom implementation is only
        needed if the cached data is a custom class that needs
        transformations when saving to or reading from the db. If the
        dogpile cached value contains only built-in data types, then the
        BaseTransform class is sufficient, as it already handles the dogpile
        CachedValue class transformation.
    :param mongo_ttl_seconds: integer, interval in seconds indicating the
        maximum time-to-live value.
        If the value is greater than 0, then the cache_collection is assumed
        to be of TTL type (with an index on the 'doc_date' field).
        By default, the value is -1 and TTL is disabled.
Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
.. NOTE::
            This parameter is different from Dogpile's own
expiration_time, which is the number of seconds after which Dogpile
will consider the value to be expired. When Dogpile considers a
value to be expired, it continues to use the value until generation
of a new value is complete, when using CacheRegion.get_or_create().
Therefore, if you are setting `mongo_ttl_seconds`, you will want to
make sure it is greater than expiration_time by at least enough
seconds for new values to be generated, else the value would not
be available during a regeneration, forcing all threads to wait for
a regeneration each time a value expires.
    :param ssl: boolean, if True, create the connection to the server
        using SSL. Default is `False`. Client SSL connection parameters
        depend on the server-side SSL setup. For further reference on SSL
        configuration:
<http://docs.mongodb.org/manual/tutorial/configure-ssl/>
:param ssl_keyfile: string, the private keyfile used to identify the
local connection against mongod. If included with the certfile then
only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
:param ssl_certfile: string, the certificate file used to identify the
local connection against mongod. Used only when `ssl` is `True`.
:param ssl_ca_certs: string, the ca_certs file contains a set of
concatenated 'certification authority' certificates, which are used to
validate certificates passed from the other end of the connection.
Used only when `ssl` is `True`.
:param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
a certificate is required from the other side of the connection, and
whether it will be validated if provided. It must be one of the three
values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
(not required, but validated if provided), or
``ssl.CERT_REQUIRED`` (required and validated). If the value of this
parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
must point to a file of CA certificates. Used only when `ssl`
is `True`.
    The rest of the arguments are passed to mongo calls for read, write and
    remove operations, so related options can be specified for these
    operations.
    Further details of the various supported arguments can be found at
    <http://api.mongodb.org/python/current/api/pymongo/>
"""
def __init__(self, arguments):
self.api = MongoApi(arguments)
@dp_util.memoized_property
def client(self):
"""Initializes MongoDB connection and collection defaults.
This initialization is done only once and performed as part of lazy
inclusion of MongoDB dependency i.e. add imports only if related
backend is used.
:return: :class:`.MongoApi` instance
"""
self.api.get_cache_collection()
return self.api
def get(self, key):
value = self.client.get(key)
if value is None:
return NO_VALUE
else:
return value
def get_multi(self, keys):
values = self.client.get_multi(keys)
return [
NO_VALUE if key not in values
else values[key] for key in keys
]
def set(self, key, value):
self.client.set(key, value)
def set_multi(self, mapping):
self.client.set_multi(mapping)
def delete(self, key):
self.client.delete(key)
def delete_multi(self, keys):
self.client.delete_multi(keys)
class MongoApi(object):
"""Class handling MongoDB specific functionality.
This class uses PyMongo APIs internally to create database connection
with configured pool size, ensures unique index on key, does database
authentication and ensure TTL collection index if configured so.
This class also serves as handle to cache collection for dogpile cache
APIs.
    In a single deployment, multiple cache configurations can be defined. In
    the case of multiple cache collections being used, the db client
    connection pool is shared when the cache collections are within the same
    database.
"""
# class level attributes for re-use of db client connection and collection
_DB = {} # dict of db_name: db connection reference
_MONGO_COLLS = {} # dict of cache_collection : db collection reference
def __init__(self, arguments):
self._init_args(arguments)
self._data_manipulator = None
def _init_args(self, arguments):
"""Helper logic for collecting and parsing MongoDB specific arguments.
The arguments passed in are separated out in connection specific
setting and rest of arguments are passed to create/update/delete
db operations.
"""
self.conn_kwargs = {} # connection specific arguments
self.hosts = arguments.pop('db_hosts', None)
if self.hosts is None:
msg = _('db_hosts value is required')
raise exception.ConfigurationError(msg)
self.db_name = arguments.pop('db_name', None)
if self.db_name is None:
msg = _('database db_name is required')
raise exception.ConfigurationError(msg)
self.cache_collection = arguments.pop('cache_collection', None)
if self.cache_collection is None:
msg = _('cache_collection name is required')
raise exception.ConfigurationError(msg)
self.username = arguments.pop('username', None)
self.password = arguments.pop('password', None)
self.max_pool_size = arguments.pop('max_pool_size', 10)
self.w = arguments.pop('w', -1)
try:
self.w = int(self.w)
except ValueError:
msg = _('integer value expected for w (write concern attribute)')
raise exception.ConfigurationError(msg)
self.read_preference = arguments.pop('read_preference', None)
self.use_replica = arguments.pop('use_replica', False)
if self.use_replica:
if arguments.get('replicaset_name') is None:
msg = _('replicaset_name required when use_replica is True')
raise exception.ConfigurationError(msg)
self.replicaset_name = arguments.get('replicaset_name')
self.son_manipulator = arguments.pop('son_manipulator', None)
# set if mongo collection needs to be TTL type.
# This needs to be max ttl for any cache entry.
# By default, -1 means don't use TTL collection.
# With ttl set, it creates related index and have doc_date field with
# needed expiration interval
self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
try:
self.ttl_seconds = int(self.ttl_seconds)
except ValueError:
msg = _('integer value expected for mongo_ttl_seconds')
raise exception.ConfigurationError(msg)
self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
if self.conn_kwargs['ssl']:
ssl_keyfile = arguments.pop('ssl_keyfile', None)
ssl_certfile = arguments.pop('ssl_certfile', None)
ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
if ssl_keyfile:
self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
if ssl_certfile:
self.conn_kwargs['ssl_certfile'] = ssl_certfile
if ssl_ca_certs:
self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
if ssl_cert_reqs:
self.conn_kwargs['ssl_cert_reqs'] = (
self._ssl_cert_req_type(ssl_cert_reqs))
# rest of arguments are passed to mongo crud calls
self.meth_kwargs = arguments
def _ssl_cert_req_type(self, req_type):
try:
import ssl
except ImportError:
raise exception.ConfigurationError(_('no ssl support available'))
req_type = req_type.upper()
try:
return {
'NONE': ssl.CERT_NONE,
'OPTIONAL': ssl.CERT_OPTIONAL,
'REQUIRED': ssl.CERT_REQUIRED
}[req_type]
except KeyError:
msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
'"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
raise exception.ConfigurationError(msg)
def _get_db(self):
# defer imports until backend is used
global pymongo
import pymongo
if self.use_replica:
connection = pymongo.MongoReplicaSetClient(
host=self.hosts, replicaSet=self.replicaset_name,
max_pool_size=self.max_pool_size, **self.conn_kwargs)
else: # used for standalone node or mongos in sharded setup
connection = pymongo.MongoClient(
host=self.hosts, max_pool_size=self.max_pool_size,
**self.conn_kwargs)
database = getattr(connection, self.db_name)
        self._assign_data_manipulator()
database.add_son_manipulator(self._data_manipulator)
if self.username and self.password:
database.authenticate(self.username, self.password)
return database
    def _assign_data_manipulator(self):
if self._data_manipulator is None:
if self.son_manipulator:
self._data_manipulator = importutils.import_object(
self.son_manipulator)
else:
self._data_manipulator = BaseTransform()
def _get_doc_date(self):
if self.ttl_seconds > 0:
expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
doc_date = timeutils.utcnow() + expire_delta
else:
doc_date = timeutils.utcnow()
return doc_date
def get_cache_collection(self):
if self.cache_collection not in self._MONGO_COLLS:
global pymongo
import pymongo
# re-use db client connection if already defined as part of
# earlier dogpile cache configuration
if self.db_name not in self._DB:
self._DB[self.db_name] = self._get_db()
coll = getattr(self._DB[self.db_name], self.cache_collection)
            self._assign_data_manipulator()
if self.read_preference:
# pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name
f = getattr(pymongo.read_preferences,
'read_pref_mode_from_name', None)
if not f:
f = pymongo.read_preferences.mongos_enum
self.read_preference = f(self.read_preference)
coll.read_preference = self.read_preference
if self.w > -1:
coll.write_concern['w'] = self.w
if self.ttl_seconds > 0:
kwargs = {'expireAfterSeconds': self.ttl_seconds}
coll.ensure_index('doc_date', cache_for=5, **kwargs)
else:
self._validate_ttl_index(coll, self.cache_collection,
self.ttl_seconds)
self._MONGO_COLLS[self.cache_collection] = coll
return self._MONGO_COLLS[self.cache_collection]
def _get_cache_entry(self, key, value, meta, doc_date):
"""MongoDB cache data representation.
        The cache key is stored in the ``_id`` field, since MongoDB creates a
        unique index on this field by default, so there is no need to create
        a separate field and index for storing the cache key. Cache data has
        an additional ``doc_date`` field for MongoDB TTL collection support.
"""
return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
"""Checks if existing TTL index is removed on a collection.
This logs warning when existing collection has TTL index defined and
new cache configuration tries to disable index with
``mongo_ttl_seconds < 0``. In that case, existing index needs
to be addressed first to make new configuration effective.
Refer to MongoDB documentation around TTL index for further details.
"""
indexes = collection.index_information()
for indx_name, index_data in six.iteritems(indexes):
if all(k in index_data for k in ('key', 'expireAfterSeconds')):
existing_value = index_data['expireAfterSeconds']
fld_present = 'doc_date' in index_data['key'][0]
if fld_present and existing_value > -1 and ttl_seconds < 1:
msg = _LW('TTL index already exists on db collection '
'<%(c_name)s>, remove index <%(indx_name)s> '
'first to make updated mongo_ttl_seconds value '
'to be effective')
LOG.warn(msg, {'c_name': coll_name,
'indx_name': indx_name})
def get(self, key):
        criteria = {'_id': key}
        result = self.get_cache_collection().find_one(spec_or_id=criteria,
**self.meth_kwargs)
if result:
return result['value']
else:
return None
def get_multi(self, keys):
db_results = self._get_results_as_dict(keys)
return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
def _get_results_as_dict(self, keys):
        criteria = {'_id': {'$in': keys}}
        db_results = self.get_cache_collection().find(spec=criteria,
**self.meth_kwargs)
return {doc['_id']: doc for doc in db_results}
def set(self, key, value):
doc_date = self._get_doc_date()
ref = self._get_cache_entry(key, value.payload, value.metadata,
doc_date)
spec = {'_id': key}
# find and modify does not have manipulator support
# so need to do conversion as part of input document
ref = self._data_manipulator.transform_incoming(ref, self)
self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
**self.meth_kwargs)
def set_multi(self, mapping):
"""Insert multiple documents specified as key, value pairs.
In this case, multiple documents can be added via insert provided they
do not exist.
Update of multiple existing documents is done one by one
"""
doc_date = self._get_doc_date()
insert_refs = []
update_refs = []
existing_docs = self._get_results_as_dict(list(mapping.keys()))
for key, value in mapping.items():
ref = self._get_cache_entry(key, value.payload, value.metadata,
doc_date)
if key in existing_docs:
ref['_id'] = existing_docs[key]['_id']
update_refs.append(ref)
else:
insert_refs.append(ref)
if insert_refs:
self.get_cache_collection().insert(insert_refs, manipulate=True,
**self.meth_kwargs)
for upd_doc in update_refs:
self.get_cache_collection().save(upd_doc, manipulate=True,
**self.meth_kwargs)
def delete(self, key):
        criteria = {'_id': key}
        self.get_cache_collection().remove(spec_or_id=criteria,
**self.meth_kwargs)
def delete_multi(self, keys):
        criteria = {'_id': {'$in': keys}}
        self.get_cache_collection().remove(spec_or_id=criteria,
**self.meth_kwargs)
@six.add_metaclass(abc.ABCMeta)
class AbstractManipulator(object):
"""Abstract class with methods which need to be implemented for custom
manipulation.
Adding this as a base class for :class:`.BaseTransform` instead of adding
import dependency of pymongo specific class i.e.
`pymongo.son_manipulator.SONManipulator` and using that as base class.
This is done to avoid pymongo dependency if MongoDB backend is not used.
"""
@abc.abstractmethod
def transform_incoming(self, son, collection):
"""Used while saving data to MongoDB.
:param son: the SON object to be inserted into the database
:param collection: the collection the object is being inserted into
:returns: transformed SON object
"""
raise NotImplementedError() # pragma: no cover
@abc.abstractmethod
def transform_outgoing(self, son, collection):
"""Used while reading data from MongoDB.
:param son: the SON object being retrieved from the database
:param collection: the collection this object was stored in
:returns: transformed SON object
"""
raise NotImplementedError() # pragma: no cover
def will_copy(self):
"""Will this SON manipulator make a copy of the incoming document?
Derived classes that do need to make a copy should override this
method, returning `True` instead of `False`.
:returns: boolean
"""
return False
class BaseTransform(AbstractManipulator):
"""Base transformation class to store and read dogpile cached data
from MongoDB.
This is needed as dogpile internally stores data as a custom class
i.e. dogpile.cache.api.CachedValue
Note: Custom manipulator needs to always override ``transform_incoming``
and ``transform_outgoing`` methods. MongoDB manipulator logic specifically
checks that overridden method in instance and its super are different.
"""
def transform_incoming(self, son, collection):
"""Used while saving data to MongoDB."""
for (key, value) in list(son.items()):
if isinstance(value, api.CachedValue):
son[key] = value.payload # key is 'value' field here
son['meta'] = value.metadata
elif isinstance(value, dict): # Make sure we recurse into sub-docs
son[key] = self.transform_incoming(value, collection)
return son
def transform_outgoing(self, son, collection):
"""Used while reading data from MongoDB."""
metadata = None
        # make sure it is a top-level dictionary with all expected field
        # names present
if isinstance(son, dict) and all(k in son for k in
('_id', 'value', 'meta', 'doc_date')):
payload = son.pop('value', None)
metadata = son.pop('meta', None)
for (key, value) in list(son.items()):
if isinstance(value, dict):
son[key] = self.transform_outgoing(value, collection)
if metadata is not None:
son['value'] = api.CachedValue(payload, metadata)
return son
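# Editor's usage sketch (not part of the original backend): a minimal,
# self-contained illustration of the BaseTransform round trip -- the incoming
# transform unpacks a dogpile ``CachedValue`` into plain fields, and the
# outgoing transform rebuilds it. Only ``api.CachedValue`` from dogpile is
# assumed; no MongoDB connection is required. The helper name and sample data
# are illustrative, not part of the library.
def _example_base_transform_round_trip():
    transform = BaseTransform()
    cached = api.CachedValue({'user': 'alice'}, {'ct': 1.0, 'v': 1})
    doc = {'_id': 'k1', 'value': cached, 'meta': None, 'doc_date': None}
    # On the way in, 'value' becomes the raw payload and 'meta' the metadata.
    stored = transform.transform_incoming(dict(doc), collection=None)
    # On the way out, 'value' is reassembled into a CachedValue again.
    restored = transform.transform_outgoing(dict(stored), collection=None)
    return restored['value'] == cached  # True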
|
citrix-openstack-build/oslo.cache
|
oslo_cache/backends/mongo.py
|
Python
|
apache-2.0
| 23,697
|
#!/usr/bin/env python
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud import firestore
from google.cloud import secretmanager
from google.api_core.exceptions import NotFound
from base64 import b64decode
from base64 import b64encode
import re
import os
import sys
import hmac
import argparse
import secrets
class Hasher():
def __init__(self, argv=None):
parser = argparse.ArgumentParser(description='Hash and upload SSNs to Firestore')
parser.add_argument('command',
help="Command to execute",
type=str.lower,
choices=["upload", "verify", "create_key"])
parser.add_argument('-p', '--project',
help="Project ID where the Firestore DB should be initialized")
parser.add_argument('-S', '--secret',
help="Fully qualified secret name where the base64 encoded hash key lives")
parser.add_argument('-s', '--salt',
help="Salt for use with the HMAC hash.")
parser.add_argument('-i', '--infile',
help="Newline-separated list of SSNs to be stored in Firestore. They will be normalized by removing the dash character")
parser.add_argument('--region',
help="The region for Firestore. Use `gcloud app regions list` to list available regions",
default='us-west2')
parser.add_argument('--collection',
help="The collection name within the Firestore DB where the Hashed SSNs should be stored",
default='hashed_socials')
self.opts = parser.parse_args(argv)
self.sm = secretmanager.SecretManagerServiceClient()
self.secret_path = f'{self.opts.secret}/versions/latest'
def get_hash_key(self):
try:
version = self.sm.access_secret_version(self.secret_path)
return b64decode(version.payload.data)
except NotFound:
return None
def set_hash_key(self):
if self.get_hash_key():
return False
key = secrets.token_bytes(64)
b64 = b64encode(key)
self.sm.add_secret_version(self.opts.secret, {'data': b64 })
return True
def hash_ssn(self, ssn, key):
norm_ssn = ssn.strip().replace('-', '')
        if not re.fullmatch(r'[0-9]{9}', norm_ssn):
raise ValueError(f"Normalized SSN from {norm_ssn} is not a 9 digit number")
salt = self.opts.salt.encode('utf-8')
mac = hmac.new(key, msg=salt, digestmod='sha256')
mac.update(norm_ssn.encode('utf-8'))
return mac.hexdigest()
def run(self):
if self.opts.command == "create_key":
if self.set_hash_key():
print(f"Saved secret at {self.secret_path}")
else:
print (f"Hash key already exists at {self.secret_path}")
exit(0)
os.environ["GCLOUD_PROJECT"] = self.opts.project
os.system(f"gcloud alpha firestore databases create --project {self.opts.project} --region {self.opts.region}")
db = firestore.Client()
col = db.collection(self.opts.collection)
key = self.get_hash_key()
        if key is None:
raise NotFound("Hash key does not exist yet. Please run `hasher.py create_key --secret $SECRET` first")
if self.opts.command == "upload":
print("Hashing and uploading SSNs...")
for ssn in open(self.opts.infile):
digest = self.hash_ssn(ssn, key)
col.document(digest).set({u'exists': True})
print("Done!")
elif self.opts.command == 'verify':
print("Verifying and counting SSNs...")
count = 0
for ssn in open(self.opts.infile):
digest = self.hash_ssn(ssn, key)
doc = col.document(digest).get()
if doc.exists:
count += 1
print(f"Found {count} valid SSNs")
if __name__ == '__main__':
Hasher(sys.argv[1:]).run()
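# Editor's usage sketch: illustrative invocations of this script. The project
# ID, secret resource name, salt and input file below are placeholder
# assumptions, not values from the original repository:
#
#   python hasher.py create_key -S projects/my-proj/secrets/ssn-hash-key
#   python hasher.py upload -p my-proj \
#       -S projects/my-proj/secrets/ssn-hash-key -s my-salt -i ssns.txt
#   python hasher.py verify -p my-proj \
#       -S projects/my-proj/secrets/ssn-hash-key -s my-salt -i ssns.txt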
|
GoogleCloudPlatform/professional-services
|
examples/dataflow-dlp-hash-pipeline/scripts/hasher.py
|
Python
|
apache-2.0
| 4,114
|
# Copyright (c) 2014 LINBIT HA Solutions GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This driver connects Cinder to an installed DRBDmanage instance, see
http://oss.linbit.com/drbdmanage/
http://git.linbit.com/drbdmanage.git/
for more details.
"""
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LW
from cinder.volume import driver
from cinder import context
try:
import dbus
import drbdmanage.consts as dm_const
import drbdmanage.exceptions as dm_exc
import drbdmanage.utils as dm_utils
except ImportError:
dbus = None
dm_const = None
dm_exc = None
dm_utils = None
LOG = logging.getLogger(__name__)
drbd_opts = [
cfg.StrOpt('drbdmanage_redundancy',
default='2',
help='Number of nodes that should replicate the data.'),
# TODO(PM): offsite_redundancy?
# TODO(PM): choose DRBDmanage storage pool?
]
CONF = cfg.CONF
CONF.register_opts(drbd_opts)
CINDER_AUX_PROP_id = "cinder-id"
DM_VN_PREFIX = 'CV_' # sadly 2CV isn't allowed by DRBDmanage
class DrbdManageDriver(driver.VolumeDriver):
"""Cinder driver that uses DRBDmanage as data store.
"""
VERSION = '1.0.0'
drbdmanage_dbus_name = 'org.drbd.drbdmanaged'
drbdmanage_dbus_interface = '/interface'
def __init__(self, execute=None, *args, **kwargs):
self.empty_list = dbus.Array([], signature="a(ss)")
super(DrbdManageDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(drbd_opts)
if not self.drbdmanage_dbus_name:
self.drbdmanage_dbus_name = 'org.drbd.drbdmanaged'
if not self.drbdmanage_dbus_interface:
self.drbdmanage_dbus_interface = '/interface'
self.drbdmanage_redundancy = int(getattr(self.configuration,
'drbdmanage_redundancy', 1))
self.dm_control_vol = ".drbdctrl"
# Copied from the LVM driver, see
# I43190d1dac33748fe55fa00f260f32ab209be656
target_driver = \
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
LOG.debug('Attempting to initialize DRBD driver with the '
'following target_driver: %s',
target_driver)
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
def dbus_connect(self):
self.odm = dbus.SystemBus().get_object(self.drbdmanage_dbus_name,
self.drbdmanage_dbus_interface)
self.odm.ping()
def call_or_reconnect(self, fn, *args):
"""Call DBUS function; if it got disconnected,
try once to reconnect.
"""
try:
return fn(*args)
except dbus.DBusException as e:
LOG.warn(_LW("got disconnected; trying to reconnect. (%s)") %
six.text_type(e))
self.dbus_connect()
# Old function object is invalid, get new one.
return getattr(self.odm, fn._method_name)(*args)
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(DrbdManageDriver, self).do_setup(context)
self.dbus_connect()
def check_for_setup_error(self):
"""Verify that requirements are in place to use DRBDmanage driver."""
if not all((dbus, dm_exc, dm_const, dm_utils)):
msg = _('DRBDmanage driver setup error: some required '
'libraries (dbus, drbdmanage.*) not found.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if self.odm.ping() != 0:
message = _('Cannot ping DRBDmanage backend')
raise exception.VolumeBackendAPIException(data=message)
def _clean_uuid(self):
"""Returns a UUID string, WITHOUT braces."""
# Some uuid library versions put braces around the result!?
# We don't want them, just a plain [0-9a-f-]+ string.
id = str(uuid.uuid4())
return id.translate(None, "{}")
def _check_result(self, res, ignore=None, ret=0):
seen_success = False
seen_error = False
result = ret
for (code, fmt, arg_l) in res:
# convert from DBUS to Python
arg = dict(arg_l)
if ignore and code in ignore:
if not result:
result = code
continue
if code == dm_exc.DM_SUCCESS:
seen_success = True
continue
seen_error = _("Received error string: %s") % (fmt % arg)
if seen_error:
raise exception.VolumeBackendAPIException(data=seen_error)
if seen_success:
return ret
# by default okay - or the ignored error code.
return ret
# DRBDmanage works in kiB units; Cinder uses float GiB.
def _vol_size_to_dm(self, size):
return int(size * units.Gi / units.Ki)
def _vol_size_to_cinder(self, size):
return int(size * units.Ki / units.Gi)
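    # Editor's note -- a worked example of the conversions above, assuming
    # the binary units from oslo_utils (units.Ki = 2**10, units.Gi = 2**30):
    #   _vol_size_to_dm(1)           -> int(1 * 2**30 / 2**10) = 1048576 KiB
    #   _vol_size_to_cinder(1048576) -> int(1048576 * 2**10 / 2**30) = 1 GiB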
def is_clean_volume_name(self, name):
try:
if name.startswith(CONF.volume_name_template % "") and \
uuid.UUID(name[7:]) is not None:
return DM_VN_PREFIX + name[7:]
except ValueError:
return None
try:
if uuid.UUID(name) is not None:
return DM_VN_PREFIX + name
except ValueError:
return None
def _priv_hash_from_volume(self, volume):
return dm_utils.dict_to_aux_props({
CINDER_AUX_PROP_id: volume.name_id,
})
def snapshot_name_from_cinder_snapshot(self, snapshot):
sn_name = self.is_clean_volume_name(snapshot['id'])
return sn_name
def _res_and_vl_data_for_volume(self, volume, empty_ok=False):
"""A DRBD resource might consist of several "volumes"
(think consistency groups).
So we have to find the number of the volume within one resource.
Returns resource name, volume number, and resource
and volume properties.
"""
# If we get a string, use it as-is.
# Else it's a dictionary; then get the ID.
        if isinstance(volume, six.string_types):
v_uuid = volume
else:
v_uuid = volume.name_id
res, rl = self.call_or_reconnect(self.odm.list_volumes,
self.empty_list,
0,
dm_utils.dict_to_aux_props(
{CINDER_AUX_PROP_id: v_uuid}),
self.empty_list)
self._check_result(res)
if (not rl) or (len(rl) == 0):
if empty_ok:
LOG.debug("No volume %s found." % v_uuid)
return None, None, None, None
raise exception.VolumeBackendAPIException(
data=_("volume %s not found in drbdmanage") % v_uuid)
if len(rl) > 1:
raise exception.VolumeBackendAPIException(
data=_("multiple resources with name %s found by drbdmanage") %
v_uuid)
(r_name, r_props, vols) = rl[0]
if len(vols) != 1:
raise exception.VolumeBackendAPIException(
data=_("not exactly one volume with id %s") %
v_uuid)
(v_nr, v_props) = vols[0]
LOG.debug("volume %s is %s/%d; %s, %s" %
(v_uuid, r_name, v_nr, r_props, v_props))
return r_name, v_nr, r_props, v_props
def _resource_and_snap_data_from_snapshot(self, snapshot, empty_ok=False):
"""Find the DRBD Resource and the snapshot name
from the snapshot ID.
"""
s_uuid = snapshot['id']
res, rs = self.call_or_reconnect(self.odm.list_snapshots,
self.empty_list,
self.empty_list,
dm_utils.dict_to_aux_props(
{CINDER_AUX_PROP_id: s_uuid}),
self.empty_list)
self._check_result(res)
if (not rs) or (len(rs) == 0):
if empty_ok:
return None
else:
raise exception.VolumeBackendAPIException(
data=_("no snapshot with id %s found in drbdmanage") %
s_uuid)
if len(rs) > 1:
raise exception.VolumeBackendAPIException(
data=_("multiple resources with snapshot ID %s found") %
s_uuid)
(r_name, snaps) = rs[0]
if len(snaps) != 1:
raise exception.VolumeBackendAPIException(
data=_("not exactly one snapshot with id %s") % s_uuid)
(s_name, s_props) = snaps[0]
LOG.debug("snapshot %s is %s/%s" % (s_uuid, r_name, s_name))
return r_name, s_name, s_props
def _resource_name_volnr_for_volume(self, volume, empty_ok=False):
res, vol, _, _ = self._res_and_vl_data_for_volume(volume, empty_ok)
return res, vol
def local_path(self, volume):
dres, dvol = self._resource_name_volnr_for_volume(volume)
res, data = self.call_or_reconnect(self.odm.text_query,
[dm_const.TQ_GET_PATH,
dres,
str(dvol)])
self._check_result(res)
if len(data) == 1:
return data[0]
message = _('Got bad path information from DRBDmanage! (%s)') % data
raise exception.VolumeBackendAPIException(data=message)
def create_volume(self, volume):
"""Creates a DRBD resource.
We address it later on via the ID that gets stored
as a private property.
"""
# TODO(PM): consistency groups
dres = self.is_clean_volume_name(volume.name_id)
LOG.debug("create vol: make %s" % dres)
res = self.call_or_reconnect(self.odm.create_resource,
dres,
self.empty_list)
exist = self._check_result(res, ignore=[dm_exc.DM_EEXIST], ret=None)
if exist == dm_exc.DM_EEXIST:
# Volume already exists, eg. because deploy gave an error
# on a previous try (like ENOSPC)
pass
else:
props = self._priv_hash_from_volume(volume)
# TODO(PM): properties - redundancy, etc
res = self.call_or_reconnect(self.odm.create_volume,
dres,
self._vol_size_to_dm(volume['size']),
props)
self._check_result(res)
res = self.call_or_reconnect(self.odm.auto_deploy,
dres, self.drbdmanage_redundancy,
0, True)
self._check_result(res)
return 0
def volume_exists_on_current_host(self, lv_id):
"""Checks if a volume whose name_id is equal to lv_id
already exists on host
"""
ctx = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctx, self.host)
for v in volumes:
if v.name_id == lv_id:
LOG.debug("id %s exists on volume %s" % lv_id, v['id'])
return True
return False
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_id = existing_ref['source-id']
return { 'name_id': lv_id }
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'source-id': <id of LV>}
"""
# Check that the reference is valid
if 'source-id' not in existing_ref:
            reason = _('Reference must contain source-id element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_id = existing_ref['source-id']
if self.volume_exists_on_current_host(lv_id):
kwargs = {'existing_ref': lv_id,
'reason': 'Specified logical volume already managed'}
raise exception.ManageExistingInvalidReference(**kwargs)
try:
(r_name, v_nr, r_props, v_props) =\
self._res_and_vl_data_for_volume(lv_id)
        except Exception:
kwargs = {'existing_ref': lv_id,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
        # DRBDmanage reports the volume size in KiB; convert it to the
        # integer GiB value that Cinder expects.
return self._vol_size_to_cinder(int(str(v_props['vol_size'])))
def delete_volume(self, volume):
"""Deletes a resource."""
dres, dvol = self._resource_name_volnr_for_volume(
volume,
empty_ok=True)
if not dres:
# OK, already gone.
return 0
# TODO(PM): check if in use? Ask whether Primary, or just check result?
res = self.call_or_reconnect(self.odm.remove_volume, dres, dvol, False)
return self._check_result(res, ignore=[dm_exc.DM_ENOENT])
# TODO(PM): delete resource if empty?
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug("create vol from snap: from %s make %s" %
(snapshot['id'], volume.name_id))
# TODO(PM): Consistency groups.
dres, sname, sprop = self._resource_and_snap_data_from_snapshot(
snapshot)
new_res = self.is_clean_volume_name(volume.name_id)
r_props = self.empty_list
v_props = self._priv_hash_from_volume(volume)
res = self.call_or_reconnect(self.odm.restore_snapshot,
new_res,
dres,
sname,
r_props,
v_props)
return self._check_result(res, ignore=[dm_exc.DM_ENOENT])
def create_cloned_volume(self, volume, src_vref):
temp_id = self._clean_uuid()
snapshot = {'id': temp_id}
self.create_snapshot(dict(snapshot.items() +
[('volume_id', src_vref.name_id)]))
self.create_volume_from_snapshot(volume, snapshot)
self.delete_snapshot(snapshot)
def _update_volume_stats(self):
LOG.debug("Updating volume stats")
data = {}
data["vendor_name"] = 'LINBIT'
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = "iSCSI"
# This has to match the name set in the cinder volume driver spec,
# so keep it lowercase
data["volume_backend_name"] = "drbdmanage"
data["pools"] = []
res, free, total = self.call_or_reconnect(self.odm.cluster_free_query,
self.drbdmanage_redundancy)
self._check_result(res)
location_info =\
('DrbdManageDriver:%(cvol)s:%(dbus)s' %
{'cvol': self.dm_control_vol,
'dbus': self.drbdmanage_dbus_name})
# TODO(PM): multiple DRBDmanage instances and/or multiple pools
single_pool = {}
single_pool.update(dict(
pool_name=data["volume_backend_name"],
free_capacity_gb=self._vol_size_to_cinder(free),
total_capacity_gb=self._vol_size_to_cinder(total),
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False))
data["pools"].append(single_pool)
self._stats = data
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
dres, dvol = self._resource_name_volnr_for_volume(volume)
res = self.call_or_reconnect(self.odm.resize_volume,
dres, dvol, -1,
{"size": self._vol_size_to_dm(new_size)},
0)
self._check_result(res)
return 0
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
sn_name = self.snapshot_name_from_cinder_snapshot(snapshot)
LOG.debug("create snapshot: from %s make %s" %
(snapshot['volume_id'], snapshot['id']))
dres, dvol = self._resource_name_volnr_for_volume(
snapshot["volume_id"])
res, data = self.call_or_reconnect(self.odm.list_assignments,
self.empty_list,
[dres],
0,
self.empty_list,
self.empty_list)
self._check_result(res)
        nodes = [d[0] for d in data]
if len(nodes) < 1:
raise exception.VolumeBackendAPIException(
                _('Snapshot resource "%s" is not deployed on any node') %
                (dres))
props = self._priv_hash_from_volume(snapshot)
res = self.call_or_reconnect(self.odm.create_snapshot,
dres, sn_name, nodes, props)
return self._check_result(res)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
force = False # during testing
dres, sname, _ = self._resource_and_snap_data_from_snapshot(
snapshot, empty_ok=not force)
if not dres:
# resource already gone?
if force:
return 0
raise exception.VolumeBackendAPIException(
_('Resource "%(res)s" for snapshot "%(sn)s" not found') %
{"res": dres, "sn": sname})
res = self.call_or_reconnect(self.odm.remove_snapshot,
dres, sname, force)
return self._check_result(res, ignore=[dm_exc.DM_ENOENT])
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = self.local_path(volume)
return self.target_driver.ensure_export(
context,
volume,
volume_path)
def create_export(self, context, volume):
volume_path = self.local_path(volume)
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
return self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return None
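# Editor's configuration sketch: a hedged illustration of how a cinder.conf
# backend section for this driver might look. The module path and section
# name are assumptions for illustration, not taken from this repository:
#
#   [drbdmanage-1]
#   volume_driver = cinder.volume.drivers.drbdmanagedrv.DrbdManageDriver
#   volume_backend_name = drbdmanage
#   drbdmanage_redundancy = 2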
|
os-cloud-storage/openstack-workload-disaster-recovery
|
contrib/cinder/volume/drivers/drbdmanagedrv.kilo.py
|
Python
|
apache-2.0
| 21,317
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
# TODO(ebrevdo): Remove once _linear is fully deprecated.
# pylint: disable=protected-access
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear as linear
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
# pylint: enable=protected-access
class RNNCellTest(test.TestCase):
def testLinear(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0)):
x = array_ops.zeros([1, 2])
l = linear([x], 2, False)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = linear([x], 2, False)
# But you can create a new one in a new scope and share the variables.
with variable_scope.variable_scope("l1") as new_scope:
l1 = linear([x], 2, False)
with variable_scope.variable_scope(new_scope, reuse=True):
linear([l1], 2, False)
self.assertEqual(len(variables_lib.trainable_variables()), 2)
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = core_rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual(
["root/basic_rnn_cell/%s:0"
% core_rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0"
% core_rnn_cell_impl._BIAS_VARIABLE_NAME],
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellNotTrainable(self):
with self.test_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = core_rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual(
["root/basic_rnn_cell/%s:0"
% core_rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0"
% core_rnn_cell_impl._BIAS_VARIABLE_NAME],
[v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = core_rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test GRUCell with input_size != num_units.
m = array_ops.zeros([1, 2])
g, _ = core_rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g],
{x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testBasicLSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 8])
cell = core_rnn_cell_impl.MultiRNNCell(
[core_rnn_cell_impl.BasicLSTMCell(
2, state_is_tuple=False) for _ in range(2)],
state_is_tuple=False)
g, out_m = cell(x, m)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0"
% core_rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0"
% core_rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0"
% core_rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0"
% core_rnn_cell_impl._BIAS_VARIABLE_NAME]
self.assertEqual(
expected_variable_names, [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem = np.array([[
0.68967271, 0.68967271, 0.44848421, 0.44848421, 0.39897051,
0.39897051, 0.24024698, 0.24024698
]])
self.assertAllClose(res[1], expected_mem)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
m = array_ops.zeros([1, 4])
g, out_m = core_rnn_cell_impl.BasicLSTMCell(
2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1., 1.]]),
m.name: 0.1 * np.ones([1, 4])})
self.assertEqual(len(res), 2)
def testBasicLSTMCellStateTupleType(self):
with self.test_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = core_rnn_cell_impl.MultiRNNCell(
[core_rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], core_rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], core_rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, core_rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, core_rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(
isinstance(zero_state[0], core_rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(zero_state[1], core_rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, core_rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, core_rnn_cell_impl.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = core_rnn_cell_impl.MultiRNNCell(
[core_rnn_cell_impl.BasicLSTMCell(
2, state_is_tuple=False) for _ in range(2)],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = core_rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([output, state], {
x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testLSTMCellVariables(self):
with self.test_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = core_rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
        self.assertEqual(variables[0].op.name, "root/lstm_cell/weights")
        self.assertEqual(variables[1].op.name, "root/lstm_cell/biases")
        self.assertEqual(variables[2].op.name,
                         "root/lstm_cell/projection/weights")
def testOutputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = core_rnn_cell_impl.OutputProjectionWrapper(
core_rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = core_rnn_cell_impl.InputProjectionWrapper(
core_rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testResidualWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = core_rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
g_res, m_new_res = core_rnn_cell_impl.ResidualWrapper(base_cell)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testDeviceWrapper(self):
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = core_rnn_cell_impl.DeviceWrapper(
core_rnn_cell_impl.GRUCell(3), "/cpu:14159")
outputs, _ = cell(x, m)
self.assertTrue("cpu:14159" in outputs.device.lower())
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
with self.test_session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = core_rnn_cell_impl.DeviceWrapper(
core_rnn_cell_impl.GRUCell(3), "/gpu:0")
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
step_stats = run_metadata.step_stats
ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
gpu_stats = step_stats.dev_stats[ix].node_stats
cpu_stats = step_stats.dev_stats[1 - ix].node_stats
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
# def testUsingSecondCellInScopeWithExistingVariablesFails(self):
# # This test should go away when this behavior is no longer an
# # error (Approx. May 2017)
# cell1 = core_rnn_cell_impl.LSTMCell(3)
# cell2 = core_rnn_cell_impl.LSTMCell(3)
# x = array_ops.zeros([1, 3])
# m = core_rnn_cell_impl.LSTMStateTuple(*[array_ops.zeros([1, 3])] * 2)
# cell1(x, m)
# with self.assertRaisesRegexp(ValueError, r"LSTMCell\(..., reuse=True\)"):
# cell2(x, m)
# def testUsingCellInDifferentScopeFromFirstCallFails(self):
# # This test should go away when this behavior is no longer an
# # error (Approx. May 2017)
# cell = core_rnn_cell_impl.LSTMCell(3)
# x = array_ops.zeros([1, 3])
# m = core_rnn_cell_impl.LSTMStateTuple(*[array_ops.zeros([1, 3])] * 2)
# with variable_scope.variable_scope("scope1"):
# cell(x, m)
# with variable_scope.variable_scope("scope2"):
# with self.assertRaisesRegexp(ValueError, r"Attempt to reuse RNNCell"):
# cell(x, m)
def testEmbeddingWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = core_rnn_cell_impl.EmbeddingWrapper(
core_rnn_cell_impl.GRUCell(2),
embedding_classes=3,
embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = core_rnn_cell_impl.EmbeddingWrapper(
core_rnn_cell_impl.BasicLSTMCell(
1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables_lib.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
_, ml = core_rnn_cell_impl.MultiRNNCell(
[core_rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])
})
        # The numbers in results were not calculated; this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
core_rnn_cell_impl.MultiRNNCell(
[core_rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_bad)
_, ml = core_rnn_cell_impl.MultiRNNCell(
[core_rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
        # The numbers in results were not calculated; this is just a
        # smoke test. However, these numbers should match those of
        # the testMultiRNNCell test.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class DropoutWrapperTest(test.TestCase):
def _testDropoutWrapper(self, batch_size=None, time_steps=None,
parallel_iterations=None, **kwargs):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = core_rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]],
dtype=dtypes.float32)] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = core_rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]] * batch_size,
dtype=dtypes.float32)] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=core_rnn_cell_impl.DropoutWrapper(
core_rnn_cell_impl.LSTMCell(3),
dtype=x.dtype,
**kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x, initial_state=m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
def testDropoutWrapperKeepAllConstantInput(self):
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
# Clear away the graph and the test session (which keeps variables around)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_none,
state_keep_prob=keep_all)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoState(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_all,
state_keep_prob=keep_none)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
self.assertAllClose(np.zeros(res[1].c.shape), res[1].c)
def testDropoutWrapperKeepNoInput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none, output_keep_prob=keep_all,
state_keep_prob=keep_all)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_some,
state_keep_prob=keep_all, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
class SlimRNNCellTest(test.TestCase):
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
my_cell = functools.partial(basic_rnn_cell, num_units=2)
# pylint: disable=protected-access
g, _ = core_rnn_cell_impl._SlimRNNCell(my_cell)(x, m)
# pylint: enable=protected-access
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellMatch(self):
batch_size = 32
input_size = 100
num_units = 10
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inputs = random_ops.random_uniform((batch_size, input_size))
_, initial_state = basic_rnn_cell(inputs, None, num_units)
rnn_cell = core_rnn_cell_impl.BasicRNNCell(num_units)
outputs, state = rnn_cell(inputs, initial_state)
variable_scope.get_variable_scope().reuse_variables()
my_cell = functools.partial(basic_rnn_cell, num_units=num_units)
# pylint: disable=protected-access
slim_cell = core_rnn_cell_impl._SlimRNNCell(my_cell)
# pylint: enable=protected-access
slim_outputs, slim_state = slim_cell(inputs, initial_state)
self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())
self.assertEqual(slim_state.get_shape(), state.get_shape())
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([slim_outputs, slim_state, outputs, state])
self.assertAllClose(res[0], res[2])
self.assertAllClose(res[1], res[3])
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = dtypes.float32
init_output = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_state = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with variable_scope.variable_scope(scope, "basic_rnn_cell",
[inputs, state]):
output = math_ops.tanh(linear([inputs, state], num_units, True))
return output, output
if __name__ == "__main__":
test.main()
| chris-chris/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py | Python | apache-2.0 | 34,704 |
import sys, os
sys.path.insert(1, "../../../")
import h2o, tests
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def ecologyGBM(ip,port):
#Log.info("Importing ecology_model.csv data...\n")
ecology_train = h2o.import_file(path=h2o.locate("smalldata/gbm_test/ecology_model.csv"))
#Log.info("Summary of the ecology data from h2o: \n")
#ecology.summary()
# Log.info("==============================")
# Log.info("H2O GBM Params: ")
# Log.info("x = ecology_train[2:14]")
# Log.info("y = ecology_train["Angaus"]")
# Log.info("ntrees = 100")
# Log.info("max_depth = 5")
# Log.info("min_rows = 10")
# Log.info("learn_rate = 0.1")
# Log.info("==============================")
# Log.info("==============================")
# Log.info("scikit GBM Params: ")
# Log.info("learning_rate=0.1")
# Log.info("n_estimators=100")
# Log.info("max_depth=5")
# Log.info("min_samples_leaf = 10")
# Log.info("n.minobsinnode = 10")
# Log.info("max_features=None")
# Log.info("==============================")
ntrees = 100
max_depth = 5
min_rows = 10
learn_rate = 0.1
# Prepare data for scikit use
trainData = np.genfromtxt(h2o.locate("smalldata/gbm_test/ecology_model.csv"),
delimiter=',',
dtype=None,
names=("Site","Angaus","SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
"USRainDays","USSlope","USNative","DSDam","Method","LocSed"),
skip_header=1,
missing_values=('NA'),
filling_values=(np.nan))
trainDataResponse = trainData["Angaus"]
trainDataFeatures = trainData[["SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
"USRainDays","USSlope","USNative","DSDam","Method","LocSed"]]
ecology_train["Angaus"] = ecology_train["Angaus"].asfactor()
# Train H2O GBM Model:
gbm_h2o = h2o.gbm(x=ecology_train[2:], y=ecology_train["Angaus"], ntrees=ntrees, learn_rate=learn_rate,
max_depth=max_depth, min_rows=min_rows, distribution="bernoulli")
# Train scikit GBM Model:
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learn_rate, n_estimators=ntrees, max_depth=max_depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(trainDataFeatures[:,np.newaxis],trainDataResponse)
# Evaluate the trained models on test data
# Load the test data (h2o)
ecology_test = h2o.import_file(path=h2o.locate("smalldata/gbm_test/ecology_eval.csv"))
# Load the test data (scikit)
testData = np.genfromtxt(h2o.locate("smalldata/gbm_test/ecology_eval.csv"),
delimiter=',',
dtype=None,
names=("Angaus","SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
"USRainDays","USSlope","USNative","DSDam","Method","LocSed"),
skip_header=1,
missing_values=('NA'),
filling_values=(np.nan))
testDataResponse = testData["Angaus"]
testDataFeatures = testData[["SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
"USRainDays","USSlope","USNative","DSDam","Method","LocSed"]]
# Score on the test data and compare results
# scikit
auc_sci = roc_auc_score(testDataResponse, gbm_sci.predict_proba(testDataFeatures[:,np.newaxis])[:,1])
# h2o
gbm_perf = gbm_h2o.model_performance(ecology_test)
auc_h2o = gbm_perf.auc()
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
tests.run_test(sys.argv, ecologyGBM)
| bospetersen/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_ecologyGBM.py | Python | apache-2.0 | 4,088 |
#!/usr/bin/env python
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import snap_plugin.v1 as snap
LOG = logging.getLogger(__name__)
class RollingAverage(snap.Processor):
"""
    Calculates a rolling average of the received metrics and returns the
    averaged metrics for downstream publishing.
"""
metrics_buffer = {}
def process(self, metrics, config):
for metric in metrics:
self.rotate_buffer(metric, config)
return self.generate_averages(config)
def generate_averages(self, config):
average_metrics = []
        for namespace_key, metric_list in self.metrics_buffer.items():
count = len(metric_list)
if count > 0:
total_value = sum([metric.data
for metric in metric_list])
average_value = total_value / count
average_metric = self.duplicate_metric(
metric_list[-1], config)
average_metric.data = str(average_value)
average_metrics.append(average_metric)
return average_metrics
def duplicate_metric(self, seed_metric, config):
now = time.time()
duplicated_metric = snap.metric.Metric(
namespace=[element for element in seed_metric.namespace],
version=seed_metric.version,
# tags=[tag for tag in seed_metric.tags], TODO - this fails with "TypeError: The 'tags' kwarg requires a dict of strings. (given: `<class 'google.protobuf.internal.containers.ScalarMap'>`)"
config=seed_metric.config,
timestamp=now,
unit=seed_metric.unit,
description=seed_metric.description
)
duplicated_metric.namespace.add_static_element(
config['average-suffix'])
return duplicated_metric
def concatenate_namespace(self, namespace):
concat_value = ''
for namespace_element in namespace:
concat_value += namespace_element.value
return concat_value
def rotate_buffer(self, metric, config):
namespace_key = self.concatenate_namespace(metric.namespace)
namespaced_buffer = self.metrics_buffer.get(namespace_key, [])
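        # Descriptive note (not in the original plugin): the slice below caps
        # the per-namespace buffer at five metrics (the first four already
        # buffered plus the newly appended one); the configured
        # 'buffer-length' value is not consulted here.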
namespaced_buffer = namespaced_buffer[:4]
namespaced_buffer.append(metric)
self.metrics_buffer[namespace_key] = namespaced_buffer
def get_config_policy(self):
"""Get's the config policy
The config policy for this plugin defines a string configuration item
`instance-id` with the default value of `xyz-abc-qwerty`.
"""
return snap.ConfigPolicy(
[
None,
[
(
"average-suffix",
snap.StringRule(default='average')
),
(
"buffer-length",
                        snap.IntegerRule(default=10)
)
]
]
)
if __name__ == "__main__":
plugin_name = "rolling-average"
plugin_version = 1
RollingAverage(plugin_name,
plugin_version).start_plugin()
# example = [
# {"timestamp": "2017-01-04T09:59:09.006637096Z", "namespace": "/intel/psutil/load/load1", "data": 0.09, "unit": "",
# "tags": {"plugin_running_on": "pukka", "sequence-id": "4031"}, "version": 0,
# "last_advertised_time": "2017-01-04T11:06:46.124337372Z"},
# {"timestamp": "2017-01-04T09:59:09.006637096Z", "namespace": "/intel/psutil/load/load5", "data": 0.08, "unit": "",
# "tags": {"plugin_running_on": "pukka", "sequence-id": "4031"}, "version": 0,
# "last_advertised_time": "2017-01-04T11:06:46.12433803Z"},
# {"timestamp": "2017-01-04T09:59:09.006637096Z", "namespace": "/intel/psutil/load/load15", "data": 0.01, "unit": "",
# "tags": {"plugin_running_on": "pukka", "sequence-id": "4031"}, "version": 0,
# "last_advertised_time": "2017-01-04T11:06:46.12433849Z"},
# {"timestamp": "2017-01-04T09:59:09.006637096Z", "namespace": "/intel/psutil/cpu/cpu-total/user", "data": 69.57,
# "unit": "", "tags": {"plugin_running_on": "pukka", "sequence-id": "4031"}, "version": 0,
# "last_advertised_time": "2017-01-04T11:06:46.124338965Z"},
# {"timestamp": "2017-01-04T09:59:09.006637096Z", "namespace": "/intel/psutil/cpu/cpu-total/idle", "data": 20103.42,
# "unit": "", "tags": {"plugin_running_on": "pukka", "sequence-id": "4031"}, "version": 0,
# "last_advertised_time": "2017-01-04T11:06:46.124339437Z"},
# {"timestamp": "2017-01-04T09:59:09.006637096Z", "namespace": "/intel/psutil/cpu/cpu-total/system", "data": 33.51,
# "unit": "", "tags": {"plugin_running_on": "pukka", "sequence-id": "4031"}, "version": 0,
# "last_advertised_time": "2017-01-04T11:06:46.124339892Z"}]
#
| handknitted/vagrant_snap | processor/rolling_average/rolling_average_processor.py | Python | apache-2.0 | 5,420 |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/mike/catkin_ws/install/include".split(';') if "/home/mike/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "sensor_msgs;tf;urdf;diagnostic_updater;schunk_libm5api;libntcan;libpcan;cob_srvs;brics_actuator;diagnostic_msgs;control_msgs;message_runtime;roscpp;dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lbcap_client;-lbcap_server".split(';') if "-lbcap_client;-lbcap_server" != "" else []
PROJECT_NAME = "phd"
PROJECT_SPACE_DIR = "/home/mike/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| mikewrock/phd_backup_full | build/phd/catkin_generated/pkg.installspace.context.pc.py | Python | apache-2.0 | 658 |
"""Support for Australian BOM (Bureau of Meteorology) weather service."""
import datetime
import ftplib
import gzip
import io
import json
import logging
import os
import re
import zipfile
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
TEMP_CELSIUS,
CONF_NAME,
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_RESOURCE = "http://www.bom.gov.au/fwo/{}/{}.{}.json"
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_STATION_ID = "station_id"
ATTR_STATION_NAME = "station_name"
ATTR_ZONE_ID = "zone_id"
ATTRIBUTION = "Data provided by the Australian Bureau of Meteorology"
CONF_STATION = "station"
CONF_ZONE_ID = "zone_id"
CONF_WMO_ID = "wmo_id"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=60)
SENSOR_TYPES = {
"wmo": ["wmo", None],
"name": ["Station Name", None],
"history_product": ["Zone", None],
"local_date_time": ["Local Time", None],
"local_date_time_full": ["Local Time Full", None],
"aifstime_utc": ["UTC Time Full", None],
"lat": ["Lat", None],
"lon": ["Long", None],
"apparent_t": ["Feels Like C", TEMP_CELSIUS],
"cloud": ["Cloud", None],
"cloud_base_m": ["Cloud Base", None],
"cloud_oktas": ["Cloud Oktas", None],
"cloud_type_id": ["Cloud Type ID", None],
"cloud_type": ["Cloud Type", None],
"delta_t": ["Delta Temp C", TEMP_CELSIUS],
"gust_kmh": ["Wind Gust kmh", "km/h"],
"gust_kt": ["Wind Gust kt", "kt"],
"air_temp": ["Air Temp C", TEMP_CELSIUS],
"dewpt": ["Dew Point C", TEMP_CELSIUS],
"press": ["Pressure mb", "mbar"],
"press_qnh": ["Pressure qnh", "qnh"],
"press_msl": ["Pressure msl", "msl"],
"press_tend": ["Pressure Tend", None],
"rain_trace": ["Rain Today", "mm"],
"rel_hum": ["Relative Humidity", "%"],
"sea_state": ["Sea State", None],
"swell_dir_worded": ["Swell Direction", None],
"swell_height": ["Swell Height", "m"],
"swell_period": ["Swell Period", None],
"vis_km": ["Visability km", "km"],
"weather": ["Weather", None],
"wind_dir": ["Wind Direction", None],
"wind_spd_kmh": ["Wind Speed kmh", "km/h"],
"wind_spd_kt": ["Wind Speed kt", "kt"],
}
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
station = station.replace(".shtml", "")
if not re.fullmatch(r"ID[A-Z]\d\d\d\d\d\.\d\d\d\d\d", station):
raise vol.error.Invalid("Malformed station ID")
return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_ZONE_ID, "Deprecated partial station ID"): cv.string,
vol.Inclusive(CONF_WMO_ID, "Deprecated partial station ID"): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BOM sensor."""
station = config.get(CONF_STATION)
zone_id, wmo_id = config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)
if station is not None:
if zone_id and wmo_id:
_LOGGER.warning(
"Using config %s, not %s and %s for BOM sensor",
CONF_STATION,
CONF_ZONE_ID,
CONF_WMO_ID,
)
elif zone_id and wmo_id:
station = "{}.{}".format(zone_id, wmo_id)
else:
station = closest_station(
config.get(CONF_LATITUDE),
config.get(CONF_LONGITUDE),
hass.config.config_dir,
)
if station is None:
_LOGGER.error("Could not get BOM weather station from lat/lon")
return
bom_data = BOMCurrentData(station)
try:
bom_data.update()
except ValueError as err:
_LOGGER.error("Received error from BOM Current: %s", err)
return
add_entities(
[
BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME))
for variable in config[CONF_MONITORED_CONDITIONS]
]
)
class BOMCurrentSensor(Entity):
"""Implementation of a BOM current sensor."""
def __init__(self, bom_data, condition, stationname):
"""Initialize the sensor."""
self.bom_data = bom_data
self._condition = condition
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
if self.stationname is None:
return "BOM {}".format(SENSOR_TYPES[self._condition][0])
return "BOM {} {}".format(self.stationname, SENSOR_TYPES[self._condition][0])
@property
def state(self):
"""Return the state of the sensor."""
return self.bom_data.get_reading(self._condition)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_UPDATE: self.bom_data.last_updated,
ATTR_SENSOR_ID: self._condition,
ATTR_STATION_ID: self.bom_data.latest_data["wmo"],
ATTR_STATION_NAME: self.bom_data.latest_data["name"],
ATTR_ZONE_ID: self.bom_data.latest_data["history_product"],
}
return attr
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES[self._condition][1]
def update(self):
"""Update current conditions."""
self.bom_data.update()
class BOMCurrentData:
"""Get data from BOM."""
def __init__(self, station_id):
"""Initialize the data object."""
self._zone_id, self._wmo_id = station_id.split(".")
self._data = None
self.last_updated = None
def _build_url(self):
"""Build the URL for the requests."""
url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id)
_LOGGER.debug("BOM URL: %s", url)
return url
@property
def latest_data(self):
"""Return the latest data object."""
if self._data:
return self._data[0]
return None
def get_reading(self, condition):
"""Return the value for the given condition.
BOM weather publishes condition readings for weather (and a few other
conditions) at intervals throughout the day. To avoid a `-` value in
the frontend for these conditions, we traverse the historical data
for the latest value that is not `-`.
Iterators are used in this method to avoid iterating needlessly
through the entire BOM provided dataset.
"""
condition_readings = (entry[condition] for entry in self._data)
return next((x for x in condition_readings if x != "-"), None)
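        # Illustrative sketch (assumption, not part of the original module):
        # with self._data == [{"air_temp": "-"}, {"air_temp": "12.3"}], where
        # index 0 is the most recent observation, the generator yields "-" and
        # then "12.3", so next(...) returns "12.3", the most recent reading
        # that is not the "-" placeholder, without evaluating older entries.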
def should_update(self):
"""Determine whether an update should occur.
BOM provides updated data every 30 minutes. We manually define
refreshing logic here rather than a throttle to keep updates
in lock-step with BOM.
If 35 minutes has passed since the last BOM data update, then
an update should be done.
"""
if self.last_updated is None:
# Never updated before, therefore an update should occur.
return True
now = datetime.datetime.now()
update_due_at = self.last_updated + datetime.timedelta(minutes=35)
return now > update_due_at
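        # Illustrative sketch (assumption, not part of the original module):
        # if last_updated is 10:00, update_due_at is 10:35, so should_update()
        # returns False until 10:35 has passed, after which the next update()
        # call fetches fresh observations.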
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from BOM."""
if not self.should_update():
_LOGGER.debug(
"BOM was updated %s minutes ago, skipping update as"
" < 35 minutes, Now: %s, LastUpdate: %s",
(datetime.datetime.now() - self.last_updated),
datetime.datetime.now(),
self.last_updated,
)
return
try:
result = requests.get(self._build_url(), timeout=10).json()
self._data = result["observations"]["data"]
# set lastupdate using self._data[0] as the first element in the
# array is the latest date in the json
self.last_updated = datetime.datetime.strptime(
str(self._data[0]["local_date_time_full"]), "%Y%m%d%H%M%S"
)
return
except ValueError as err:
_LOGGER.error("Check BOM %s", err.args)
self._data = None
raise
def _get_bom_stations():
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
This function does several MB of internet requests, so please use the
caching version to minimise latency and hit-count.
"""
latlon = {}
with io.BytesIO() as file_obj:
with ftplib.FTP("ftp.bom.gov.au") as ftp:
ftp.login()
ftp.cwd("anon2/home/ncc/metadata/sitelists")
ftp.retrbinary("RETR stations.zip", file_obj.write)
file_obj.seek(0)
with zipfile.ZipFile(file_obj) as zipped:
with zipped.open("stations.txt") as station_txt:
for _ in range(4):
station_txt.readline() # skip header
while True:
line = station_txt.readline().decode().strip()
if len(line) < 120:
break # end while loop, ignoring any footer text
wmo, lat, lon = (
line[a:b].strip() for a, b in [(128, 134), (70, 78), (79, 88)]
)
if wmo != "..":
latlon[wmo] = (float(lat), float(lon))
zones = {}
pattern = (
r'<a href="/products/(?P<zone>ID[A-Z]\d\d\d\d\d)/'
r'(?P=zone)\.(?P<wmo>\d\d\d\d\d).shtml">'
)
for state in ("nsw", "vic", "qld", "wa", "tas", "nt"):
url = "http://www.bom.gov.au/{0}/observations/{0}all.shtml".format(state)
for zone_id, wmo_id in re.findall(pattern, requests.get(url).text):
zones[wmo_id] = zone_id
return {"{}.{}".format(zones[k], k): latlon[k] for k in set(latlon) & set(zones)}
def bom_stations(cache_dir):
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
Results from internet requests are cached as compressed JSON, making
subsequent calls very much faster.
"""
cache_file = os.path.join(cache_dir, ".bom-stations.json.gz")
if not os.path.isfile(cache_file):
stations = _get_bom_stations()
with gzip.open(cache_file, "wt") as cache:
json.dump(stations, cache, sort_keys=True)
return stations
with gzip.open(cache_file, "rt") as cache:
return {k: tuple(v) for k, v in json.load(cache).items()}
def closest_station(lat, lon, cache_dir):
"""Return the ZONE_ID.WMO_ID of the closest station to our lat/lon."""
if lat is None or lon is None or not os.path.isdir(cache_dir):
return
stations = bom_stations(cache_dir)
def comparable_dist(wmo_id):
"""Create a psudeo-distance from latitude/longitude."""
station_lat, station_lon = stations[wmo_id]
return (lat - station_lat) ** 2 + (lon - station_lon) ** 2
return min(stations, key=comparable_dist)
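# Illustrative usage sketch (assumption, not part of the original module):
#   closest_station(-33.86, 151.21, "/config")
# loads the cached station list and returns the "ZONE_ID.WMO_ID" string of the
# station with the smallest comparable_dist() value, something of the form
# "IDX12345.54321" (a hypothetical ID used here only for illustration).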
| fbradyirl/home-assistant | homeassistant/components/bom/sensor.py | Python | apache-2.0 | 11,621 |
import os
CONFIGURATION_NAME = 'ResourcesManager.conf'
SEPARATOR = ' = '
NEW_LINE = '\n'
TRUE = 'true'
FALSE = 'false'
FOLDER_NAME = 'ResourcesManager'
PROCESSED_FILES_FILE_NAME = 'processed.list'
PENDING_FILES_FILE_NAME = 'pending.list'
DEFAULT_EVENT_BUS_NAME = 'EventBus'
DEFAULT_KEYS = 'CFBundleURLTypes, UIAppFonts'
TURN_TO_CAMEL_KEY = 'turn_to_camel'
TURN_TO_CAMEL_CHARACTERS_KEY = 'turn_to_camel_characters'
REPLACEMENTS_KEY = 'characters_to_replace'
CHANGE_R_SWIFT_STRING_KEY = 'change_r_swift_strings'
STRINGS_CONTAINER_TYPE_KEY = 'parsed_strings_container_type'
LOCALIZATIONS_PATH_KEY = 'localizations_files_path'
LOCALIZATIONS_TABLE_NAME_KEY = 'localizations_table_name'
GENERATED_FOLDER_NAME_KEY = 'generated_folder_name'
PROCESSED_FILES_NAME_KEY = 'processed_list_file_name'
NEEDS_PROCESSING_FILES_NAME_KEY = 'pending_files_list_file_name'
PROCESS_FILES_KEY = 'process_files'
PARSE_STRINGS_KEY = 'parse_strings'
EVENT_BUS_NAME_KEY = 'event_bus_class_name'
INFO_PLIST_PATH_KEY = 'info_plist_path'
GENERATE_INFO_PLIST_PARSER_KEY = 'generate_info_plist_parser'
NEEDED_KEYS_KEY = 'parser_for_following_keys'
GENERATE_IMAGES_RESOURCE_KEY = 'generate_images_resource'
IMAGES_CONTAINER_KEY = 'images_container'
IS_DEFAULT_TEMPLATE_IMAGE_KEY = 'should_use_always_template_by_default'
GENERATE_FONTS_RESOURCE_KEY = 'generate_fonts_resource'
FONTS_CONTAINER_TYPE_KEY = 'fonts_container_type'
GENERATE_IB_RESOURCES = 'generate_interface_builder_resources'
GENERATE_MAPPING_KEY = 'generate_mapping'
MAPPING_PATH_KEY = "mapping_path"
class ContainerType:
def __init__(self):
raise NotImplementedError
EXTENSION = 'extension'
CLASS = 'class'
def get_full_path_to_conf(path):
if path[-1] == '/' or path[-1] == '\\':
return path + CONFIGURATION_NAME
else:
return path + "/" + CONFIGURATION_NAME
def generate_default_configuration(path):
conf = open(get_full_path_to_conf(path), 'w+')
_put_value_in_configuration(conf, TURN_TO_CAMEL_KEY, TRUE)
_put_value_in_configuration(conf, TURN_TO_CAMEL_CHARACTERS_KEY, '_-')
_put_value_in_configuration(conf, REPLACEMENTS_KEY, '._')
_put_value_in_configuration(conf, CHANGE_R_SWIFT_STRING_KEY, FALSE)
_put_value_in_configuration(conf, STRINGS_CONTAINER_TYPE_KEY, ContainerType.EXTENSION)
_put_value_in_configuration(conf, LOCALIZATIONS_TABLE_NAME_KEY, 'Localizable')
_put_value_in_configuration(conf, GENERATED_FOLDER_NAME_KEY, FOLDER_NAME)
_put_value_in_configuration(conf, PROCESSED_FILES_NAME_KEY, PROCESSED_FILES_FILE_NAME)
_put_value_in_configuration(conf, NEEDS_PROCESSING_FILES_NAME_KEY, PENDING_FILES_FILE_NAME)
_put_value_in_configuration(conf, PROCESS_FILES_KEY, TRUE)
_put_value_in_configuration(conf, PARSE_STRINGS_KEY, TRUE)
_put_value_in_configuration(conf, EVENT_BUS_NAME_KEY, DEFAULT_EVENT_BUS_NAME)
_put_value_in_configuration(conf, GENERATE_INFO_PLIST_PARSER_KEY, TRUE)
_put_value_in_configuration(conf, NEEDED_KEYS_KEY, DEFAULT_KEYS)
_put_value_in_configuration(conf, GENERATE_IMAGES_RESOURCE_KEY, TRUE)
_put_value_in_configuration(conf, IS_DEFAULT_TEMPLATE_IMAGE_KEY, TRUE)
_put_value_in_configuration(conf, IMAGES_CONTAINER_KEY, ContainerType.EXTENSION)
_put_value_in_configuration(conf, GENERATE_FONTS_RESOURCE_KEY, TRUE)
_put_value_in_configuration(conf, FONTS_CONTAINER_TYPE_KEY, ContainerType.EXTENSION)
_put_value_in_configuration(conf, GENERATE_IB_RESOURCES, TRUE)
_put_value_in_configuration(conf, GENERATE_MAPPING_KEY, FALSE)
conf.close()
def _put_value_in_configuration(conf, key, value):
conf.write(key + SEPARATOR + value + NEW_LINE)
def read_configuration(path):
file_path = get_full_path_to_conf(path)
if not os.path.isfile(file_path):
generate_default_configuration(path)
configuration = {}
conf = open(file_path, 'r')
for line in conf.readlines():
splitted = line.split(SEPARATOR)
if len(splitted) == 2:
configuration[splitted[0].strip()] = splitted[1].strip()
if NEEDED_KEYS_KEY in configuration:
configuration[NEEDED_KEYS_KEY] = [x.strip() for x in configuration[NEEDED_KEYS_KEY].split(',')]
conf.close()
return configuration
def synchronize_configuration(path, conf):
conf_file = open(get_full_path_to_conf(path), 'w+')
for key in conf:
if key != NEEDED_KEYS_KEY:
_put_value_in_configuration(conf_file, key, str(conf[key]))
conf_file.close()
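# Illustrative sketch (assumption, not part of the original script): a default
# ResourcesManager.conf written by generate_default_configuration() contains
# one "key = value" pair per line, for example:
#   turn_to_camel = true
#   parser_for_following_keys = CFBundleURLTypes, UIAppFonts
# read_configuration() parses these lines back into a dict and splits the
# parser_for_following_keys value into a list of keys.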
| IljaKosynkin/ResourcesManager | ResourceManager/scripts/Configuration.py | Python | apache-2.0 | 4,487 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import gc
import json
import os
from subprocess import Popen, PIPE
import sys
from tempfile import mkdtemp
from textwrap import dedent
from time import sleep, time
from collections import defaultdict
import unittest
from uuid import uuid4
import shutil
import six
from six.moves.http_client import HTTPConnection
from six.moves.urllib.parse import urlparse
from swiftclient import get_auth, head_account, client
from swift.common import internal_client, direct_client, utils
from swift.common.direct_client import DirectClientException
from swift.common.ring import Ring
from swift.common.utils import hash_path, md5, \
readconf, renamer, rsync_module_interpolation
from swift.common.manager import Manager
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from swift.obj.diskfile import get_data_dir
from test.debug_logger import capture_logger
from test.probe import CHECK_SERVER_TIMEOUT, VALIDATE_RSYNC, PROXY_BASE_URL
ENABLED_POLICIES = [p for p in POLICIES if not p.is_deprecated]
POLICIES_BY_TYPE = defaultdict(list)
for p in POLICIES:
POLICIES_BY_TYPE[p.policy_type].append(p)
def get_server_number(ipport, ipport2server):
server_number = ipport2server[ipport]
server, number = server_number[:-1], server_number[-1:]
try:
number = int(number)
except ValueError:
# probably the proxy
return server_number, None
return server, number
def start_server(ipport, ipport2server):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).start(number=number, wait=True)
if err:
raise Exception('unable to start %s' % (
server if not number else '%s%s' % (server, number)))
return check_server(ipport, ipport2server)
def _check_storage(ipport, path):
conn = HTTPConnection(*ipport)
conn.request('GET', path)
resp = conn.getresponse()
# 404 because it's a nonsense path (and mount_check is false)
# 507 in case the test target is a VM using mount_check
if resp.status not in (404, 507):
raise Exception(
'Unexpected status %s' % resp.status)
return resp
def _check_proxy(user, key):
url, token = get_auth(PROXY_BASE_URL + '/auth/v1.0',
user, key)
account = url.split('/')[-1]
head_account(url, token)
return url, token, account
def _retry_timeout(f, args=None, kwargs=None, timeout=CHECK_SERVER_TIMEOUT):
args = args or ()
kwargs = kwargs or {}
try_until = time() + timeout
while True:
try:
return f(*args, **kwargs)
except Exception as err:
if time() > try_until:
print(err)
fsignature = '%s(*%r, **%r)' % (f.__name__, args, kwargs)
print('Giving up on %s after %s seconds.' % (
fsignature, timeout))
raise err
sleep(0.1)
def check_server(ipport, ipport2server):
server = ipport2server[ipport]
if server[:-1] in ('account', 'container', 'object'):
if int(server[-1]) > 4:
return None
path = '/connect/1/2'
if server[:-1] == 'container':
path += '/3'
elif server[:-1] == 'object':
path += '/3/4'
rv = _retry_timeout(_check_storage, args=(ipport, path))
else:
rv = _retry_timeout(_check_proxy, args=(
'test:tester', 'testing'))
return rv
def kill_server(ipport, ipport2server):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).kill(number=number)
if err:
raise Exception('unable to kill %s' % (server if not number else
'%s%s' % (server, number)))
return wait_for_server_to_hangup(ipport)
def wait_for_server_to_hangup(ipport):
try_until = time() + 30
while True:
try:
conn = HTTPConnection(*ipport)
conn.request('GET', '/')
conn.getresponse()
except Exception:
break
if time() > try_until:
raise Exception(
'Still answering on %s:%s after 30 seconds' % ipport)
sleep(0.1)
def kill_nonprimary_server(primary_nodes, ipport2server):
primary_ipports = [(n['ip'], n['port']) for n in primary_nodes]
for ipport, server in ipport2server.items():
if ipport in primary_ipports:
server_type = server[:-1]
break
else:
raise Exception('Cannot figure out server type for %r' % primary_nodes)
for ipport, server in list(ipport2server.items()):
if server[:-1] == server_type and ipport not in primary_ipports:
kill_server(ipport, ipport2server)
return ipport
def add_ring_devs_to_ipport2server(ring, server_type, ipport2server,
servers_per_port=0):
# We'll number the servers by order of unique occurrence of:
    #   IP, if servers_per_port > 0 OR there is more than one IP in the ring
# ipport, otherwise
unique_ip_count = len({dev['ip'] for dev in ring.devs if dev})
things_to_number = {}
number = 0
for dev in filter(None, ring.devs):
ip = dev['ip']
ipport = (ip, dev['port'])
unique_by = ip if servers_per_port or unique_ip_count > 1 else ipport
if unique_by not in things_to_number:
number += 1
things_to_number[unique_by] = number
ipport2server[ipport] = '%s%d' % (server_type,
things_to_number[unique_by])
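# Illustrative sketch (assumption, not part of the original module): for an
# 'object' ring with devices at ('127.0.0.1', 6010) and ('127.0.0.2', 6020)
# and servers_per_port == 0, there is more than one unique IP, so numbering
# follows the IPs and ipport2server becomes
#   {('127.0.0.1', 6010): 'object1', ('127.0.0.2', 6020): 'object2'}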
def store_config_paths(name, configs):
server_names = [name, '%s-replicator' % name]
if name == 'container':
server_names.append('container-sharder')
elif name == 'object':
server_names.append('object-reconstructor')
for server_name in server_names:
for server in Manager([server_name]):
for i, conf in enumerate(server.conf_files(), 1):
configs[server.server][i] = conf
def get_ring(ring_name, required_replicas, required_devices,
server=None, force_validate=None, ipport2server=None,
config_paths=None):
if not server:
server = ring_name
ring = Ring('/etc/swift', ring_name=ring_name)
if ipport2server is None:
ipport2server = {} # used internally, even if not passed in
if config_paths is None:
config_paths = defaultdict(dict)
store_config_paths(server, config_paths)
repl_name = '%s-replicator' % server
repl_configs = {i: readconf(c, section_name=repl_name)
for i, c in config_paths[repl_name].items()}
servers_per_port = any(int(c.get('servers_per_port', '0'))
for c in repl_configs.values())
add_ring_devs_to_ipport2server(ring, server, ipport2server,
servers_per_port=servers_per_port)
if not VALIDATE_RSYNC and not force_validate:
return ring
# easy sanity checks
if ring.replica_count != required_replicas:
raise unittest.SkipTest('%s has %s replicas instead of %s' % (
ring.serialized_path, ring.replica_count, required_replicas))
devs = [dev for dev in ring.devs if dev is not None]
if len(devs) != required_devices:
raise unittest.SkipTest('%s has %s devices instead of %s' % (
ring.serialized_path, len(devs), required_devices))
for dev in devs:
# verify server is exposing mounted device
ipport = (dev['ip'], dev['port'])
_, server_number = get_server_number(ipport, ipport2server)
conf = repl_configs[server_number]
for device in os.listdir(conf['devices']):
if device == dev['device']:
dev_path = os.path.join(conf['devices'], device)
full_path = os.path.realpath(dev_path)
if not os.path.exists(full_path):
raise unittest.SkipTest(
'device %s in %s was not found (%s)' %
(device, conf['devices'], full_path))
break
else:
raise unittest.SkipTest(
"unable to find ring device %s under %s's devices (%s)" % (
dev['device'], server, conf['devices']))
# verify server is exposing rsync device
rsync_export = conf.get('rsync_module', '').rstrip('/')
if not rsync_export:
rsync_export = '{replication_ip}::%s' % server
cmd = "rsync %s" % rsync_module_interpolation(rsync_export, dev)
p = Popen(cmd, shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
if p.returncode:
raise unittest.SkipTest('unable to connect to rsync '
'export %s (%s)' % (rsync_export, cmd))
for line in stdout.decode().splitlines():
if line.rsplit(None, 1)[-1] == dev['device']:
break
else:
raise unittest.SkipTest("unable to find ring device %s under "
"rsync's exported devices for %s (%s)" %
(dev['device'], rsync_export, cmd))
return ring
def get_policy(**kwargs):
kwargs.setdefault('is_deprecated', False)
# go through the policies and make sure they match the
# requirements of kwargs
for policy in POLICIES:
# TODO: for EC, pop policy type here and check it first
matches = True
for key, value in kwargs.items():
try:
if getattr(policy, key) != value:
matches = False
except AttributeError:
matches = False
if matches:
return policy
raise unittest.SkipTest('No policy matching %s' % kwargs)
def run_cleanup(cmd):
p = Popen(cmd + " 2>&1", shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
if p.returncode:
raise AssertionError(
'Cleanup with %r failed: stdout: %s, stderr: %s'
% (cmd, stdout, _stderr))
print(stdout)
Manager(['all']).stop()
def resetswift():
run_cleanup("resetswift")
def kill_orphans():
run_cleanup("swift-orphans -a 0 -k 9")
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.length = int(total)
self.hasher = md5(usedforsecurity=False)
self.read_amount = 0
self.chunk = uuid4().hex.encode('ascii') * 2 ** 10
self.buff = b''
@property
def etag(self):
return self.hasher.hexdigest()
def __len__(self):
return self.length
def read(self, amount):
if len(self.buff) < amount:
try:
self.buff += next(self)
except StopIteration:
pass
rv, self.buff = self.buff[:amount], self.buff[amount:]
return rv
def __iter__(self):
return self
def __next__(self):
if self.buff:
rv, self.buff = self.buff, b''
return rv
if self.read_amount >= self.length:
raise StopIteration()
rv = self.chunk[:int(self.length - self.read_amount)]
self.read_amount += len(rv)
self.hasher.update(rv)
return rv
# for py2 compat:
next = __next__
class ProbeTest(unittest.TestCase):
"""
Don't instantiate this directly, use a child class instead.
"""
def _load_rings_and_configs(self):
self.ipport2server = {}
self.configs = defaultdict(dict)
self.account_ring = get_ring(
'account',
self.acct_cont_required_replicas,
self.acct_cont_required_devices,
ipport2server=self.ipport2server,
config_paths=self.configs)
self.container_ring = get_ring(
'container',
self.acct_cont_required_replicas,
self.acct_cont_required_devices,
ipport2server=self.ipport2server,
config_paths=self.configs)
self.policy = get_policy(**self.policy_requirements)
self.object_ring = get_ring(
self.policy.ring_name,
self.obj_required_replicas,
self.obj_required_devices,
server='object',
ipport2server=self.ipport2server,
config_paths=self.configs)
for server in Manager(['proxy-server']):
for conf in server.conf_files():
self.configs['proxy-server'] = conf
def setUp(self):
# previous test may have left DatabaseBroker instances in garbage with
# open connections to db files which will prevent unmounting devices in
# resetswift, so collect garbage now
gc.collect()
resetswift()
kill_orphans()
self._load_rings_and_configs()
try:
self.servers_per_port = any(
int(readconf(c, section_name='object-replicator').get(
'servers_per_port', '0'))
for c in self.configs['object-replicator'].values())
Manager(['main']).start(wait=True)
for ipport in self.ipport2server:
check_server(ipport, self.ipport2server)
proxy_conf = readconf(self.configs['proxy-server'],
section_name='app:proxy-server')
proxy_ipport = (proxy_conf.get('bind_ip', '127.0.0.1'),
int(proxy_conf.get('bind_port', 8080)))
self.ipport2server[proxy_ipport] = 'proxy'
self.url, self.token, self.account = check_server(
proxy_ipport, self.ipport2server)
self.account_1 = {
'url': self.url, 'token': self.token, 'account': self.account}
rv = _retry_timeout(_check_proxy, args=(
'test2:tester2', 'testing2'))
self.account_2 = {
k: v for (k, v) in zip(('url', 'token', 'account'), rv)}
self.replicators = Manager(
['account-replicator', 'container-replicator',
'object-replicator'])
self.updaters = Manager(['container-updater', 'object-updater'])
except BaseException:
try:
raise
finally:
try:
Manager(['all']).kill()
except Exception:
pass
info_url = "%s://%s/info" % (urlparse(self.url).scheme,
urlparse(self.url).netloc)
proxy_conn = client.http_connection(info_url)
self.cluster_info = client.get_capabilities(proxy_conn)
def tearDown(self):
Manager(['all']).kill()
def assertLengthEqual(self, obj, length):
obj_len = len(obj)
self.assertEqual(obj_len, length, 'len(%r) == %d, not %d' % (
obj, obj_len, length))
def device_dir(self, node):
server_type, config_number = get_server_number(
(node['ip'], node['port']), self.ipport2server)
repl_server = '%s-replicator' % server_type
conf = readconf(self.configs[repl_server][config_number],
section_name=repl_server)
return os.path.join(conf['devices'], node['device'])
def storage_dir(self, node, part=None, policy=None):
policy = policy or self.policy
device_path = self.device_dir(node)
path_parts = [device_path, get_data_dir(policy)]
if part is not None:
path_parts.append(str(part))
return os.path.join(*path_parts)
def config_number(self, node):
_server_type, config_number = get_server_number(
(node['ip'], node['port']), self.ipport2server)
return config_number
def is_local_to(self, node1, node2):
"""
Return True if both ring devices are "local" to each other (on the same
"server".
"""
if self.servers_per_port:
return node1['ip'] == node2['ip']
# Without a disambiguating IP, for SAIOs, we have to assume ports
# uniquely identify "servers". SAIOs should be configured to *either*
# have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique
# ports per server (i.e. sdb1 & sdb5 would have same port numbers in
# the 8-disk EC ring).
return node1['port'] == node2['port']
def get_to_final_state(self):
# these .stop()s are probably not strictly necessary,
# but may prevent race conditions
self.replicators.stop()
self.updaters.stop()
self.replicators.once()
self.updaters.once()
self.replicators.once()
def kill_drive(self, device):
if os.path.ismount(device):
os.system('sudo umount %s' % device)
else:
renamer(device, device + "X")
def revive_drive(self, device):
disabled_name = device + "X"
if os.path.isdir(disabled_name):
renamer(disabled_name, device)
else:
os.system('sudo mount %s' % device)
def make_internal_client(self):
tempdir = mkdtemp()
try:
conf_path = os.path.join(tempdir, 'internal_client.conf')
conf_body = """
[DEFAULT]
swift_dir = /etc/swift
[pipeline:main]
pipeline = catch_errors cache copy proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = True
[filter:copy]
use = egg:swift#copy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
"""
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
return internal_client.InternalClient(conf_path, 'test', 1)
finally:
shutil.rmtree(tempdir)
def get_all_object_nodes(self):
"""
Returns a list of all nodes in all object storage policies.
:return: a list of node dicts.
"""
all_obj_nodes = {}
for policy in ENABLED_POLICIES:
for dev in policy.object_ring.devs:
all_obj_nodes[dev['device']] = dev
return all_obj_nodes.values()
def gather_async_pendings(self, onodes):
"""
Returns a list of paths to async pending files found on given nodes.
:param onodes: a list of nodes.
:return: a list of file paths.
"""
async_pendings = []
for onode in onodes:
device_dir = self.device_dir(onode)
for ap_pol_dir in os.listdir(device_dir):
if not ap_pol_dir.startswith('async_pending'):
# skip 'objects', 'containers', etc.
continue
async_pending_dir = os.path.join(device_dir, ap_pol_dir)
try:
ap_dirs = os.listdir(async_pending_dir)
except OSError as err:
if err.errno == errno.ENOENT:
pass
else:
raise
else:
for ap_dir in ap_dirs:
ap_dir_fullpath = os.path.join(
async_pending_dir, ap_dir)
async_pendings.extend([
os.path.join(ap_dir_fullpath, ent)
for ent in os.listdir(ap_dir_fullpath)])
return async_pendings
def run_custom_daemon(self, klass, conf_section, conf_index,
custom_conf, **kwargs):
conf_file = self.configs[conf_section][conf_index]
conf = utils.readconf(conf_file, conf_section)
conf.update(custom_conf)
# Use a CaptureLogAdapter in order to preserve the pattern of tests
# calling the log accessor methods (e.g. get_lines_for_level) directly
# on the logger instance
with capture_logger(conf, conf.get('log_name', conf_section),
log_to_console=kwargs.pop('verbose', False),
log_route=conf_section) as log_adapter:
daemon = klass(conf, log_adapter)
daemon.run_once(**kwargs)
return daemon
def _get_db_file_path(obj_dir):
files = sorted(os.listdir(obj_dir), reverse=True)
for filename in files:
if filename.endswith('db'):
return os.path.join(obj_dir, filename)
class ReplProbeTest(ProbeTest):
acct_cont_required_replicas = 3
acct_cont_required_devices = 4
obj_required_replicas = 3
obj_required_devices = 4
policy_requirements = {'policy_type': REPL_POLICY}
def direct_container_op(self, func, account=None, container=None,
expect_failure=False):
account = account if account else self.account
container = container if container else self.container_to_shard
cpart, cnodes = self.container_ring.get_nodes(account, container)
unexpected_responses = []
results = {}
for cnode in cnodes:
try:
results[cnode['id']] = func(cnode, cpart, account, container)
except DirectClientException as err:
if not expect_failure:
unexpected_responses.append((cnode, err))
else:
if expect_failure:
unexpected_responses.append((cnode, 'success'))
if unexpected_responses:
self.fail('Unexpected responses: %s' % unexpected_responses)
return results
def direct_delete_container(self, account=None, container=None,
expect_failure=False):
self.direct_container_op(direct_client.direct_delete_container,
account, container, expect_failure)
def direct_head_container(self, account=None, container=None,
expect_failure=False):
return self.direct_container_op(direct_client.direct_head_container,
account, container, expect_failure)
def direct_get_container(self, account=None, container=None,
expect_failure=False):
return self.direct_container_op(direct_client.direct_get_container,
account, container, expect_failure)
def get_container_db_files(self, container):
opart, onodes = self.container_ring.get_nodes(self.account, container)
db_files = []
for onode in onodes:
node_id = self.config_number(onode)
device = onode['device']
hash_str = hash_path(self.account, container)
server_conf = readconf(self.configs['container-server'][node_id])
devices = server_conf['app:container-server']['devices']
obj_dir = '%s/%s/containers/%s/%s/%s/' % (devices,
device, opart,
hash_str[-3:], hash_str)
db_files.append(_get_db_file_path(obj_dir))
return db_files
class ECProbeTest(ProbeTest):
acct_cont_required_replicas = 3
acct_cont_required_devices = 4
obj_required_replicas = 6
obj_required_devices = 8
policy_requirements = {'policy_type': EC_POLICY}
def _make_name(self, prefix):
return ('%s%s' % (prefix, uuid4())).encode()
def setUp(self):
super(ECProbeTest, self).setUp()
self.container_name = self._make_name('container-')
self.object_name = self._make_name('object-')
# sanity
self.assertEqual(self.policy.policy_type, EC_POLICY)
self.reconstructor = Manager(["object-reconstructor"])
def proxy_put(self, extra_headers=None):
contents = Body()
headers = {
self._make_name('x-object-meta-').decode('utf8'):
self._make_name('meta-foo-').decode('utf8'),
}
if extra_headers:
headers.update(extra_headers)
self.etag = client.put_object(self.url, self.token,
self.container_name,
self.object_name,
contents=contents, headers=headers)
def proxy_get(self):
# GET object
headers, body = client.get_object(self.url, self.token,
self.container_name,
self.object_name,
resp_chunk_size=64 * 2 ** 10)
resp_checksum = md5(usedforsecurity=False)
for chunk in body:
resp_checksum.update(chunk)
return headers, resp_checksum.hexdigest()
def direct_get(self, node, part, require_durable=True, extra_headers=None):
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
if extra_headers:
req_headers.update(extra_headers)
if not require_durable:
req_headers.update(
{'X-Backend-Fragment-Preferences': json.dumps([])})
# node dict has unicode values so utf8 decode our path parts too in
# case they have non-ascii characters
if six.PY2:
acc, con, obj = (s.decode('utf8') for s in (
self.account, self.container_name, self.object_name))
else:
acc, con, obj = self.account, self.container_name, self.object_name
headers, data = direct_client.direct_get_object(
node, part, acc, con, obj, headers=req_headers,
resp_chunk_size=64 * 2 ** 20)
hasher = md5(usedforsecurity=False)
for chunk in data:
hasher.update(chunk)
return headers, hasher.hexdigest()
def assert_direct_get_fails(self, onode, opart, status,
require_durable=True):
try:
self.direct_get(onode, opart, require_durable=require_durable)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, status)
return err
else:
self.fail('Node data on %r was not fully destroyed!' % (onode,))
def assert_direct_get_succeeds(self, onode, opart, require_durable=True,
extra_headers=None):
try:
return self.direct_get(onode, opart,
require_durable=require_durable,
extra_headers=extra_headers)
except direct_client.DirectClientException as err:
self.fail('Node data on %r was not available: %s' % (onode, err))
def break_nodes(self, nodes, opart, failed, non_durable):
# delete partitions on the failed nodes and remove durable marker from
# non-durable nodes
made_non_durable = 0
for i, node in enumerate(nodes):
part_dir = self.storage_dir(node, part=opart)
if i in failed:
shutil.rmtree(part_dir, True)
try:
self.direct_get(node, opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
elif i in non_durable:
for dirs, subdirs, files in os.walk(part_dir):
for fname in sorted(files, reverse=True):
# make the newest durable be non-durable
if fname.endswith('.data'):
made_non_durable += 1
non_durable_fname = fname.replace('#d', '')
os.rename(os.path.join(dirs, fname),
os.path.join(dirs, non_durable_fname))
break
headers, etag = self.direct_get(node, opart,
require_durable=False)
self.assertNotIn('X-Backend-Durable-Timestamp', headers)
try:
os.remove(os.path.join(part_dir, 'hashes.pkl'))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return made_non_durable
def make_durable(self, nodes, opart):
# ensure all data files on the specified nodes are durable
made_durable = 0
for i, node in enumerate(nodes):
part_dir = self.storage_dir(node, part=opart)
for dirs, subdirs, files in os.walk(part_dir):
for fname in sorted(files, reverse=True):
# make the newest non-durable be durable
if (fname.endswith('.data') and
not fname.endswith('#d.data')):
made_durable += 1
non_durable_fname = fname.replace('.data', '#d.data')
os.rename(os.path.join(dirs, fname),
os.path.join(dirs, non_durable_fname))
break
headers, etag = self.assert_direct_get_succeeds(node, opart)
self.assertIn('X-Backend-Durable-Timestamp', headers)
try:
os.remove(os.path.join(part_dir, 'hashes.pkl'))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return made_durable
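    # --- Illustrative sketch (not part of the original file) ----------------
    # A minimal shape of how a probe test might combine the helpers above:
    # knock out a couple of fragments, strip the durable marker from another,
    # run the reconstructor, then confirm every primary holds a durable
    # fragment again. The method name and the `failed`/`non_durable` indices
    # are assumptions for illustration; `object_ring` and `reconstructor`
    # follow the conventions used elsewhere in this test suite.
    def _example_break_then_reconstruct(self):
        self.proxy_put()
        opart, onodes = self.object_ring.get_nodes(
            self.account, self.container_name, self.object_name)
        self.break_nodes(onodes, opart, failed=[0, 1], non_durable=[2])
        self.reconstructor.once()
        for node in onodes:
            self.assert_direct_get_succeeds(node, opart)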
if __name__ == "__main__":
for server in ('account', 'container'):
try:
get_ring(server, 3, 4,
force_validate=True)
except unittest.SkipTest as err:
sys.exit('%s ERROR: %s' % (server, err))
print('%s OK' % server)
for policy in POLICIES:
try:
get_ring(policy.ring_name, 3, 4,
server='object', force_validate=True)
except unittest.SkipTest as err:
sys.exit('object ERROR (%s): %s' % (policy.name, err))
print('object OK (%s)' % policy.name)
| openstack/swift | test/probe/common.py | Python | apache-2.0 | 30,724 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.losses.Loss')
class Loss(object):
"""Loss base class.
To be implemented by subclasses:
* `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.
Example subclass implementation:
```python
class MeanSquaredError(Loss):
def call(self, y_true, y_pred):
y_pred = tf.convert_to_tensor_v2(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
return tf.reduce_mean(math_ops.square(y_pred - y_true), axis=-1)
```
When used with `tf.distribute.Strategy`, outside of built-in training loops
such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
types, and reduce losses explicitly in your training loop. Using 'AUTO' or
'SUM_OVER_BATCH_SIZE' will raise an error.
Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for more
details on this.
You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
```python
with strategy.scope():
loss_obj = tf.keras.losses.CategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE)
....
loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
(1. / global_batch_size))
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
"""Initializes `Loss` class.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op.
"""
losses_utils.ReductionV2.validate(reduction)
self.reduction = reduction
self.name = name
# SUM_OVER_BATCH_SIZE is only allowed in losses managed by `fit` or
# CannedEstimators.
self._allow_sum_over_batch_size = False
self._set_name_scope()
def _set_name_scope(self):
"""Creates a valid `name_scope` name."""
if self.name is None:
self._name_scope = self.__class__.__name__
elif self.name == '<lambda>':
self._name_scope = 'lambda'
else:
# E.g. '_my_loss' => 'my_loss'
self._name_scope = self.name.strip('_')
def __call__(self, y_true, y_pred, sample_weight=None):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
sparse loss functions such as sparse categorical crossentropy where
shape = `[batch_size, d0, .. dN-1]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
sample_weight: Optional `sample_weight` acts as a coefficient for the
loss. If a scalar is provided, then the loss is simply scaled by the
given value. If `sample_weight` is a tensor of size `[batch_size]`, then
the total loss for each sample of the batch is rescaled by the
corresponding element in the `sample_weight` vector. If the shape of
`sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted to
this shape), then each loss element of `y_pred` is scaled
by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
functions reduce by 1 dimension, usually axis=-1.)
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
because all loss functions reduce by 1 dimension, usually axis=-1.)
Raises:
ValueError: If the shape of `sample_weight` is invalid.
"""
# Lambda names have already had '<>' stripped in `_set_name_scope`, since
# '<>' is not accepted in a scope name.
graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
y_true, y_pred, sample_weight)
with K.name_scope(self._name_scope), graph_ctx:
if context.executing_eagerly():
call_fn = self.call
else:
call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())
losses = call_fn(y_true, y_pred)
return losses_utils.compute_weighted_loss(
losses, sample_weight, reduction=self._get_reduction())
@classmethod
def from_config(cls, config):
"""Instantiates a `Loss` from its config (output of `get_config()`).
Args:
config: Output of `get_config()`.
Returns:
A `Loss` instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config dictionary for a `Loss` instance."""
return {'reduction': self.reduction, 'name': self.name}
@abc.abstractmethod
@doc_controls.for_subclass_implementers
def call(self, y_true, y_pred):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
sparse loss functions such as sparse categorical crossentropy where
shape = `[batch_size, d0, .. dN-1]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
Returns:
Loss values with the shape `[batch_size, d0, .. dN-1]`.
"""
raise NotImplementedError('Must be implemented in subclasses.')
def _get_reduction(self):
"""Handles `AUTO` reduction cases and returns the reduction value."""
if (not self._allow_sum_over_batch_size and
distribution_strategy_context.has_strategy() and
(self.reduction == losses_utils.ReductionV2.AUTO or
self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):
raise ValueError(
'Please use `tf.keras.losses.Reduction.SUM` or '
'`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
'used with `tf.distribute.Strategy` outside of the built-in training '
'loops. You can implement '
'`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
'size like:\n```\nwith strategy.scope():\n'
' loss_obj = tf.keras.losses.CategoricalCrossentropy('
'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
'(1. / global_batch_size)\n```\nPlease see '
'https://www.tensorflow.org/tutorials/distribute/custom_training'
' for more details.')
if self.reduction == losses_utils.ReductionV2.AUTO:
return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
return self.reduction
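# --- Illustrative sketch (not part of the original module) ------------------
# A minimal `Loss` subclass of the kind described in the class docstring
# above: `call()` returns the per-sample loss and `Loss.__call__` applies
# sample weighting and reduction. The class name and demo behaviour are
# assumptions made for illustration only.
class _ExampleMeanSquaredError(Loss):

  def call(self, y_true, y_pred):
    y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    # Per-sample loss; weighting/reduction happen in `Loss.__call__`.
    return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)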
class LossFunctionWrapper(Loss):
"""Wraps a loss function in the `Loss` class."""
def __init__(self,
fn,
reduction=losses_utils.ReductionV2.AUTO,
name=None,
**kwargs):
"""Initializes `LossFunctionWrapper` class.
Args:
fn: The loss function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: (Optional) name for the loss.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
"""Invokes the `LossFunctionWrapper` instance.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())
return ag_fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
base_config = super(LossFunctionWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
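# --- Illustrative sketch (not part of the original module) ------------------
# How the built-in classes below use `LossFunctionWrapper`: wrap a plain
# `fn(y_true, y_pred, **kwargs)` and forward extra keyword arguments through
# `**kwargs`. The function and class names here are assumptions made for
# illustration only.
def _example_scaled_absolute_error(y_true, y_pred, scale=1.0):
  y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return scale * K.mean(math_ops.abs(y_pred - y_true), axis=-1)


class _ExampleScaledAbsoluteError(LossFunctionWrapper):

  def __init__(self, scale=2.0, name='example_scaled_absolute_error'):
    # `scale` travels through `**kwargs` into `self._fn_kwargs` and is
    # forwarded to the wrapped function on every call.
    super(_ExampleScaledAbsoluteError, self).__init__(
        _example_scaled_absolute_error, name=name, scale=scale)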
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
"""Computes the mean of squares of errors between labels and predictions.
`loss = square(y_true - y_pred)`
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> mse = tf.keras.losses.MeanSquaredError()
>>> mse(y_true, y_pred).numpy()
0.5
>>> # Calling with 'sample_weight'.
>>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
0.25
>>> # Using 'sum' reduction type.
>>> mse = tf.keras.losses.MeanSquaredError(
... reduction=tf.keras.losses.Reduction.SUM)
>>> mse(y_true, y_pred).numpy()
1.0
>>> # Using 'none' reduction type.
>>> mse = tf.keras.losses.MeanSquaredError(
... reduction=tf.keras.losses.Reduction.NONE)
>>> mse(y_true, y_pred).numpy()
array([0.5, 0.5], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_error'):
"""Initializes `MeanSquaredError` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'mean_squared_error'.
"""
super(MeanSquaredError, self).__init__(
mean_squared_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
"""Computes the mean of absolute difference between labels and predictions.
`loss = abs(y_true - y_pred)`
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> mae = tf.keras.losses.MeanAbsoluteError()
>>> mae(y_true, y_pred).numpy()
0.5
>>> # Calling with 'sample_weight'.
>>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
0.25
>>> # Using 'sum' reduction type.
>>> mae = tf.keras.losses.MeanAbsoluteError(
... reduction=tf.keras.losses.Reduction.SUM)
>>> mae(y_true, y_pred).numpy()
1.0
>>> # Using 'none' reduction type.
>>> mae = tf.keras.losses.MeanAbsoluteError(
... reduction=tf.keras.losses.Reduction.NONE)
>>> mae(y_true, y_pred).numpy()
array([0.5, 0.5], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_error'):
"""Initializes `MeanAbsoluteError` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'mean_absolute_error'.
"""
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * abs(y_true - y_pred) / y_true`
Standalone usage:
>>> y_true = [[2., 1.], [2., 3.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> mape = tf.keras.losses.MeanAbsolutePercentageError()
>>> mape(y_true, y_pred).numpy()
50.
>>> # Calling with 'sample_weight'.
>>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
20.
>>> # Using 'sum' reduction type.
>>> mape = tf.keras.losses.MeanAbsolutePercentageError(
... reduction=tf.keras.losses.Reduction.SUM)
>>> mape(y_true, y_pred).numpy()
100.
>>> # Using 'none' reduction type.
>>> mape = tf.keras.losses.MeanAbsolutePercentageError(
... reduction=tf.keras.losses.Reduction.NONE)
>>> mape(y_true, y_pred).numpy()
array([25., 75.], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=tf.keras.losses.MeanAbsolutePercentageError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_percentage_error'):
"""Initializes `MeanAbsolutePercentageError` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to
'mean_absolute_percentage_error'.
"""
super(MeanAbsolutePercentageError, self).__init__(
mean_absolute_percentage_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = square(log(y_true + 1.) - log(y_pred + 1.))`
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
>>> msle(y_true, y_pred).numpy()
0.240
>>> # Calling with 'sample_weight'.
>>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
0.120
>>> # Using 'sum' reduction type.
>>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
... reduction=tf.keras.losses.Reduction.SUM)
>>> msle(y_true, y_pred).numpy()
0.480
>>> # Using 'none' reduction type.
>>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
... reduction=tf.keras.losses.Reduction.NONE)
>>> msle(y_true, y_pred).numpy()
array([0.240, 0.240], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=tf.keras.losses.MeanSquaredLogarithmicError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_logarithmic_error'):
"""Initializes `MeanSquaredLogarithmicError` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to
'mean_squared_logarithmic_error'.
"""
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name=name, reduction=reduction)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss for binary (0 or 1) classification applications.
The loss function requires the following inputs:
- `y_true` (true label): This is either 0 or 1.
- `y_pred` (predicted value): This is the model's prediction, i.e., a single
floating-point value which either represents a
[logit](https://en.wikipedia.org/wiki/Logit) (i.e., a value in [-inf, inf]
when `from_logits=True`) or a probability (i.e., a value in [0., 1.] when
`from_logits=False`).
**Recommended Usage:** (set `from_logits=True`)
With `tf.keras` API:
```python
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
....
)
```
As a standalone function:
>>> # Example 1: (batch_size = 1, number of samples = 4)
>>> y_true = [0, 1, 0, 0]
>>> y_pred = [-18.6, 0.51, 2.94, -12.8]
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred).numpy()
0.865
>>> # Example 2: (batch_size = 2, number of samples = 4)
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
>>> # Using default 'auto'/'sum_over_batch_size' reduction type.
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred).numpy()
0.865
>>> # Using 'sample_weight' attribute
>>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
0.243
>>> # Using 'sum' reduction` type.
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
... reduction=tf.keras.losses.Reduction.SUM)
>>> bce(y_true, y_pred).numpy()
1.730
>>> # Using 'none' reduction type.
>>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
... reduction=tf.keras.losses.Reduction.NONE)
>>> bce(y_true, y_pred).numpy()
array([0.235, 1.496], dtype=float32)
**Default Usage:** (set `from_logits=False`)
>>> # Make the following updates to the above "Recommended Usage" section
>>> # 1. Set `from_logits=False`
>>> tf.keras.losses.BinaryCrossentropy() # OR ...(from_logits=False)
>>> # 2. Update `y_pred` to use probabilities instead of logits
>>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='binary_crossentropy'):
"""Initializes `BinaryCrossentropy` instance.
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
assume that `y_pred` contains probabilities (i.e., values in [0, 1]).
**Note - Using from_logits=True may be more numerically stable.**
label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
we compute the loss between the predicted labels and a smoothed version
of the true labels, where the smoothing squeezes the labels towards 0.5.
Larger values of `label_smoothing` correspond to heavier smoothing.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: (Optional) Name for the op. Defaults to 'binary_crossentropy'.
"""
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
self.from_logits = from_logits
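# --- Illustrative sketch (not part of the original module) ------------------
# The binary label smoothing described above squeezes targets towards 0.5:
# `y_smooth = y_true * (1 - label_smoothing) + 0.5 * label_smoothing`, so
# with `label_smoothing=0.1` a 1 becomes 0.95 and a 0 becomes 0.05. The
# helper name and demo values are assumptions for illustration only.
def _example_smooth_binary_labels(y_true, label_smoothing=0.1):
  y_true = math_ops.cast(y_true, K.floatx())
  return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing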
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided in a `one_hot` representation. If you want to
provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature.
In the snippet below, there are `# classes` floating point values per
example. The shape of both `y_pred` and `y_true` is
`[batch_size, num_classes]`.
Standalone usage:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> cce = tf.keras.losses.CategoricalCrossentropy()
>>> cce(y_true, y_pred).numpy()
1.177
>>> # Calling with 'sample_weight'.
>>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
0.814
>>> # Using 'sum' reduction type.
>>> cce = tf.keras.losses.CategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.SUM)
>>> cce(y_true, y_pred).numpy()
2.354
>>> # Using 'none' reduction type.
>>> cce = tf.keras.losses.CategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.NONE)
>>> cce(y_true, y_pred).numpy()
array([0.0513, 2.303], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalCrossentropy())
```
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_crossentropy'):
"""Initializes `CategoricalCrossentropy` instance.
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
**Note - Using from_logits=True is more numerically stable.**
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
meaning the confidence on label values are relaxed. For example, if
`0.1`, use `0.1 / num_classes` for non-target labels and
`0.9 + 0.1 / num_classes` for target labels.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'categorical_crossentropy'.
"""
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `# classes` floating point values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Standalone usage:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
>>> scce(y_true, y_pred).numpy()
1.177
>>> # Calling with 'sample_weight'.
>>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
0.814
>>> # Using 'sum' reduction type.
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.SUM)
>>> scce(y_true, y_pred).numpy()
2.354
>>> # Using 'none' reduction type.
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.NONE)
>>> scce(y_true, y_pred).numpy()
array([0.0513, 2.303], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=tf.keras.losses.SparseCategoricalCrossentropy())
```
"""
def __init__(self,
from_logits=False,
reduction=losses_utils.ReductionV2.AUTO,
name='sparse_categorical_crossentropy'):
"""Initializes `SparseCategoricalCrossentropy` instance.
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
**Note - Using from_logits=True may be more numerically stable.**
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to
'sparse_categorical_crossentropy'.
"""
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits)
@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
"""Computes the hinge loss between `y_true` and `y_pred`.
`loss = maximum(1 - y_true * y_pred, 0)`
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> h = tf.keras.losses.Hinge()
>>> h(y_true, y_pred).numpy()
1.3
>>> # Calling with 'sample_weight'.
>>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
0.55
>>> # Using 'sum' reduction type.
>>> h = tf.keras.losses.Hinge(
... reduction=tf.keras.losses.Reduction.SUM)
>>> h(y_true, y_pred).numpy()
2.6
>>> # Using 'none' reduction type.
>>> h = tf.keras.losses.Hinge(
... reduction=tf.keras.losses.Reduction.NONE)
>>> h(y_true, y_pred).numpy()
array([1.1, 1.5], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.Hinge())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):
"""Initializes `Hinge` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'hinge'.
"""
super(Hinge, self).__init__(hinge, name=name, reduction=reduction)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = square(maximum(1 - y_true * y_pred, 0))`
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> h = tf.keras.losses.SquaredHinge()
>>> h(y_true, y_pred).numpy()
1.86
>>> # Calling with 'sample_weight'.
>>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
0.73
>>> # Using 'sum' reduction type.
>>> h = tf.keras.losses.SquaredHinge(
... reduction=tf.keras.losses.Reduction.SUM)
>>> h(y_true, y_pred).numpy()
3.72
>>> # Using 'none' reduction type.
>>> h = tf.keras.losses.SquaredHinge(
... reduction=tf.keras.losses.Reduction.NONE)
>>> h(y_true, y_pred).numpy()
array([1.46, 2.26], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='squared_hinge'):
"""Initializes `SquaredHinge` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'squared_hinge'.
"""
super(SquaredHinge, self).__init__(
squared_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
Standalone usage:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> h = tf.keras.losses.CategoricalHinge()
>>> h(y_true, y_pred).numpy()
1.4
>>> # Calling with 'sample_weight'.
>>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
0.6
>>> # Using 'sum' reduction type.
>>> h = tf.keras.losses.CategoricalHinge(
... reduction=tf.keras.losses.Reduction.SUM)
>>> h(y_true, y_pred).numpy()
2.8
>>> # Using 'none' reduction type.
>>> h = tf.keras.losses.CategoricalHinge(
... reduction=tf.keras.losses.Reduction.NONE)
>>> h(y_true, y_pred).numpy()
array([1.2, 1.6], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_hinge'):
"""Initializes `CategoricalHinge` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'categorical_hinge'.
"""
super(CategoricalHinge, self).__init__(
categorical_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
"""Computes the Poisson loss between `y_true` and `y_pred`.
`loss = y_pred - y_true * log(y_pred)`
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [0., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> p = tf.keras.losses.Poisson()
>>> p(y_true, y_pred).numpy()
0.5
>>> # Calling with 'sample_weight'.
>>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
0.4
>>> # Using 'sum' reduction type.
>>> p = tf.keras.losses.Poisson(
... reduction=tf.keras.losses.Reduction.SUM)
>>> p(y_true, y_pred).numpy()
0.999
>>> # Using 'none' reduction type.
>>> p = tf.keras.losses.Poisson(
... reduction=tf.keras.losses.Reduction.NONE)
>>> p(y_true, y_pred).numpy()
array([0.999, 0.], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
"""Initializes `Poisson` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'poisson'.
"""
super(Poisson, self).__init__(poisson, name=name, reduction=reduction)
@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`,
where x is the error `y_pred - y_true`.
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [0., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> l = tf.keras.losses.LogCosh()
>>> l(y_true, y_pred).numpy()
0.108
>>> # Calling with 'sample_weight'.
>>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
0.087
>>> # Using 'sum' reduction type.
>>> l = tf.keras.losses.LogCosh(
... reduction=tf.keras.losses.Reduction.SUM)
>>> l(y_true, y_pred).numpy()
0.217
>>> # Using 'none' reduction type.
>>> l = tf.keras.losses.LogCosh(
... reduction=tf.keras.losses.Reduction.NONE)
>>> l(y_true, y_pred).numpy()
array([0.217, 0.], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='log_cosh'):
"""Initializes `LogCosh` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'log_cosh'.
"""
super(LogCosh, self).__init__(log_cosh, name=name, reduction=reduction)
@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Standalone usage:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> kl = tf.keras.losses.KLDivergence()
>>> kl(y_true, y_pred).numpy()
0.458
>>> # Calling with 'sample_weight'.
>>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
0.366
>>> # Using 'sum' reduction type.
>>> kl = tf.keras.losses.KLDivergence(
... reduction=tf.keras.losses.Reduction.SUM)
>>> kl(y_true, y_pred).numpy()
0.916
>>> # Using 'none' reduction type.
>>> kl = tf.keras.losses.KLDivergence(
... reduction=tf.keras.losses.Reduction.NONE)
>>> kl(y_true, y_pred).numpy()
array([0.916, -3.08e-06], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='kl_divergence'):
"""Initializes `KLDivergence` instance.
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'kl_divergence'.
"""
super(KLDivergence, self).__init__(
kl_divergence, name=name, reduction=reduction)
@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
"""Computes the Huber loss between `y_true` and `y_pred`.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Standalone usage:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> h = tf.keras.losses.Huber()
>>> h(y_true, y_pred).numpy()
0.155
>>> # Calling with 'sample_weight'.
>>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
0.09
>>> # Using 'sum' reduction type.
>>> h = tf.keras.losses.Huber(
... reduction=tf.keras.losses.Reduction.SUM)
>>> h(y_true, y_pred).numpy()
0.31
>>> # Using 'none' reduction type.
>>> h = tf.keras.losses.Huber(
... reduction=tf.keras.losses.Reduction.NONE)
>>> h(y_true, y_pred).numpy()
array([0.18, 0.13], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.Huber())
```
"""
def __init__(self,
delta=1.0,
reduction=losses_utils.ReductionV2.AUTO,
name='huber_loss'):
"""Initializes `Huber` instance.
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'huber_loss'.
"""
super(Huber, self).__init__(
huber, name=name, reduction=reduction, delta=delta)
@keras_export('keras.metrics.mean_squared_error', 'keras.metrics.mse',
'keras.metrics.MSE', 'keras.losses.mean_squared_error',
'keras.losses.mse', 'keras.losses.MSE')
@dispatch.add_dispatch_support
def mean_squared_error(y_true, y_pred):
"""Computes the mean squared error between labels and predictions.
After computing the squared distance between the inputs, the mean value over
the last dimension is returned.
`loss = mean(square(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
def _ragged_tensor_apply_loss(loss_fn, y_true, y_pred):
"""Apply a loss function on a per batch basis.
Args:
loss_fn: The loss function
y_true: truth values (RaggedTensor)
y_pred: predicted values (RaggedTensor)
Returns:
Loss-function result. A dense tensor if the output has a single dimension
(per-batch loss value); a ragged tensor otherwise.
"""
def rt_is_equiv_dense(rt):
"""Returns true if this RaggedTensor has the same row_lenghts across
all ragged dimensions and thus can be converted to a dense tensor
without loss of information.
Args:
rt: RaggedTensor
"""
return math_ops.reduce_all([
math_ops.equal(
math_ops.reduce_variance(math_ops.cast(row_lens, K.floatx())),
constant_op.constant([0.])) for row_lens in rt.nested_row_lengths()
])
def _convert_to_dense(inputs):
return tuple(rt.to_tensor() for rt in inputs)
def _wrapper(inputs):
_, y_pred = inputs
if isinstance(y_pred, ragged_tensor.RaggedTensor):
return control_flow_ops.cond(
rt_is_equiv_dense(y_pred),
lambda: loss_fn(*_convert_to_dense(inputs)), lambda: loss_fn(*inputs))
return loss_fn(*inputs)
lshape = y_pred.shape.as_list()[1:-1]
if len(lshape) > 0:
spec = ragged_tensor.RaggedTensorSpec(shape=lshape, dtype=y_pred.dtype)
else:
spec = tensor_spec.TensorSpec(shape=[], dtype=y_pred.dtype)
nested_splits_list = [rt.nested_row_splits for rt in (y_true, y_pred)]
assertion_list = ragged_util.assert_splits_match(nested_splits_list)
with ops.control_dependencies(assertion_list):
return ragged_map_ops.map_fn(_wrapper, elems=(y_true, y_pred), dtype=spec)
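# --- Illustrative sketch (not part of the original module) ------------------
# How the ragged dispatchers below exercise `_ragged_tensor_apply_loss`: both
# arguments are RaggedTensors with matching row splits, and the wrapped loss
# is applied row by row, yielding one value per batch row here. The demo
# values are assumptions made for illustration only.
def _example_ragged_mse():
  y_true = ragged_tensor.RaggedTensor.from_row_lengths(
      [1., 1., 0., 0., 1.], row_lengths=[2, 3])
  y_pred = ragged_tensor.RaggedTensor.from_row_lengths(
      [1., 0., 0., 1., 1.], row_lengths=[2, 3])
  # Row 0: mean((0, 1)) = 0.5; row 1: mean((0, 1, 0)) ~= 0.333.
  return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)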
@dispatch.dispatch_for_types(mean_squared_error, ragged_tensor.RaggedTensor)
def _ragged_tensor_mse(y_true, y_pred):
""" Implements support for handling RaggedTensors.
Args:
y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
When the number of dimensions of the batch feature vector [d0, .. dN] is
greater than one, the return value is a RaggedTensor. Otherwise, a dense
tensor with shape [batch_size] is returned.
"""
return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)
@keras_export('keras.metrics.mean_absolute_error', 'keras.metrics.mae',
'keras.metrics.MAE', 'keras.losses.mean_absolute_error',
'keras.losses.mae', 'keras.losses.MAE')
@dispatch.add_dispatch_support
def mean_absolute_error(y_true, y_pred):
"""Computes the mean absolute error between labels and predictions.
`loss = mean(abs(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@dispatch.dispatch_for_types(mean_absolute_error, ragged_tensor.RaggedTensor)
def _ragged_tensor_mae(y_true, y_pred):
""" RaggedTensor adapter for mean_absolute_error"""
return _ragged_tensor_apply_loss(mean_absolute_error, y_true, y_pred)
@keras_export('keras.metrics.mean_absolute_percentage_error',
'keras.metrics.mape', 'keras.metrics.MAPE',
'keras.losses.mean_absolute_percentage_error',
'keras.losses.mape', 'keras.losses.MAPE')
@dispatch.add_dispatch_support
def mean_absolute_percentage_error(y_true, y_pred):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)`
Standalone usage:
>>> y_true = np.random.random(size=(2, 3))
>>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
diff = math_ops.abs(
(y_true - y_pred) / K.maximum(math_ops.abs(y_true), K.epsilon()))
return 100. * K.mean(diff, axis=-1)
@dispatch.dispatch_for_types(mean_absolute_percentage_error,
ragged_tensor.RaggedTensor)
def _ragged_tensor_mape(y_true, y_pred):
""" Support RaggedTensors."""
return _ragged_tensor_apply_loss(mean_absolute_percentage_error, y_true,
y_pred)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
'keras.metrics.msle', 'keras.metrics.MSLE',
'keras.losses.mean_squared_logarithmic_error',
'keras.losses.msle', 'keras.losses.MSLE')
@dispatch.add_dispatch_support
def mean_squared_logarithmic_error(y_true, y_pred):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = np.maximum(y_true, 1e-7)
>>> y_pred = np.maximum(y_pred, 1e-7)
>>> assert np.allclose(
... loss.numpy(),
... np.mean(
... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1))
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
first_log = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.)
second_log = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.)
return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)
@dispatch.dispatch_for_types(mean_squared_logarithmic_error,
ragged_tensor.RaggedTensor)
def _ragged_tensor_msle(y_true, y_pred):
""" Implements support for handling RaggedTensors."""
return _ragged_tensor_apply_loss(mean_squared_logarithmic_error, y_true,
y_pred)
def _maybe_convert_labels(y_true):
"""Converts binary labels into -1/1."""
are_zeros = math_ops.equal(y_true, 0)
are_ones = math_ops.equal(y_true, 1)
is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2. * y_true - 1.
updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels,
lambda: y_true)
return updated_y_true
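# --- Illustrative sketch (not part of the original module) ------------------
# `_maybe_convert_labels` maps all-binary targets through `2 * y - 1`, so
# [0, 1, 1, 0] becomes [-1, 1, 1, -1]; targets already containing values
# other than 0/1 pass through unchanged. The helper name and demo values are
# assumptions for illustration only.
def _example_convert_binary_labels():
  binary = constant_op.constant([0., 1., 1., 0.])
  already_signed = constant_op.constant([-1., 1., 1., -1.])
  return (_maybe_convert_labels(binary),          # -> [-1., 1., 1., -1.]
          _maybe_convert_labels(already_signed))  # unchanged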
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
@dispatch.add_dispatch_support
def squared_hinge(y_true, y_pred):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`
Standalone usage:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided we will convert them to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(
math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
@dispatch.add_dispatch_support
def hinge(y_true, y_pred):
"""Computes the hinge loss between `y_true` and `y_pred`.
`loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`
Standalone usage:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided they will be converted to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@keras_export('keras.losses.categorical_hinge')
@dispatch.add_dispatch_support
def categorical_hinge(y_true, y_pred):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
Standalone usage:
>>> y_true = np.random.randint(0, 3, size=(2,))
>>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> pos = np.sum(y_true * y_pred, axis=-1)
>>> neg = np.amax((1. - y_true) * y_pred, axis=-1)
>>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.))
Args:
y_true: The ground truth values. `y_true` values are expected to be
either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor).
y_pred: The predicted values.
Returns:
Categorical hinge loss values.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
zero = math_ops.cast(0., y_pred.dtype)
return math_ops.maximum(neg - pos + 1., zero)
@keras_export('keras.losses.huber', v1=[])
@dispatch.add_dispatch_support
def huber(y_true, y_pred, delta=1.0):
"""Computes Huber loss value.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = math_ops.cast(y_pred, dtype=K.floatx())
y_true = math_ops.cast(y_true, dtype=K.floatx())
delta = math_ops.cast(delta, dtype=K.floatx())
error = math_ops.subtract(y_pred, y_true)
abs_error = math_ops.abs(error)
half = ops.convert_to_tensor_v2_with_dispatch(0.5, dtype=abs_error.dtype)
return K.mean(
array_ops.where_v2(
abs_error <= delta, half * math_ops.pow(error, 2),
half * math_ops.pow(delta, 2) + delta * (abs_error - delta)),
axis=-1)
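# --- Illustrative sketch (not part of the original module) ------------------
# Worked check of the piecewise definition above with `delta=1.0`: an error
# of 0.5 lands in the quadratic branch (0.5 * 0.5**2 = 0.125), an error of
# 2.0 in the linear branch (0.5 * 1**2 + 1 * (2 - 1) = 1.5), and `huber`
# returns their per-sample mean, 0.8125. The demo values are assumptions.
def _example_huber_branches():
  y_true = constant_op.constant([[0., 0.]])
  y_pred = constant_op.constant([[0.5, 2.0]])
  return huber(y_true, y_pred, delta=1.0)  # -> [0.8125]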
@keras_export('keras.losses.log_cosh', 'keras.losses.logcosh',
'keras.metrics.log_cosh', 'keras.metrics.logcosh')
@dispatch.add_dispatch_support
def log_cosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
Standalone usage:
>>> y_true = np.random.random(size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.logcosh(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> x = y_pred - y_true
>>> assert np.allclose(
... loss.numpy(),
... np.mean(x + np.log(np.exp(-2. * x) + 1.) - np.log(2.), axis=-1),
... atol=1e-5)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
def _logcosh(x):
return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype)
return K.mean(_logcosh(y_pred - y_true), axis=-1)
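# --- Illustrative sketch (not part of the original module) ------------------
# Numerical check of the approximation noted above: per element, log(cosh(x))
# is ~x**2 / 2 for small x (0.01 -> ~5e-05) and ~|x| - log(2) for large x
# (10 -> ~9.3069), and `log_cosh` returns the mean over the last axis. The
# demo values are assumptions made for illustration only.
def _example_logcosh_regimes():
  y_true = constant_op.constant([[0., 0.]])
  y_pred = constant_op.constant([[0.01, 10.0]])
  return log_cosh(y_true, y_pred)  # ~= [(5e-05 + 9.3069) / 2] ~= [4.6535]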
@keras_export('keras.metrics.categorical_crossentropy',
'keras.losses.categorical_crossentropy')
@dispatch.add_dispatch_support
def categorical_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0):
"""Computes the categorical crossentropy loss.
Standalone usage:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.0513, 2.303], dtype=float32)
Args:
y_true: Tensor of one-hot true targets.
y_pred: Tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
example, if `0.1`, use `0.1 / num_classes` for non-target labels
and `0.9 + 0.1 / num_classes` for target labels.
Returns:
Categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor_v2_with_dispatch(
label_smoothing, dtype=K.floatx())
def _smooth_labels():
num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_pred.dtype)
return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)
y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,
lambda: y_true)
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
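# --- Illustrative sketch (not part of the original module) ------------------
# Worked label-smoothing arithmetic from the docstring above: with three
# classes and `label_smoothing=0.1`, the one-hot target [0, 1, 0] becomes
# roughly [0.0333, 0.9333, 0.0333] via `y * (1 - 0.1) + 0.1 / 3` before the
# crossentropy is computed. The helper name and demo values are assumptions.
def _example_smoothed_one_hot():
  y_true = constant_op.constant([[0., 1., 0.]])
  label_smoothing = 0.1
  num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_true.dtype)
  return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)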
@dispatch.dispatch_for_types(categorical_crossentropy,
ragged_tensor.RaggedTensor)
def _ragged_tensor_categorical_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0):
""" Implements support for handling RaggedTensors.
Expected shape: (batch, sequence_len, n_classes) with sequence_len
being variable per batch.
Return shape: (batch, sequence_len).
When used by CategoricalCrossentropy() with the default reduction
(SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
number of elements independent of the batch. E.g. if the RaggedTensor
has 2 batches with [2, 1] values respectively, the resulting loss is
the sum of the individual loss values divided by 3.
"""
fn = functools.partial(
categorical_crossentropy,
from_logits=from_logits,
label_smoothing=label_smoothing)
return _ragged_tensor_apply_loss(fn, y_true, y_pred)
@keras_export('keras.metrics.sparse_categorical_crossentropy',
'keras.losses.sparse_categorical_crossentropy')
@dispatch.add_dispatch_support
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
"""Computes the sparse categorical crossentropy loss.
Standalone usage:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.0513, 2.303], dtype=float32)
Args:
y_true: Ground truth values.
y_pred: The predicted values.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the entropy is
computed.
Returns:
Sparse categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.sparse_categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis)
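# Editor's sketch (not part of the original module): sparse integer labels select the
# same -log(p[label]) terms that one-hot categorical crossentropy computes. The values
# reuse the docstring example above; the function name is hypothetical.
def _sparse_crossentropy_sketch():
  import numpy as np
  y_true = np.array([1, 2])
  y_pred = np.array([[0.05, 0.95, 0.0], [0.1, 0.8, 0.1]])
  picked = np.clip(y_pred[np.arange(len(y_true)), y_true], 1e-7, 1.0)
  # -> approximately [0.0513, 2.3026]
  return -np.log(picked)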
@keras_export('keras.metrics.binary_crossentropy',
'keras.losses.binary_crossentropy')
@dispatch.add_dispatch_support
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
"""Computes the binary crossentropy loss.
Standalone usage:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.916 , 0.714], dtype=float32)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by
squeezing them towards 0.5. That is, using `1. - 0.5 * label_smoothing`
for the target class and `0.5 * label_smoothing` for the non-target class.
Returns:
Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor_v2_with_dispatch(
label_smoothing, dtype=K.floatx())
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,
lambda: y_true)
return K.mean(
K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
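# Editor's sketch (not part of the original module): binary crossentropy with the
# smoothing-towards-0.5 described above, in plain NumPy. A small epsilon stands in
# for the backend's clipping; names and values are illustrative only.
def _binary_crossentropy_sketch():
  import numpy as np
  y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
  y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
  label_smoothing = 0.1
  y_smooth = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
  p = np.clip(y_pred, 1e-7, 1.0 - 1e-7)
  bce = -(y_smooth * np.log(p) + (1.0 - y_smooth) * np.log(1.0 - p))
  return np.mean(bce, axis=-1)  # one scalar per sample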
@dispatch.dispatch_for_types(binary_crossentropy, ragged_tensor.RaggedTensor)
def _ragged_tensor_binary_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0):
""" Implements support for handling RaggedTensors.
Expected shape: (batch, sequence_len) with sequence_len being variable
per batch.
Return shape: (batch,); returns the per batch mean of the loss values.
When used by BinaryCrossentropy() with the default reduction
(SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over
the number of batches.
"""
fn = functools.partial(
binary_crossentropy,
from_logits=from_logits,
label_smoothing=label_smoothing)
return _ragged_tensor_apply_loss(fn, y_true, y_pred)
@keras_export('keras.metrics.kl_divergence',
'keras.metrics.kullback_leibler_divergence', 'keras.metrics.kld',
'keras.metrics.KLD', 'keras.losses.kl_divergence',
'keras.losses.kullback_leibler_divergence', 'keras.losses.kld',
'keras.losses.KLD')
@dispatch.add_dispatch_support
def kl_divergence(y_true, y_pred):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
>>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
>>> assert np.array_equal(
... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
A `Tensor` with loss.
Raises:
TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
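# Editor's sketch (not part of the original module): the clipped KL divergence
# computed above, in plain NumPy. Names and values are illustrative only.
def _kl_divergence_sketch():
  import numpy as np
  eps = 1e-7
  y_true = np.clip(np.array([[0.0, 1.0], [1.0, 0.0]]), eps, 1.0)
  y_pred = np.clip(np.array([[0.6, 0.4], [0.4, 0.6]]), eps, 1.0)
  return np.sum(y_true * np.log(y_true / y_pred), axis=-1)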
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
@dispatch.add_dispatch_support
def poisson(y_true, y_pred):
"""Computes the Poisson loss between y_true and y_pred.
The Poisson loss is the mean of the elements of the `Tensor`
`y_pred - y_true * log(y_pred)`.
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.poisson(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_pred = y_pred + 1e-7
>>> assert np.allclose(
... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
... atol=1e-5)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.
Raises:
InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
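# Editor's sketch (not part of the original module): the Poisson loss formula above,
# mean(y_pred - y_true * log(y_pred + eps)), in plain NumPy. Names and values are
# illustrative only.
def _poisson_sketch():
  import numpy as np
  y_true = np.array([[1.0, 0.0], [2.0, 1.0]])
  y_pred = np.array([[0.8, 0.2], [1.5, 0.9]])
  return np.mean(y_pred - y_true * np.log(y_pred + 1e-7), axis=-1)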
@keras_export(
'keras.losses.cosine_similarity',
v1=[
'keras.metrics.cosine_proximity',
'keras.metrics.cosine',
'keras.losses.cosine_proximity',
'keras.losses.cosine',
'keras.losses.cosine_similarity',
])
@dispatch.add_dispatch_support
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Note that the result is a number between -1 and 1: values closer to -1
indicate greater similarity, 0 indicates orthogonality, and values closer
to 1 indicate greater dissimilarity. This makes it usable as a loss
function in a setting
where you try to maximize the proximity between predictions and
targets. If either `y_true` or `y_pred` is a zero vector, cosine
similarity will be 0 regardless of the proximity between predictions
and targets.
`loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`
Standalone usage:
>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
>>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)
>>> loss.numpy()
array([-0., -0.999, 0.999], dtype=float32)
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity.
Returns:
Cosine similarity tensor.
"""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return -math_ops.reduce_sum(y_true * y_pred, axis=axis)
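# Editor's sketch (not part of the original module): negative cosine similarity as
# returned above, in plain NumPy. Orthogonal rows give 0, identical rows give -1.
# Names and values are illustrative only.
def _cosine_similarity_sketch():
  import numpy as np
  y_true = np.array([[0.0, 1.0], [1.0, 1.0]])
  y_pred = np.array([[1.0, 0.0], [1.0, 1.0]])
  t = y_true / np.maximum(np.linalg.norm(y_true, axis=-1, keepdims=True), 1e-12)
  p = y_pred / np.maximum(np.linalg.norm(y_pred, axis=-1, keepdims=True), 1e-12)
  return -np.sum(t * p, axis=-1)  # -> approximately [-0., -1.]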
@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
"""Computes the cosine similarity between labels and predictions.
Note that the result is a number between -1 and 1: values closer to -1
indicate greater similarity, 0 indicates orthogonality, and values closer
to 1 indicate greater dissimilarity. This makes it usable as a loss
function in a setting
where you try to maximize the proximity between predictions and targets.
If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0
regardless of the proximity between predictions and targets.
`loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`
Standalone usage:
>>> y_true = [[0., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
>>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
>>> # loss = -mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
>>> #      = -((0. + 0.) + (0.5 + 0.5)) / 2
>>> cosine_loss(y_true, y_pred).numpy()
-0.5
>>> # Calling with 'sample_weight'.
>>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
-0.0999
>>> # Using 'sum' reduction type.
>>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
... reduction=tf.keras.losses.Reduction.SUM)
>>> cosine_loss(y_true, y_pred).numpy()
-0.999
>>> # Using 'none' reduction type.
>>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
... reduction=tf.keras.losses.Reduction.NONE)
>>> cosine_loss(y_true, y_pred).numpy()
array([-0., -0.999], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
```
Args:
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of
built-in training loops such as `tf.keras` `compile` and `fit`, using
`AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
custom training [tutorial]
(https://www.tensorflow.org/tutorials/distribute/custom_training) for more
details.
name: Optional name for the op.
"""
def __init__(self,
axis=-1,
reduction=losses_utils.ReductionV2.AUTO,
name='cosine_similarity'):
super(CosineSimilarity, self).__init__(
cosine_similarity, reduction=reduction, name=name, axis=axis)
# Aliases.
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence = kl_divergence
logcosh = log_cosh
huber_loss = huber
def is_categorical_crossentropy(loss):
result = ((isinstance(loss, CategoricalCrossentropy) or
(isinstance(loss, LossFunctionWrapper) and
loss.fn == categorical_crossentropy) or
(hasattr(loss, '__name__') and
loss.__name__ == 'categorical_crossentropy') or
(loss == 'categorical_crossentropy')))
return result
@keras_export('keras.losses.serialize')
def serialize(loss):
"""Serializes loss function or `Loss` instance.
Args:
loss: A Keras `Loss` instance or a loss function.
Returns:
Loss configuration dictionary.
"""
return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
"""Deserializes a serialized loss class/function instance.
Args:
name: Loss configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras `Loss` instance or a loss function.
"""
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='loss function')
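# Editor's sketch (not part of the original module): a serialize/deserialize round
# trip, assuming a standard TF 2.x install where these helpers are exposed as
# `tf.keras.losses.serialize` and `tf.keras.losses.deserialize`.
def _serialize_roundtrip_sketch():
  import tensorflow as tf
  loss = tf.keras.losses.MeanSquaredError()
  config = tf.keras.losses.serialize(loss)        # dict with 'class_name' and 'config'
  restored = tf.keras.losses.deserialize(config)  # a new MeanSquaredError instance
  return type(restored).__name__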
@keras_export('keras.losses.get')
def get(identifier):
"""Retrieves a Keras loss as a `function`/`Loss` class instance.
The `identifier` may be the string name of a loss function or `Loss` class.
>>> loss = tf.keras.losses.get("categorical_crossentropy")
>>> type(loss)
<class 'function'>
>>> loss = tf.keras.losses.get("CategoricalCrossentropy")
>>> type(loss)
<class '...tensorflow.python.keras.losses.CategoricalCrossentropy'>
You can also specify `config` of the loss to this function by passing a dict
containing `class_name` and `config` as an identifier. Also note that the
`class_name` must map to a `Loss` class.
>>> identifier = {"class_name": "CategoricalCrossentropy",
... "config": {"from_logits": True}}
>>> loss = tf.keras.losses.get(identifier)
>>> type(loss)
<class '...tensorflow.python.keras.losses.CategoricalCrossentropy'>
Args:
identifier: A loss identifier. One of None or string name of a loss
function/class or loss configuration dictionary or a loss function or a
loss class instance
Returns:
A Keras loss as a `function`/ `Loss` class instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError(
'Could not interpret loss function identifier: {}'.format(identifier))
LABEL_DTYPES_FOR_LOSSES = {
losses_impl.sparse_softmax_cross_entropy: 'int32',
sparse_categorical_crossentropy: 'int32'
}
|
annarev/tensorflow
|
tensorflow/python/keras/losses.py
|
Python
|
apache-2.0
| 76,501
|
from __future__ import absolute_import, division, unicode_literals
import glob
import logging
import operator
import os
import re
import sys
from mopidy import backend
from mopidy.m3u import translator
from mopidy.models import Playlist, Ref
logger = logging.getLogger(__name__)
class M3UPlaylistsProvider(backend.PlaylistsProvider):
# TODO: currently this only handles UNIX file systems
_invalid_filename_chars = re.compile(r'[/]')
def __init__(self, *args, **kwargs):
super(M3UPlaylistsProvider, self).__init__(*args, **kwargs)
self._playlists_dir = self.backend._config['m3u']['playlists_dir']
self._playlists = {}
self.refresh()
def as_list(self):
refs = [
Ref.playlist(uri=pl.uri, name=pl.name)
for pl in self._playlists.values()]
return sorted(refs, key=operator.attrgetter('name'))
def get_items(self, uri):
playlist = self._playlists.get(uri)
if playlist is None:
return None
return [Ref.track(uri=t.uri, name=t.name) for t in playlist.tracks]
def create(self, name):
playlist = self._save_m3u(Playlist(name=name))
self._playlists[playlist.uri] = playlist
logger.info('Created playlist %s', playlist.uri)
return playlist
def delete(self, uri):
if uri in self._playlists:
path = translator.playlist_uri_to_path(uri, self._playlists_dir)
if os.path.exists(path):
os.remove(path)
else:
logger.warning(
'Trying to delete missing playlist file %s', path)
del self._playlists[uri]
else:
logger.warning('Trying to delete unknown playlist %s', uri)
def lookup(self, uri):
return self._playlists.get(uri)
def refresh(self):
playlists = {}
encoding = sys.getfilesystemencoding()
for path in glob.glob(os.path.join(self._playlists_dir, b'*.m3u')):
relpath = os.path.basename(path)
uri = translator.path_to_playlist_uri(relpath)
name = os.path.splitext(relpath)[0].decode(encoding, 'replace')
tracks = translator.parse_m3u(path)
playlists[uri] = Playlist(uri=uri, name=name, tracks=tracks)
self._playlists = playlists
logger.info(
'Loaded %d M3U playlists from %s',
len(playlists), self._playlists_dir)
# TODO Trigger playlists_loaded event?
def save(self, playlist):
assert playlist.uri, 'Cannot save playlist without URI'
assert playlist.uri in self._playlists, \
'Cannot save playlist with unknown URI: %s' % playlist.uri
original_uri = playlist.uri
playlist = self._save_m3u(playlist)
if playlist.uri != original_uri and original_uri in self._playlists:
self.delete(original_uri)
self._playlists[playlist.uri] = playlist
return playlist
def _sanitize_m3u_name(self, name, encoding=sys.getfilesystemencoding()):
name = self._invalid_filename_chars.sub('|', name.strip())
# make sure we end up with a valid path segment
name = name.encode(encoding, errors='replace')
name = os.path.basename(name) # paranoia?
name = name.decode(encoding)
return name
def _save_m3u(self, playlist, encoding=sys.getfilesystemencoding()):
if playlist.name:
name = self._sanitize_m3u_name(playlist.name, encoding)
uri = translator.path_to_playlist_uri(
name.encode(encoding) + b'.m3u')
path = translator.playlist_uri_to_path(uri, self._playlists_dir)
elif playlist.uri:
uri = playlist.uri
path = translator.playlist_uri_to_path(uri, self._playlists_dir)
name, _ = os.path.splitext(os.path.basename(path).decode(encoding))
else:
raise ValueError('M3U playlist needs name or URI')
translator.save_m3u(path, playlist.tracks, 'latin1')
# assert playlist name matches file name/uri
return playlist.replace(uri=uri, name=name)
|
dbrgn/mopidy
|
mopidy/m3u/playlists.py
|
Python
|
apache-2.0
| 4,154
|
"""
Stdin: N/A
Stdout: N/A
Author: Jey Han Lau
Date: Nov 16
"""
import argparse
import sys
import codecs
import imp
import operator
import random
import os
import cPickle
import time as tm
import tensorflow as tf
import numpy as np
from geo_model import TGP
from util import *
#parser arguments
desc = "Given train/valid json tweets, train neural network to predict tweet locations"
parser = argparse.ArgumentParser(description=desc)
###################
#optional argument#
###################
#parser.add_argument("-v", "--verbosity", help="")
parser.add_argument("-c", "--config", help="path of config file")
args = parser.parse_args()
#load config
if args.config:
print "Loading config from:", args.config
cf = imp.load_source('config', args.config)
else:
print "Loading config from default directory"
import config as cf
###########
#functions#
###########
def run_epoch(data, models, is_training):
reps = []
start_time = tm.time()
costs, accs = 0.0, []
num_batches = int(len(data)/cf.batch_size)
batch_ids = range(num_batches)
random.shuffle(batch_ids)
#create text length to bucket id map
lenxbucket, prev_b = {}, -1
for bi, b in enumerate(cf.bucket_sizes):
for i in xrange(prev_b+1, b+1):
lenxbucket[i] = (bi, b)
prev_b = b
#generate results to call to models in different buckets
res = []
for bi, b in enumerate(cf.bucket_sizes):
res.append([models[bi].cost, models[bi].probs, models[bi].rep, models[bi].train_op \
if is_training else tf.no_op()])
for ni, i in enumerate(batch_ids):
x, y, time, day, offset, timezone, loc, desc, name, usertime, noise, _, b = \
get_batch(data, i, lenxbucket, is_training, cf)
cost, prob, dn , _ = sess.run(res[b], {models[b].x:x, models[b].y:y, models[b].time:time, models[b].day:day, \
models[b].offset:offset, models[b].timezone:timezone, models[b].loc:loc, models[b].desc:desc, \
models[b].name:name, models[b].usertime:usertime, models[b].noise:noise})
costs += cost
pred = np.argmax(prob, axis=1)
accs.extend(pred == y)
if not is_training:
reps.extend(list(np.reshape(dn, [-1])))
#print some training statistics
if (((ni % 10) == 0) and cf.verbose) or (ni == num_batches-1):
if is_training:
sys.stdout.write("TRAIN ")
else:
sys.stdout.write("VALID ")
sys.stdout.write("%d/%d: avg loss = %.3f; avg acc = %.3f; inst/sec = %.1f" % \
(ni+1, num_batches, costs/(ni+1), np.mean(accs), float((ni+1)*cf.batch_size)/(tm.time()-start_time)))
if ni == (num_batches-1):
sys.stdout.write("\n")
else:
sys.stdout.write("\r")
sys.stdout.flush()
return np.array(reps), np.mean(accs)
######
#main#
######
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
#set the seeds
random.seed(cf.seed)
np.random.seed(cf.seed)
#load raw train and valid data and labels
print "Loading train and valid labels..."
train_label = load_label(cf.train_label, cf)
valid_label = load_label(cf.valid_label, cf)
print "Loading train and valid data..."
train_data = load_data(cf.train_data, train_label, True, cf)
valid_data = load_data(cf.valid_data, valid_label, False, cf)
#collect vocab and classes
print "Collecting text vocab..."
vocabxid, idxvocab, _ = get_vocab(train_data, "text", "char", cf.word_minfreq)
print "Collecting time zone vocab..."
tzxid, _, _ = get_vocab(train_data, "timezone", "word", 0)
print "Collecting location vocab..."
locxid, _, _ = get_vocab(train_data, "location", "char", cf.word_minfreq)
print "Collecting description vocab..."
descxid, _, _ = get_vocab(train_data, "description", "char", cf.word_minfreq)
print "Collecting name vocab..."
namexid, _, _ = get_vocab(train_data, "name", "char", cf.word_minfreq)
print "Collecting class labels..."
classes = get_classes(train_data, train_label)
#clean text data
print "Converting text to ids..."
train_len_x, train_miss_y, train_len_loc, train_len_desc, train_len_name = clean_data(train_data, train_label, \
vocabxid, tzxid, locxid, descxid, namexid, classes, cf)
valid_len_x, valid_miss_y, valid_len_loc, valid_len_desc, valid_len_name = clean_data(valid_data, valid_label, \
vocabxid, tzxid, locxid, descxid, namexid, classes, cf)
#Sorting data based on length
print "Sorting data based on tweet length..."
train_data = sorted(train_data, key=lambda item: len(item["x"]))
valid_data = sorted(valid_data, key=lambda item: len(item["x"]))
print "\nStatistics:"
print "Number of train instances =", len(train_data)
print "Number of valid instances =", len(valid_data)
print "Text vocab size =", len(vocabxid)
print "Location vocab size =", len(locxid)
print "Description vocab size =", len(descxid)
print "Name vocab size =", len(namexid)
print "Class size =", len(classes)
print "No. of timezones =", len(tzxid)
print ("Train:\n\tmean/max text len = %.2f/%d;" + \
"\n\tmean/max location len = %.2f/%d;" + \
"\n\tmean/max description len = %.2f/%d;" + \
"\n\tmean/max name len = %.2f/%d;" + \
"\n\tno. instances with missing classes = %d") % \
(np.mean(train_len_x), max(train_len_x), np.mean(train_len_loc), max(train_len_loc), \
np.mean(train_len_desc), max(train_len_desc), np.mean(train_len_name), max(train_len_name), train_miss_y)
print ("Valid:\n\tmean/max text len = %.2f/%d;" + \
"\n\tmean/max location len = %.2f/%d;" + \
"\n\tmean/max description len = %.2f/%d;" + \
"\n\tmean/max name len = %.2f/%d;" + \
"\n\tno. instances with missing classes = %d") % \
(np.mean(valid_len_x), max(valid_len_x), np.mean(valid_len_loc), max(valid_len_loc), \
np.mean(valid_len_desc), max(valid_len_desc), np.mean(valid_len_name), max(valid_len_name), valid_miss_y)
#train model
with tf.Graph().as_default(), tf.Session() as sess:
tf.set_random_seed(cf.seed)
initializer = tf.contrib.layers.xavier_initializer()
mtrains, mvalids = [], []
with tf.variable_scope("model", reuse=None, initializer=initializer):
mtrains.append(TGP(is_training=True, vocab_size=len(idxvocab), num_steps=cf.bucket_sizes[0], \
num_classes=len(classes), num_timezones=len(tzxid), loc_vsize=len(locxid), \
desc_vsize=len(descxid), name_vsize=len(namexid), config=cf))
with tf.variable_scope("model", reuse=True, initializer=initializer):
if len(cf.bucket_sizes) > 1:
for b in cf.bucket_sizes[1:]:
mtrains.append(TGP(is_training=True, vocab_size=len(idxvocab), num_steps=b, \
num_classes=len(classes), num_timezones=len(tzxid), loc_vsize=len(locxid), \
desc_vsize=len(descxid), name_vsize=len(namexid), config=cf))
for b in cf.bucket_sizes:
mvalids.append(TGP(is_training=False, vocab_size=len(idxvocab), num_steps=b, \
num_classes=len(classes), num_timezones=len(tzxid), loc_vsize=len(locxid), \
desc_vsize=len(descxid), name_vsize=len(namexid), config=cf))
tf.initialize_all_variables().run()
#save model every epoch
if cf.save_model:
if not os.path.exists(os.path.join(cf.output_dir, cf.output_prefix)):
os.makedirs(os.path.join(cf.output_dir, cf.output_prefix))
#create saver object to save model
saver = tf.train.Saver()
#train model
reps = None
prev_acc = 0.0
for i in xrange(cf.epoch_size):
print "\nEpoch =", i
#run a train epoch
run_epoch(train_data, mtrains, True)
#run a valid epoch
reps, acc = run_epoch(valid_data, mvalids, False)
if cf.save_model:
if acc > prev_acc:
saver.save(sess, os.path.join(cf.output_dir, cf.output_prefix, "model.ckpt"))
prev_acc = acc
else:
saver.restore(sess, os.path.join(cf.output_dir, cf.output_prefix, "model.ckpt"))
print "\tNew valid performance > prev valid performance: restoring previous parameters..."
#save time parameters
if cf.save_model:
if not os.path.exists(os.path.join(cf.output_dir, cf.output_prefix)):
os.makedirs(os.path.join(cf.output_dir, cf.output_prefix))
np.save(open(os.path.join(cf.output_dir, cf.output_prefix, "rep.npy"), "w"), reps)
#feature ID information
cPickle.dump((vocabxid, tzxid, locxid, descxid, namexid, classes), \
open(os.path.join(cf.output_dir, cf.output_prefix, "feature_ids.pickle"), "w"))
#create a dictionary object for config
cf_dict = {}
for k,v in vars(cf).items():
if not k.startswith("__"):
cf_dict[k] = v
cPickle.dump(cf_dict, open(os.path.join(cf.output_dir, cf.output_prefix, "config.pickle"), "w"))
|
jhlau/twitter-deepgeo
|
geo_train.py
|
Python
|
apache-2.0
| 8,962
|
'''
Atomix project, plot_atoms.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): Manu Bansal
'''
import sys, os
from pyparse_util import *
import matplotlib
import matplotlib.pyplot as plt
FIGSIZE_PLOT_ATOMS = (70,70)
def plot_atoms_i_horz(tfname, atom_stats_sorted, atom_labels_sorted, tfname_labelmap):
atomids = [atomid for (atomid, stat_tup) in atom_stats_sorted]
stat_tups = [stat_tup for (atomid, stat_tup) in atom_stats_sorted]
means = [mean for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
medians = [median for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
mins = [minn for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
maxs = [maxx for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
pctls95 = [pctl95 for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
plt.figure()
####plt.margins(0.1) #extend axis limits by this fraction
####plt.subplots_adjust(left=0.15, right=0.75, bottom=0.1, top=0.9)
plt.subplots_adjust(left=0.15, right=0.9, bottom=0.1, top=0.85)
#plt.plot(c0_ts, c0_st, 'r', c1_ts, c1_st, 'g', c2_ts, c2_st, 'b', c3_ts, c3_st, 'k')
ids = range(len(atomids))
f = open(tfname_labelmap, 'w')
for id_, atomid, label in zip(ids, atomids, atom_labels_sorted):
f.write('%s,%s,%s\n' % (id_,atomid,label))
f.close()
plt.plot(ids, means , 'b-')
plt.plot(ids, maxs , 'r+-')
plt.plot(ids, mins , 'g.-')
plt.plot(ids, medians , 'k--')
plt.plot(ids, pctls95 , 'k-.')
xs = ids
#ys = maxs
#ys = means
ys = mins
ats = atom_labels_sorted
#for x, y, at, atid in zip(xs, ys, ats, atomids):
# #plt.annotate(at, xy=(x,y), rotation=60)
# #plt.annotate(at, xy=(y*1.4,x), rotation=0, fontsize='x-small')
# plt.annotate(atid, xy=(x, y), rotation=0, fontsize='x-small')
####plt.annotate(atid, xy=(x, y - 100), rotation=0, fontsize='x-small')
#
plt.xlabel('Atom index')
plt.ylabel('Cycles')
plt.grid(True)
legendvals = ['mean', 'max', 'min', 'median','95%tile']
#fig = plt.gcf()
#fig.set_size_inches(18.5,10.5)
#------
#plt.legend(legendvals, loc='lower right')
#------
plt.legend(legendvals, loc='upper left')
#------
## Shrink current axis by 20%
#ax = plt.subplot(111)
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
## Put a legend to the right of the current axis
##plt.legend(legendvals)
#ax.legend(legendvals, loc='center left', bbox_to_anchor=(1, 0.5))
#------
## ax = plt.subplot(111)
## # Shrink current axis's height by 10% on the bottom
## box = ax.get_position()
## ax.set_position([box.x0, box.y0 + box.height * 0.1,
## box.width, box.height * 0.9])
## # Put a legend below current axis
## #ax.legend(worst_top_labels, loc='upper center', bbox_to_anchor=(0.5, -0.05))#,
## ax.legend(legendvals, loc='upper center', bbox_to_anchor=(0.5, -0.13),
## fancybox=True, shadow=True, ncol=5)
##
#plt.show()
tfname_pdf = tfname + ".pdf"
plt.savefig(tfname_pdf)
plt.title('Atom compute times and predictability\n%s' % os.getcwd().split('orsys')[1])
plt.savefig(tfname)
#plt.savefig('%s_plot_states.png' % tf, pad_inches=2)
def plot_atoms_i(tfname, atom_stats_sorted, atom_labels_sorted):
atomids = [atomid for (atomid, stat_tup) in atom_stats_sorted]
stat_tups = [stat_tup for (atomid, stat_tup) in atom_stats_sorted]
means = [mean for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
medians = [median for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
mins = [minn for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
maxs = [maxx for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
pctls95 = [pctl95 for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
plt.figure(figsize=FIGSIZE_PLOT_ATOMS)
plt.margins(0.1) #extend axis limits by this fraction
plt.subplots_adjust(left=0.15, right=0.75, bottom=0.1, top=0.85)
plt.title('Atom compute times and predictability\n%s' % os.getcwd().split('orsys')[1])
#plt.plot(c0_ts, c0_st, 'r', c1_ts, c1_st, 'g', c2_ts, c2_st, 'b', c3_ts, c3_st, 'k')
ids = range(len(atomids))
plt.plot(means ,ids, 'b.-')
plt.plot(maxs ,ids, 'r.-')
plt.plot(mins ,ids, 'g.-')
plt.plot(medians ,ids, 'k.--')
plt.plot(pctls95 ,ids, 'k.-')
xs = ids
#ys = maxs
ys = means
ats = atom_labels_sorted
for x, y, at, atid in zip(xs, ys, ats, atomids):
#plt.annotate(at, xy=(x,y), rotation=60)
ann = '%s, %s' % (y, at)
#plt.annotate(ann, xy=(y*1.4,x), rotation=0, fontsize='x-small')
plt.annotate(ann, xy=(y+20,x), rotation=0, fontsize='x-small')
#plt.xlabel('Action ID')
#plt.ylabel('Cycles')
plt.ylabel('Atom ID')
plt.xlabel('Cycles')
plt.grid(True)
legendvals = ['mean', 'max', 'min', 'median','95%tile']
plt.legend(legendvals, loc='lower right')
#------
ax = plt.subplot(111)
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
#ax.legend(worst_top_labels, loc='upper center', bbox_to_anchor=(0.5, -0.05))#,
ax.legend(legendvals, loc='upper center', bbox_to_anchor=(0.5, -0.13),
fancybox=True, shadow=True, ncol=5)
#plt.show()
plt.savefig(tfname)
#plt.savefig('%s_plot_states.png' % tf, pad_inches=2)
def plot_atomis_i(tfname, atomi_stats, atomi_labels, atomi_durs):
plot_atomis_i_one_figure(tfname, atomi_stats, atomi_labels, atomi_durs)
plot_atomis_i_sep_figures(tfname, atomi_stats, atomi_labels, atomi_durs)
def plot_atomis_i_sep_figures(tfname, atomi_stats, atomi_labels, atomi_durs):
atomi_stats_fg, atomi_stats_ii, atomi_stats_fd, atomi_stats_fw, atomi_stats_fgmw = atomi_stats
atomi_stats_a = [atomi_stats_fg, atomi_stats_fw, atomi_stats_fgmw, atomi_stats_ii, atomi_stats_fd]
#plt.figure(figsize=(20,10))
#plt.figure(figsize=(50,20))
legendvals = ['mean', 'max', 'min', 'median','95%tile']
titles = ['FIFO get buffer times', 'FIFO_wait_times', 'Excl FIFO get times', 'Block implementation call', 'FIFO return buffer times']
plotted_atomids = []
#plot stats
horz = True
btfname = tfname
for ii in [0,1,2,3,4]:
stats = atomi_stats_a[ii]
atomids = [atomid for (atomid, stat_tup) in stats]
stat_tups = [stat_tup for (atomid, stat_tup) in stats]
means = [mean for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
medians = [median for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
mins = [minn for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
maxs = [maxx for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
pctls95 = [pctl95 for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
tfname = btfname.split('.png')[0] + "subplot_" + str(ii) + '.png'
plt.figure(figsize=(8,8))
#plt.subplot(2,5,ii+1)
plotted_atomids = atomids
#plt.margins(0.1) #extend axis limits by this fraction
plt.margins(0.05) #extend axis limits by this fraction
plt.title(titles[ii])
ids = range(len(atomids))
if (horz):
plt.plot(ids, means , 'b.-')
plt.plot(ids, maxs , 'r.-')
plt.plot(ids, mins , 'g.-')
plt.plot(ids, medians , 'k.--')
plt.plot(ids, pctls95 , 'k.-')
plt.xlabel('Atom ID')
plt.ylabel('Cycles')
else:
plt.plot(means ,ids, 'b.-')
plt.plot(maxs ,ids, 'r.-')
plt.plot(mins ,ids, 'g.-')
plt.plot(medians ,ids, 'k.--')
plt.plot(pctls95 ,ids, 'k.-')
plt.ylabel('Atom ID')
plt.xlabel('Cycles')
plt.grid(True)
plt.savefig(tfname)
# ax = plt.subplot(2,5,1)
# ys = ids
# ats = plotted_atomids
#
# if not horz:
# for y, at in zip(ys, ats):
# ax.annotate(atomi_labels[at], xy=(0,y), rotation=0, fontsize='x-small')
#
# ax = plt.subplot(2,5,5)
# ax.legend(legendvals, loc='center left', bbox_to_anchor=(1, 0.5))
#plot timeseries
atomi_durs_fg, atomi_durs_ii, atomi_durs_fd, atomi_durs_fw, atomi_durs_fgmw = atomi_durs
atomi_durs_a = [atomi_durs_fg, atomi_durs_fw, atomi_durs_fgmw, atomi_durs_ii, atomi_durs_fd]
for ii in [0,1,2,3,4]:
labels = []
durs = atomi_durs_a[ii]
print durs
#durs_fg[idv1] = [t_fg]
#plt.subplot(2,5,ii+6)
tfname = btfname.split('.png')[0] + "subplot_" + str(ii + 5) + '.png'
plt.figure(figsize=(8,8))
for atomid, durations in durs:
plt.plot(durations[0:200], '.-')
labels.append(atomi_labels[atomid])
plt.ylabel('Cycles')
plt.xlabel('Invocation number')
plt.grid(True)
plt.title('Timeseries')
plt.savefig(tfname)
#ax = plt.subplot(2,5,10)
#ax.legend(labels, loc='center left', bbox_to_anchor=(1, 0.5), fontsize='x-small')
#plt.suptitle('Atom internals and FIFO module costs')
#plt.show()
#plt.savefig(tfname)
def plot_atomis_i_one_figure(tfname, atomi_stats, atomi_labels, atomi_durs):
atomi_stats_fg, atomi_stats_ii, atomi_stats_fd, atomi_stats_fw, atomi_stats_fgmw = atomi_stats
atomi_stats_a = [atomi_stats_fg, atomi_stats_fw, atomi_stats_fgmw, atomi_stats_ii, atomi_stats_fd]
#plt.figure(figsize=(20,10))
plt.figure(figsize=(50,20))
legendvals = ['mean', 'max', 'min', 'median','95%tile']
titles = ['FIFO get buffer times', 'FIFO_wait_times', 'Excl FIFO get times', 'Block implementation call', 'FIFO return buffer times']
plotted_atomids = []
#plot stats
horz = True
for ii in [0,1,2,3,4]:
stats = atomi_stats_a[ii]
atomids = [atomid for (atomid, stat_tup) in stats]
stat_tups = [stat_tup for (atomid, stat_tup) in stats]
means = [mean for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
medians = [median for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
mins = [minn for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
maxs = [maxx for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
pctls95 = [pctl95 for (cnt, mean, std, minn, maxx, diff, median, pctl95) in stat_tups]
plt.subplot(2,5,ii+1)
plotted_atomids = atomids
#plt.margins(0.1) #extend axis limits by this fraction
plt.margins(0.05) #extend axis limits by this fraction
plt.title(titles[ii])
ids = range(len(atomids))
if (horz):
plt.plot(ids, means , 'b.-')
plt.plot(ids, maxs , 'r.-')
plt.plot(ids, mins , 'g.-')
plt.plot(ids, medians , 'k.--')
plt.plot(ids, pctls95 , 'k.-')
plt.xlabel('Atom ID')
plt.ylabel('Cycles')
else:
plt.plot(means ,ids, 'b.-')
plt.plot(maxs ,ids, 'r.-')
plt.plot(mins ,ids, 'g.-')
plt.plot(medians ,ids, 'k.--')
plt.plot(pctls95 ,ids, 'k.-')
plt.ylabel('Atom ID')
plt.xlabel('Cycles')
plt.grid(True)
ax = plt.subplot(2,5,1)
ys = ids
ats = plotted_atomids
if not horz:
for y, at in zip(ys, ats):
ax.annotate(atomi_labels[at], xy=(0,y), rotation=0, fontsize='x-small')
ax = plt.subplot(2,5,5)
ax.legend(legendvals, loc='center left', bbox_to_anchor=(1, 0.5))
#plot timeseries
atomi_durs_fg, atomi_durs_ii, atomi_durs_fd, atomi_durs_fw, atomi_durs_fgmw = atomi_durs
atomi_durs_a = [atomi_durs_fg, atomi_durs_fw, atomi_durs_fgmw, atomi_durs_ii, atomi_durs_fd]
for ii in [0,1,2,3,4]:
labels = []
durs = atomi_durs_a[ii]
print durs
#durs_fg[idv1] = [t_fg]
plt.subplot(2,5,ii+6)
for atomid, durations in durs:
plt.plot(durations[0:200], '.-')
labels.append(atomi_labels[atomid])
plt.ylabel('Cycles')
plt.xlabel('Invocation number')
plt.grid(True)
plt.title('Timeseries')
ax = plt.subplot(2,5,10)
ax.legend(labels, loc='center left', bbox_to_anchor=(1, 0.5), fontsize='x-small')
plt.suptitle('Atom internals and FIFO module costs')
#plt.show()
plt.savefig(tfname)
def plot_atoms(tf, atom_stats_sorted, atom_labels, atom_stats_norm, conf={}):
tfname = '%s_plot_atoms.png' % tf
plot_atoms_i(tfname, atom_stats_sorted, atom_labels)
tfname = '%s_plot_atoms_norm.png' % tf
plot_atoms_i(tfname, atom_stats_norm, atom_labels)
tfname = '%s_plot_atoms_horz.png' % tf
tfname_labelmap = '%s_plot_atoms_horz_labelmap.txt' % tf
plot_atoms_i_horz(tfname, atom_stats_sorted, atom_labels, tfname_labelmap)
tfname = '%s_plot_atoms_norm_horz.png' % tf
tfname_labelmap = '%s_plot_atoms_norm_horz_labelmap.txt' % tf
plot_atoms_i_horz(tfname, atom_stats_norm, atom_labels, tfname_labelmap)
def plot_atomis(tf, atomi_stats, atomi_labels, atomi_durs):
tfname = '%s_plot_atomis.png' % tf
plot_atomis_i(tfname, atomi_stats, atomi_labels, atomi_durs)
def plot_atom_timeseries(tf, atom_stats_norm, all_durs, atom_labels_dict, more_atomids=[], conf={}):
tfname = '%s_plot_atoms_timeseries.png' % tf
worst_how_many = 3
#fs = ['r.-','g.-','b.-']
#re-sort by normalized max
atom_stats_norm_resorted = sorted(atom_stats_norm, key=lambda st: st[1][4], reverse=True)
worst_top = []
worst_top_labels = []
for atom_stat in atom_stats_norm_resorted[0:worst_how_many]:
atom_id, (cnt, mean, std, minn, maxx, diff, median, pctl95) = atom_stat
worst_top.append(atom_id)
worst_top_labels.append(atom_labels_dict[atom_id])
print atom_labels_dict[atom_id]
print more_atomids
if more_atomids:
for atom_stat in atom_stats_norm_resorted:
atom_id, (cnt, mean, std, minn, maxx, diff, median, pctl95) = atom_stat
if atom_id in more_atomids:
print 'adding atomid to timeseries', atom_id
worst_top.append(atom_id)
worst_top_labels.append(atom_labels_dict[atom_id])
print atom_labels_dict[atom_id]
plt.figure()
plt.figure(figsize=(160,8))
plt.margins(0.1) #extend axis limits by this fraction
plt.subplots_adjust(left=0.15, right=0.9, bottom=0.2, top=0.85)
plt.title('Atom compute times timeseries\n%s' % os.getcwd().split('orsys')[1])
#for atom_id, f in zip(worst_top, fs):
for atom_id in worst_top:
#plt.plot(all_durs[atom_id], f)
plt.plot(all_durs[atom_id])
#plt.plot(range(1,1+len(all_durs[atom_id])), [int(t) for t in all_durs[atom_id]], f)
#plt.plot(range(1,1+len(all_durs[atom_id])), [int(t) for t in all_durs[atom_id]], '.-')
plt.ylabel('Cycles')
plt.xlabel('Call number')
plt.grid(True)
#------
ax = plt.subplot(111)
# Shrink current axis by 20%
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
#plt.legend(worst_top_labels, loc='upper right')
#ax.legend(worst_top_labels, loc='center left', bbox_to_anchor=(1, 0.5))
#------
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.2,
box.width, box.height * 0.8])
# Put a legend below current axis
#ax.legend(worst_top_labels, loc='upper center', bbox_to_anchor=(0.5, -0.05))#,
ax.legend(worst_top_labels, loc='upper center', bbox_to_anchor=(0.5, -0.13))#,
# fancybox=True, shadow=True, ncol=5)
#plt.show()
plt.savefig(tfname)
def plot_atom_tseqs(tf, atid_tseqs, atom_labels_dict):
tfname = '%s_plot_atoms_tseqs.png' % (tf)
plt.figure()
plt.figure(figsize=(30,30))
plt.title('AtomId time sequences\n%s' % (os.getcwd().split('orsys')[1]))
all_xs = []
all_ys = []
dy = 0.2
#f=['r.-','g.-','b.-','k.-']
f=['r-','g-','b-','k-']
for coreid, atid_tseq in enumerate(atid_tseqs):
xs = []
ys = []
for (atid, (ts1, ts2)) in atid_tseq:
#ys.append(0)
ys.append(atid + coreid * dy)
ys.append(atid + coreid * dy)
#ys.append(0)
#xs.append(ts1)
xs.append(ts1)
xs.append(ts2)
#xs.append(ts2)
y = atid + coreid * dy
plt.plot([ts1, ts2], [y,y], f[coreid])
all_xs.append(xs)
all_ys.append(ys)
#plt.plot(all_xs[0], all_ys[0], 'r.-', linewidth=3)
#plt.plot(all_xs[1], all_ys[1], 'g.-', linewidth=3)
#plt.plot(all_xs[2], all_ys[2], 'b.-', linewidth=3)
#plt.plot(all_xs[3], all_ys[3], 'k.-', linewidth=3)
plt.ylabel('AtomId')
plt.xlabel('Cycle')
plt.grid(True)
plt.savefig(tfname)
#----------------------------------------------
f=['r.-','g.-','b.-','k.-']
for coreid, atid_tseq in enumerate(atid_tseqs):
tfname = '%s_plot_atoms_tseqs_%d.png' % (tf, coreid)
plt.figure()
plt.figure(figsize=(30,30))
plt.title('AtomId time sequence, core %d\n%s' % (coreid,os.getcwd().split('orsys')[1]))
xs = []
ys = []
for (atid, (ts1, ts2)) in atid_tseq:
xs = []
ys = []
ys.append(0)
ys.append(atid + coreid * dy)
ys.append(atid + coreid * dy)
ys.append(0)
xs.append(ts1)
xs.append(ts1)
xs.append(ts2)
xs.append(ts2)
y = atid + coreid * dy
plt.plot(xs, ys, f[coreid])
plt.ylabel('AtomId')
plt.xlabel('Cycle')
plt.grid(True)
plt.savefig(tfname)
#----------------------------------------------
fig, axs = plt.subplots(4, sharex=True, sharey=True, figsize=(30,30))
#plt.figure(figsize=(30,30))
f=['r.-','g.-','b.-','k.-']
#pltids=[411,412,413,414]
tfname = '%s_plot_atoms_tseqs_all.png' % (tf)
#f, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True, sharey=True)
#ax1.plot(x, y)
#ax1.set_title('Sharing both axes')
#ax2.scatter(x, y)
#ax3.scatter(x, 2 * y ** 2 - 1, color='r')
# Fine-tune figure; make subplots close to each other and hide x ticks for
# all but bottom plot.
for coreid, atid_tseq in enumerate(atid_tseqs):
#ax = plt.subplot(pltids[coreid])
xs = []
ys = []
for (atid, (ts1, ts2)) in atid_tseq:
xs = []
ys = []
ys.append(0)
ys.append(atid + coreid * dy)
ys.append(atid + coreid * dy)
ys.append(0)
xs.append(ts1)
xs.append(ts1)
xs.append(ts2)
xs.append(ts2)
y = atid + coreid * dy
#ax.plot(xs, ys, f[coreid])
#ax.grid(True)
axs[coreid].plot(xs, ys, f[coreid])
axs[coreid].grid(True)
plt.ylabel('AtomId')
plt.xlabel('Cycle')
#plt.title('AtomId time sequence\n%s' % (os.getcwd().split('orsys')[1]))
axs[0].set_title('AtomId time sequence\n%s' % (os.getcwd().split('orsys')[1]))
fig.subplots_adjust(hspace=0)
#plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
plt.savefig(tfname)
#------------------------------------------------------------
|
manub686/atomix
|
tracescripts/plot_atoms.py
|
Python
|
apache-2.0
| 18,714
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import renames_v2
from tensorflow.tools.compatibility import reorders_v2
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
}
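# Editor's note (illustrative only, not used by the converter): given the keyword
# map above, a 1.x call such as
#     tf.reduce_sum(x, reduction_indices=1, keep_dims=True)
# would be rewritten with the 2.0 keyword names as
#     tf.reduce_sum(x, axis=1, keepdims=True)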
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames, use the OLD function name.
# These renames happen after the arguments have been processed.
self.manual_symbol_renames = {
"tf.batch_to_space_nd":
"tf.batch_to_space",
"tf.batch_gather":
"tf.gather",
"tf.space_to_batch_nd":
"tf.space_to_batch",
"tf.nn.space_to_batch":
"tf.space_to_batch",
"tf.estimator.inputs":
"tf.compat.v1.estimator.inputs",
"tf.extract_image_patches":
"tf.image.extract_image_patches",
"tf.gfile.Copy":
"tf.io.gfile.copy",
"tf.gfile.DeleteRecursively":
"tf.io.gfile.rmtree",
"tf.gfile.Exists":
"tf.io.gfile.exists",
"tf.gfile.Glob":
"tf.io.gfile.glob",
"tf.gfile.IsDirectory":
"tf.io.gfile.isdir",
"tf.gfile.ListDirectory":
"tf.io.gfile.listdir",
"tf.gfile.MakeDirs":
"tf.io.gfile.makedirs",
"tf.gfile.MkDir":
"tf.io.gfile.mkdir",
"tf.gfile.Remove":
"tf.io.gfile.remove",
"tf.gfile.Rename":
"tf.io.gfile.rename",
"tf.gfile.Stat":
"tf.io.gfile.stat",
"tf.gfile.Walk":
"tf.io.gfile.walk",
"tf.contrib.data.AUTOTUNE":
"tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter":
"tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook":
"tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset":
"tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional":
"tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset":
"tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer":
"tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset":
"tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator":
"tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter":
"tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape":
"tf.data.experimental.assert_element_shape",
"tf.contrib.data.batch_and_drop_remainder":
"tf.compat.v1.contrib.data.batch_and_drop_remainder",
"tf.contrib.data.bucket_by_sequence_length":
"tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets":
"tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device":
"tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch":
"tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset":
"tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional":
"tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element":
"tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer":
"tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window":
"tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors":
"tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats":
"tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset":
"tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset":
"tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator":
"tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch":
"tf.data.experimental.map_and_batch",
"tf.contrib.data.padded_batch_and_drop_remainder":
"tf.compat.v1.contrib.data.padded_batch_and_drop_remainder",
"tf.contrib.data.parallel_interleave":
"tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset":
"tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device":
"tf.data.experimental.prefetch_to_device",
"tf.contrib.data.read_batch_features":
"tf.compat.v1.contrib.data.read_batch_features",
"tf.contrib.data.reduce_dataset":
"tf.compat.v1.contrib.data.reduce_dataset",
"tf.contrib.data.rejection_resample":
"tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets":
"tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan":
"tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator":
"tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat":
"tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.sliding_window_batch":
"tf.compat.v1.contrib.data.sliding_window_batch",
"tf.contrib.data.sloppy_interleave":
"tf.compat.v1.contrib.data.sloppy_interleave",
"tf.contrib.data.unbatch":
"tf.data.experimental.unbatch",
"tf.contrib.data.unique":
"tf.data.experimental.unique",
"tf.contrib.rnn.RNNCell":
"tf.nn.rnn_cell.RNNCell",
"tf.contrib.rnn.LSTMStateTuple":
"tf.nn.rnn_cell.LSTMStateTuple",
"tf.contrib.framework.sort":
"tf.sort",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.manip.batch_to_space_nd":
"tf.batch_to_space",
"tf.quantize_v2":
"tf.quantization.quantize",
"tf.sparse_add":
"tf.sparse.add",
"tf.sparse_concat":
"tf.sparse.concat",
"tf.sparse_split":
"tf.sparse.split",
"tf.sparse_matmul":
"tf.linalg.matmul",
"tf.sparse_reduce_sum":
"tf.sparse.reduce_sum",
"tf.sparse_reduce_max":
"tf.sparse.reduce_max",
"tf.random.stateless_multinomial":
"tf.random.stateless_categorical",
"tf.substr":
"tf.strings.substr",
"tf.string_to_hash_bucket":
"tf.strings.to_hash_bucket",
"tf.string_to_number":
"tf.strings.to_number",
"tf.multinomial":
"tf.random.categorical",
"tf.random.multinomial":
"tf.random.categorical",
"tf.reduce_join":
"tf.strings.reduce_join",
"tf.load_file_system_library":
"tf.load_library",
"tf.pywrap_tensorflow":
"tf.compat.v1.pywrap_tensorflow",
"tf.bincount":
"tf.math.bincount",
"tf.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.confusion_matrix":
"tf.math.confusion_matrix",
"tf.decode_csv":
"tf.io.decode_csv",
"tf.data.Iterator":
"tf.compat.v1.data.Iterator",
"tf.parse_example":
"tf.io.parse_example",
"tf.parse_single_example":
"tf.io.parse_single_example",
"tf.nn.fused_batch_norm":
"tf.compat.v1.nn.fused_batch_norm",
"tf.nn.softmax_cross_entropy_with_logits_v2":
"tf.nn.softmax_cross_entropy_with_logits",
"tf.losses.Reduction.MEAN":
"tf.compat.v1.losses.Reduction.MEAN",
"tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS",
"tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS",
"tf.lite.constants.FLOAT":
"tf.float32",
"tf.lite.constants.INT32":
"tf.int32",
"tf.lite.constants.INT64":
"tf.int64",
"tf.lite.constants.STRING":
"tf.string",
"tf.lite.constants.QUANTIZED_UINT8":
"tf.uint8",
"tf.arg_max":
"tf.argmax",
"tf.arg_min":
"tf.argmin",
# tf.nn.ctc_loss is still available in 2.0 but behavior
# changed significantly.
"tf.nn.ctc_loss":
"tf.compat.v1.nn.ctc_loss",
"tf.zeros_initializer":
"tf.compat.v1.initializers.zeros",
"tf.ones_initializer":
"tf.compat.v1.initializers.ones",
"tf.constant_initializer":
"tf.compat.v1.initializers.constant",
"tf.random_uniform_initializer":
"tf.compat.v1.initializers.random_uniform",
"tf.random_normal_initializer":
"tf.compat.v1.initializers.random_normal",
"tf.truncated_normal_initializer":
"tf.compat.v1.initializers.truncated_normal",
}
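# Editor's note (illustrative only): per the manual rename map above, a call such as
#     tf.gfile.MakeDirs(log_dir)
# would be rewritten as
#     tf.io.gfile.makedirs(log_dir)
# (`log_dir` is a hypothetical variable name.)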
# pylint: enable=line-too-long
# Mapping from function to the new name of the function
self.symbol_renames = renames_v2.renames
self.symbol_renames.update(self.manual_symbol_renames)
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_gather",
"tf.batch_to_space",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = reorders_v2.reorders
# Specially handled functions.
self.function_handle = {
"tf.batch_gather": self._batch_gather_handler,
"tf.nn.dropout": self._dropout_handler,
"tf.gradients": self._colocate_handler("tf.gradients"),
"*.minimize": self._colocate_handler("Optimizer.minimize"),
"*.compute_gradients":
self._colocate_handler("Optimizer.compute_gradients"),
}
decay_function_comment = (
"WARNING: <function name> has been changed to return a callable instead"
" of a tensor when graph building, but its functionality remains "
"unchanged during eager execution (returns a callable like "
"before). The converter cannot detect and fix this reliably, so "
"this usage has been converted to compat.v1 (even though it may already"
" be correct).\n"
)
# TODO(b/118888586): add default value change to update script.
default_loss_reduction_changed = (
"WARNING: default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE.\n"
)
assert_return_type_comment = (
"WARNING: assert_* functions have been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
"WARNING: assert_rank_* functions have been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
tf_01s_like_no_optimize_comment = (
"WARNING: tf.zeros_like and tf.ones_like no longer have the optimize "
"argument in TF 2.0 or after (also, `tensor' argument is renamed to "
"`input')."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
deprecate_partition_strategy_comment = (
"WARNING: `partition_strategy` has been removed from `%s` "
" The 'div' strategy is used by default.")
initializers_no_dtype_comment = (
"WARNING: tf.initializers and tf.keras.initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"call method in TF 2.0 and after. The only API symbols are now "
"tf.keras.initializers.* or tf.initializers.*."
"\nThe calls have been converted to compat.v1 for safety (even though "
"they may already have been correct).")
uniform_unit_scaling_initializer_comment = (
"WARNING: uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
    metrics_comment = (
        "WARNING: tf.metrics functions have been converted to object-oriented"
        " versions in TF 2.0 and after. The metric function calls have been "
        "converted to compat.v1 for backward compatibility. Please update "
        "these calls to the TF 2.0 versions.")
    losses_comment = (
        "WARNING: tf.losses functions have been converted to object-oriented "
        "versions in TF 2.0 and after. The loss function calls have been "
        "converted to compat.v1 for backward compatibility. Please update "
        "these calls to the TF 2.0 versions.")
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
self.function_warnings = {
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.cond":
"tf.cond no longer takes 'strict'. "
"Now 'strict' defaults to True."
"fn1/fn2 arguments are replaced by true_fn/false_fn.",
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.device":
"tf.device no longer takes function as an argument. "
"'devide_name_or_function' argument has been renamed to "
"'device_name'.",
"tf.flags":
"tf.flags has been removed, please use the argparse or absl"
" module if you need command line parsing.",
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.estimator.LinearClassifier":
default_loss_reduction_changed,
"tf.estimator.LinearRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedClassifier":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineRegressor":
default_loss_reduction_changed,
"tf.hessians":
"tf.hessians no longer takes "
"'colocate_gradients_with_ops' argument. Also, "
"arguments have been reordered so that 'name' is the "
"last argument.",
"tf.nn.conv1d":
"WARNING: use_cudnn_on_gpu argument has been removed and \"value\""
" was renamed to \"input\"",
"tf.nn.conv2d":
"WARNING: use_cudnn_on_gpu argument has been removed and "
"\"filter\" was renamed to \"filters\"",
"tf.nn.conv2d_backprop_filter":
"WARNING: use_cudnn_on_gpu argument has been removed",
"tf.nn.conv2d_backprop_input":
"WARNING: use_cudnn_on_gpu argument has been removed and "
"\"filter\" was renamed to \"filters\"",
"tf.nn.erosion2d":
"WARNING: <function name> now requires a data_format argument",
"tf.nn.nce_loss":
deprecate_partition_strategy_comment % "tf.nn.nce_loss",
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment %
"tf.nn.safe_embedding_lookup_sparse",
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment % "tf.nn.sampled_softmax_loss",
"tf.zeros_like":
tf_01s_like_no_optimize_comment,
"tf.ones_like":
tf_01s_like_no_optimize_comment,
"tf.nn.embedding_lookup":
"WARNING: validate_indices argument has been removed.",
"tf.while_loop":
"tf.while_loop no longer takes 'return_same_structure' argument. "
"'return_same_structure' now defaults to True. Also, 'name'"
"argument is now the last argument.",
"tf.image.sample_distorted_bounding_box":
"tf.image.sample_distorted_bounding_box no longer takes 'seed2' "
"argument.",
"tf.nn.ctc_beam_search_decoder":
"tf.nn.ctc_beam_search_decoder no longer takes 'merge_repeated' "
"argument. 'merge_repeated' now defaults to False.",
"tf.nn.fractional_avg_pool":
"tf.nn.fractional_avg_pool no longer takes 'seed2' and "
"'deterministic' arguments. Now it takes a single 'seed' arg. If "
"'seed' is zero, the execution is random and deterministic "
"otherwise",
"tf.nn.fractional_max_pool":
"tf.nn.fractional_max_pool no longer takes 'seed2' and "
"'deterministic' arguments. Now it takes a single 'seed' arg. If "
"'seed' is zero, the execution is random and deterministic "
"otherwise",
"tf.nn.softmax_cross_entropy_with_logits":
"tf.nn.softmax_cross_entropy_with_logits behavior has changed. "
"'labels' needs to be wrapped with tf.stop_gradient to keep the "
"old behavior. Also, 'dim' argument has been renamed to 'axis'.",
"tf.test.assert_equal_graph_def":
"tf.assert_equal_graph_def no longer takes 'checkpoint_v2' "
"argument. 'checkpoint_v2' now defaults to True.",
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.uniform_unit_scaling":
uniform_unit_scaling_initializer_comment,
"tf.uniform_unit_scaling_initializer":
uniform_unit_scaling_initializer_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
}
self.symbol_renames = {
name: new_name
for name, new_name in self.symbol_renames.items()
}
export_saved_model_renamed = (
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
make_initializable_iterator_deprecation = (
"(Manual edit required) The "
"`tf.data.Dataset.make_initializable_iterator()` method has been "
"removed. If you are using the Estimator API, you can return a dataset "
"directly from your input functions without creating an iterator. "
"As a last resort, please replace calls to that method on `dataset` "
"with a call to "
"`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
make_one_shot_iterator_deprecation = (
"(Manual edit required) The "
"`tf.data.Dataset.make_one_shot_iterator()` method has been "
"removed. If you are using eager execution, you can iterate over "
"`dataset` using a Python `for` loop. If you are using the Estimator "
"API, you can return a dataset directly from your input functions "
"without creating an iterator. As a last resort, please replace calls "
"to that method on `dataset` with a call to "
"`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
# Specify warnings for functions that aren't restricted to the tf.x.y.z
# format. This should only be used for methods with unique names, e.g.
# export_savedmodel, which is only defined in Estimator objects.
self.unrestricted_function_warnings = {
"export_savedmodel": export_saved_model_renamed,
"make_initializable_iterator": make_initializable_iterator_deprecation,
"make_one_shot_iterator": make_one_shot_iterator_deprecation,
}
@staticmethod
def _dropout_handler(file_edit_recorder, node, lines):
del lines
if len(node.args) < 2:
comment = ("ERROR: tf.nn.dropout did not take arguments, so automatic "
"transformation was disabled. tf.nn.dropout has changed "
"the semantics of the second argument.")
file_edit_recorder.add(
comment,
node.lineno,
node.col_offset,
"tf.nn.dropout",
"tf.nn.dropout",
error="tf.nn.dropout requires manual check.")
else:
comment = ("WARNING: tf.nn.dropout has changed the semantics of the "
"second argument. Please check the transformation.\n")
file_edit_recorder.add(
comment,
node.args[1].lineno,
node.args[1].col_offset,
"",
"1 - ")
@staticmethod
def _colocate_handler(name):
def _helper(file_edit_recorder, node, lines):
"""Handler for updating colocate arguments."""
del lines
for keyword in node.keywords:
if keyword.arg == "colocate_gradients_with_ops":
# TODO(jhseu): Since ast_edit.py does string replacement, there's no
# straightforward way to remove the argument. Try to fix before 2.0 is
# final.
comment = ("For tf.gradients and tf.Optimizer.minimize, "
"colocate_gradients_with_op has been removed and now "
"defaults to True.")
file_edit_recorder.add(
comment,
node.lineno,
node.col_offset,
"",
"",
error="{} requires manual check.".format(name))
return _helper
@staticmethod
def _batch_gather_handler(file_edit_recorder, node, lines):
lineno = node.lineno
column = node.col_offset
# Find the position to add the batch_dims argument. We add it as the
# first argument, since that's easiest. This is safe because we included
# batch_gather in self.reordered_function_names, so it will have all
# of its arguments changed to keyword arguments.
m = re.match(r"tf\s*\.\s*batch_gather\s*\(", lines[lineno - 1][column:])
if m is not None:
file_edit_recorder.add(
"Added keyword argument 'batch_dims=-1' to 'tf.batch_gather'",
lineno, column + m.end(), "", "batch_dims=-1, ")
else:
file_edit_recorder.add(
"Unable to add keyword argument 'batch_dims=-1' to 'tf.batch_gather'",
lineno, column, "", "",
error="Unable to add keyword argument batch_dims=-1 to "
"tf.batch_gather; please add it manually.")
|
hfp/tensorflow-xsmm
|
tensorflow/tools/compatibility/tf_upgrade_v2.py
|
Python
|
apache-2.0
| 43,754
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
from __future__ import unicode_literals
from django.db import models
# from djcelery import models
from applications import Applications
from virtual_machines import VirtualMachines
from users import Users
class InstalledApplications(models.Model):
installed_app_id = models.AutoField(primary_key=True)
workspace = models.CharField(max_length=45, blank=True)
clx_ip = models.CharField(max_length=45, blank=True)
public_port = models.IntegerField(blank=True, null=True)
private_port = models.IntegerField(blank=True, null=True)
user = models.ForeignKey(Users)
application = models.ForeignKey(Applications)
virtual_machine = models.ForeignKey(VirtualMachines)
class Meta:
managed = False
db_table = 'Installed_applications'
# app_label = 'database'
|
Dev-Cloud-Platform/Dev-Cloud
|
dev_cloud/database/models/installed_applications.py
|
Python
|
apache-2.0
| 1,472
|
import treeprint
import copy
def traverse(tree,ident = 1):
me = treeprint.treeprinter(tree)
me.printt()
class node:
def __init__(self,name = ""):
self.parent = None
        self.children = []  # list with all the child nodes
self.name = name
self.attr = {}
def appendchild(self,name=None):
self.children.append(node())
self.children[self.countchild()-1].parent = self
if name != None:
self.children[self.countchild()-1].name = name
return self.children[-1]
def appendChild(self,child):
self.children.append(child)
def getChild(self,nr):
if len(self.children) < nr:
return None
else:
return self.children[nr]
def countchild(self):
return len(self.children)
def setname(self,name):
self.name = name
def getchildren(self):
return self.children
def appendchilds(self,num):
for i in range(0,num):
self.appendchild()
def set(self,name,val):
self.attr[name] = val
def get(self,name):
return self.attr[name]
def copy(self):
return copy.deepcopy(self)
"""
test = node("COMPOUND")
x = test.appendchild("ASSIGN")
y = x.appendchild("VARIABLE")
y.set("name","alpha")
z = x.appendchild("NEGATE")
a = z.appendchild("INTEGER CONSTANT")
a.set("value",58)
# Middle side
x = test.appendchild("ASSIGN")
r = x.appendchild("VARIABLE")
z.set("name","beta")
z = x.appendchild("INTEGER CONSTANT")
z.set("value","99")
x = test.appendchild("ASSIGN")
z = x.appendchild("VARIABLE")
z.set("name","result")
z = x.appendchild("ADD")
r = x
a = z.appendchild("ADD")
b = a.appendchild("VARIABLE")
b.set("name","alpha")
b = a.appendchild("FLOAT DIVIDE")
c = b.appendchild("INTEGER CONSTANT")
c.set("value","3")
c = b.appendchild("SUBTRACT")
d = c.appendchild("VARIABLE")
d.set("name","beta")
d = c.appendchild("VARIABLE")
d.set("name","gamma")
d = r.appendchild("INTEGER CONSTANT")
d.set("value","5")
traverse(test)
"""
|
christ2go/pycal
|
src/tree/node.py
|
Python
|
apache-2.0
| 2,033
|
# Copyright (c) 2013-2016, Freja Nordsiek
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import tempfile
import numpy as np
import h5py
import hdf5storage
# A series of tests to make sure that structured ndarrays with a field
# that has an object dtype are written like structs (are HDF5 Groups)
# but are written as an HDF5 COMPOUND Dataset otherwise (even in the
# case that a field's name is 'O').
def test_O_field_compound():
name = '/a'
data = np.empty(shape=(1, ), dtype=[('O', 'int8'), ('a', 'uint16')])
with tempfile.TemporaryDirectory() as folder:
filename = os.path.join(folder, 'data.h5')
hdf5storage.write(data, path=name, filename=filename,
matlab_compatible=False,
structured_numpy_ndarray_as_struct=False)
with h5py.File(filename, mode='r') as f:
assert isinstance(f[name], h5py.Dataset)
def test_object_field_group():
name = '/a'
data = np.empty(shape=(1, ), dtype=[('a', 'O'), ('b', 'uint16')])
data['a'][0] = [1, 2]
with tempfile.TemporaryDirectory() as folder:
filename = os.path.join(folder, 'data.h5')
hdf5storage.write(data, path=name, filename=filename,
matlab_compatible=False,
structured_numpy_ndarray_as_struct=False)
with h5py.File(filename, mode='r') as f:
assert isinstance(f[name], h5py.Group)
def test_O_and_object_field_group():
name = '/a'
data = np.empty(shape=(1, ), dtype=[('a', 'O'), ('O', 'uint16')])
data['a'][0] = [1, 2]
with tempfile.TemporaryDirectory() as folder:
filename = os.path.join(folder, 'data.h5')
hdf5storage.write(data, path=name, filename=filename,
matlab_compatible=False,
structured_numpy_ndarray_as_struct=False)
with h5py.File(filename, mode='r') as f:
assert isinstance(f[name], h5py.Group)
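# A minimal sketch, not one of the tests above: it shows how the same
# hdf5storage/h5py calls can be used to check which HDF5 object type a given
# structured array produces. The helper name and example data are made up for
# illustration.
def describe_written_object(data, name='/a'):
    with tempfile.TemporaryDirectory() as folder:
        filename = os.path.join(folder, 'data.h5')
        hdf5storage.write(data, path=name, filename=filename,
                          matlab_compatible=False,
                          structured_numpy_ndarray_as_struct=False)
        with h5py.File(filename, mode='r') as f:
            return type(f[name]).__name__  # 'Dataset' or 'Group'
# For example, a structured array without any object-dtype field, such as
# np.zeros((1, ), dtype=[('x', 'int8')]), would be expected to come back as
# 'Dataset', matching the behaviour exercised by test_O_field_compound above.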
|
frejanordsiek/hdf5storage
|
tests/test_ndarray_O_field.py
|
Python
|
bsd-2-clause
| 3,240
|
raise "no longer used since 20050302"
#coding: latin1
## Copyright 2004-2005 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
oogen : generate OpenOffice documents programmatically
Bibliography:
- http://books.evc-cit.info
Using OpenOffice.org's XML Data Format
- http://www.oooforum.org/forum/viewtopic.php?t=13861
Opening 2 csv text files into OOo Calc as separate sheets
- http://udk.openoffice.org/python/python-bridge.html
Python-UNO bridge
"""
import zipfile
import os.path
opj = os.path.join
from timtools.oogen.ifiles import IFILES
from timtools.ui import console
class OoGenerator:
"base clase for OoText,OoSpreadsheet,..."
extension = NotImplementedError
mimetype = NotImplementedError
def __init__(self,doc=None,filename=None):
if doc is None:
doc = Document()
self.doc = doc
self.tempDir = r'c:\temp'
if filename is None:
filename = self.doc.name
if not filename.lower().endswith(self.extension):
filename += self.extension
self.outputFilename = filename
self.ifiles = tuple([cl(self) for cl in IFILES])
def save(self):
job = console.job("Writing "+self.outputFilename)
for f in self.ifiles:
f.writeFile()
zf = zipfile.ZipFile(self.outputFilename,'w',
zipfile.ZIP_DEFLATED)
for f in self.ifiles:
zf.write(opj(self.tempDir,f.filename),f.filename)
zf.close()
job.done()
class OoText(OoGenerator):
extension = ".sxw"
officeClass = "text"
mimetype = "application/vnd.sun.xml.writer"
def writeBody(self,wr):
for elem in self.doc.story:
elem.__xml__(wr)
class OoSpreadsheet(OoGenerator):
extension = ".sxc"
officeClass = "spreadsheet"
mimetype = "application/vnd.sun.xml.calc"
def writeBody(self,wr):
for elem in self.doc.tables:
elem.__xml__(wr)
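# A minimal sketch, not part of the original module (which raises at import
# time above): it illustrates the idea the generators rely on, namely that an
# OpenOffice document is a zip archive holding a mimetype entry plus XML
# parts. File names and XML bodies here are placeholders only.
def write_skeleton_archive(path, mimetype="application/vnd.sun.xml.writer"):
    import zipfile
    zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
    # The mimetype entry is conventionally written first and left uncompressed
    # (ZipInfo defaults to ZIP_STORED).
    zf.writestr(zipfile.ZipInfo("mimetype"), mimetype)
    zf.writestr("content.xml",
                "<?xml version='1.0'?><office:document-content/>")
    zf.writestr("meta.xml",
                "<?xml version='1.0'?><office:document-meta/>")
    zf.close()
# write_skeleton_archive("demo.sxw") produces the same overall layout that
# OoGenerator.save() assembles from the temp files written by its IFILES.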
|
lsaffre/timtools
|
timtools/oogen/generators.py
|
Python
|
bsd-2-clause
| 2,798
|
from pandas import *
import numpy as num
from pysal import *
import os, glob
from osgeo import ogr
import csv
from pyGDsandbox.dataIO import df2dbf, dbf2df
import uuid
from pyproj import Proj, transform
from shapely.geometry import Polygon, shape
from osgeo import ogr, osr
import fiona
# read in data from the ESRI shapefiles
driver = ogr.GetDriverByName("ESRI Shapefile")
normanlist = glob.glob('*****.dbf')
norman=glob.glob('*****.shp')
nlist = []
for m,n in zip(normanlist, range(len(normanlist))):
a=normanlist[n].rfind("Incidents")
b=normanlist[n].rfind(".dbf")
nlist.append(m[a:b])
table1=read_excel('*****.xls', 'Sheet1', index_col=None, na_values=['NA']).fillna(0)
for x, y, z in zip(normanlist, nlist, norman):
db= pysal.open(x, 'r')
d = {col: db.by_col(col) for col in db.header}
table1=DataFrame(d).fillna(0)
wgs84X=[]
wgs84Y=[]
inproj=Proj("+proj=lcc +lat_1=33.93333333333333 +lat_2=35.23333333333333 +lat_0=33.33333333333334 +lon_0=-98 +x_0=600000.0000000001 +y_0=0 +ellps=GRS80 +datum=NAD83 +to_meter=0.3048006096012192 +no_defs", preserve_units = True)
outProj = Proj('+proj=utm +zone=15 +ellps=WGS84 +datum=WGS84 +units=m +no_defs ', preserve_units = True)
for pol in fiona.open(z):
if str(type(pol['geometry']))=="<type 'NoneType'>":
wgs84X.append(0)
wgs84Y.append(0)
else:
myX=pol['geometry']['coordinates'][0]
myY=pol['geometry']['coordinates'][1]
a=transform(inproj,outProj,myX, myY)
wgs84X.append(a[0])
wgs84Y.append(a[1])
newtable1=DataFrame({})
newtable1["incid"] = table1["incident_n"]
newtable1["Reported_address"]=num.nan
#newtable1["incid"] = Series(range(1, len(newtable2)+1)).unique()
#newtable1["Location_name"]=num.nan
#for i in range(len(table1["incident_n"])):
newtable1["Reported_address"] = table1["CVaddress"] #str(table1["CVaddress"][i])+",Norman,OK"
for i in range(len(table1["CVaddress"])):
if newtable1["Reported_address"][i] != '' and newtable1["Reported_address"][i] != 0 and newtable1["Reported_address"][i].strip().find("UNKNOWN") == -1 and newtable1["Reported_address"][i] != num.nan:
newtable1["Reported_address"][i]=str(newtable1["Reported_address"][i].strip())+",NORMAN,OK"
else:
newtable1["Reported_address"][i]="NORMAN,OK"
newtable1["Incident_date"]=num.nan
newtable1["Report_date"]=num.nan
newtable1["Incident_time"]=num.nan
newtable1["Report_time"]=num.nan
Incident_date=[0]*len(table1["CVaddress"])
Report_date=[0]*len(table1["CVaddress"])
for i in range(len(table1["CVaddress"])):
Incident_date[i] = table1["date_1"][i][0:10]
Report_date[i] = table1["date_repor"][i][0:10]
newtable1["Incident_date"] = Incident_date
newtable1["Report_date"] = Report_date
Incident_time=[0]*len(table1["CVaddress"])
for i in range(len(table1["CVaddress"])):
Incident_time[i] = table1["date_1"][i][11:]
newtable1["Incident_time"]=Incident_time
Report_time=[0]*len(table1["CVaddress"])
for i in range(len(table1["CVaddress"])):
Incident_time[i] = table1["date_repor"][i][11:]
newtable1["Report_time"]=Incident_time
newtable1["IBR"] = table1["NIBRSCode"]
newtable1["IBR_description"] = table1["NIBRSDesc"]
newtable1["Police_Department_Code"]= table1["Offense"]
newtable1["PD_description"] = table1["OffenseDes"]
#if "tiger_X" in db.header:
# newtable1["Latitude"] = table1["tiger_X"]
# newtable1["Longitude"] = table1["tiger_Y"]
#else:
newtable1["Latitude"] = wgs84Y
newtable1["Longitude"] = wgs84X
newtable1["State_Statute_Number"]=num.nan
newtable1["State_Statute_Literal"]=num.nan
newtable1["Global_ID"]=num.nan
newtable1["flag_geocode"]=num.nan
Report_date=[0]*len(table1["CVaddress"])
Incident_date=[0]*len(table1["CVaddress"])
Report_time=[0]*len(table1["CVaddress"])
Incident_time=[0]*len(table1["CVaddress"])
for i in range(len(newtable1["Global_ID"])):
Report_date[i]=str(newtable1["Report_date"][i])[0:4]+"-"+str(newtable1["Report_date"][i])[5:7]+"-"+str(newtable1["Report_date"][i])[8:]
Incident_date[i]=str(newtable1["Incident_date"][i])[0:4]+"-"+str(newtable1["Incident_date"][i])[5:7]+"-"+str(newtable1["Incident_date"][i])[8:]
if newtable1["Incident_time"][i] != num.nan:
Incident_time[i]=str(newtable1["Incident_time"][i])[0:2]+":"+str(newtable1["Incident_time"][i])[3:5]+":"+str(newtable1["Incident_time"][i])[6:]
else:
Incident_time[i]=num.nan
if newtable1["Report_time"][i] != num.nan:
Report_time[i]=str(newtable1["Report_time"][i])[0:2]+":"+str(newtable1["Report_time"][i])[3:5]+":"+str(newtable1["Report_time"][i])[6:]
else:
Report_time[i]=num.nan
#newtable1["Global ID"][i]=str(uuid.uuid4())
newtable1["Report_date"]=Report_date
newtable1["Incident_date"]=Incident_date
newtable1["Report_time"]=Report_time
newtable1["Incident_time"]=Incident_time
newtable1["Global_ID"]=range(len(newtable1["Global_ID"]))
newtable1=newtable1.replace(0,num.nan)
newtable1=newtable1.replace("0",num.nan)
newtable1=newtable1.replace("00",num.nan)
newtable1=newtable1.replace("",num.nan)
#newtable1=newtable1.sort(["Report_date"])
#newtable1["incid"] = Series(range(1, len(newtable1)+1)).unique()
newtable1=newtable1[["Global_ID", "Reported_address", "Incident_date", "Incident_time", "Report_date", "Report_time", "Latitude", "Longitude", "IBR", "IBR_description", "Police_Department_Code", "PD_description", "State_Statute_Number", "State_Statute_Literal","flag_geocode"]]
newtable1.to_csv('*****.csv', index=False)
#df2dbf(newtable1, x)
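# A minimal sketch, assuming the "date_1" and "date_repor" columns hold
# "YYYY-MM-DD HH:MM:SS"-style strings as in the loops above: the same
# date/time splitting can be done with pandas' vectorized .str accessor
# instead of index loops. Illustration only; it skips the NaN handling and
# reformatting the script performs afterwards.
def split_datetime_columns(df):
    out = DataFrame({})
    out["Incident_date"] = df["date_1"].astype(str).str[0:10]
    out["Incident_time"] = df["date_1"].astype(str).str[11:]
    out["Report_date"] = df["date_repor"].astype(str).str[0:10]
    out["Report_time"] = df["date_repor"].astype(str).str[11:]
    return out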
|
vickyting0910/opengeocoding
|
1norman.py
|
Python
|
bsd-2-clause
| 5,441
|
# Copyright (c) 2017, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.core.management.base import BaseCommand
from ...backends.encrypted_cookies import SessionStore
class Command(BaseCommand):
help = "Decrypt a session as passed from the front-end."
def handle(self, *args, **options):
store = SessionStore(args[0])
session_data = store.load()
self.stdout.write(session_data)
|
djaodjin/djaodjin-deployutils
|
deployutils/apps/django/management/commands/decrypt_session.py
|
Python
|
bsd-2-clause
| 1,698
|
#!/usr/bin/env python
import pandas as pd
import rosbag_pandas
import sys, os, pdb
from PIL import Image
from io import BytesIO
sys.path.append(os.path.abspath('../..')) # Not clean
from annotate_base import AnnotateBase
class BagReader(AnnotateBase):
def __init__(self, num_actions=2, newtopic=True):
super(BagReader, self).__init__(num_actions=num_actions)
if newtopic:
self.topic = 'bebop_image_raw_compressed_throttle'
else:
self.topic = 'bebop_image_raw_throttle_compressed'
def _load_bag_data(self, file):
bag = rosbag_pandas.bag_to_dataframe(file)
bag = bag.rename(columns={self.topic+'__data': 'data', self.topic+'__format': 'format'})
df = bag[bag['format'].notnull()]
self.image_data = df['data'].values
self.num_images = self.image_data.size
(self.width, self.height) = Image.open(BytesIO(self.image_data[0])).size
assert self.width==856 and self.height==480, "Unexpected image dimensions (%d, %d)" % (self.width, self.height)
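# A minimal sketch, not part of the original class: downstream code would
# typically decode the compressed frames that _load_bag_data stores in
# self.image_data. The helper below is hypothetical and reuses the same
# PIL/BytesIO decoding; numpy is imported only for this illustration.
import numpy as np
def decode_images(reader):
    """Yield each frame held by a BagReader as a numpy array (HxWx3 for RGB)."""
    for raw in reader.image_data:
        yield np.asarray(Image.open(BytesIO(raw)))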
|
jon-courtney/cnn-autonomous-drone
|
shared/bagreader.py
|
Python
|
bsd-2-clause
| 1,056
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2017 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.api import _
from lino_xl.lib.lists.models import *
# from lino_xl.lib.coachings.mixins import Coachable
from lino_tera.lib.contacts.models import Partner
class List(List, Partner):
class Meta(List.Meta):
app_label = 'lists'
abstract = dd.is_abstract_model(__name__, 'List')
verbose_name = _("Therapeutical group")
verbose_name = _("Therapeutical groups")
def full_clean(self, *args, **kw):
"""Set the `name` field of this list. This field is visible in the
Partner's detail but not in the Lists's detail where it is
filled automatically from the designation in the site's main
language. and serves for sorting when selecting a List as
Partner.
"""
# self.name = dd.babelattr(self, 'designation', language=)
if self.designation:
self.name = self.designation
else:
self.designation = self.name
super(List, self).full_clean(*args, **kw)
|
lino-framework/tera
|
lino_tera/lib/lists/models.py
|
Python
|
bsd-2-clause
| 1,138
|
import os.path
EMULATED_VERSION = 'Muck2.2fb7.00'
MAX_VARS = 54
HISTORY_FILE = os.path.expanduser("~/.mufsim_history")
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
|
revarbat/mufsim
|
mufsim/configs.py
|
Python
|
bsd-2-clause
| 184
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2011 Umeå University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes and functions that a SAML2.0 Service Provider (SP) may use
to conclude its tasks.
"""
from urllib.parse import urlencode
from urllib.parse import urlparse
from saml2.entity import Entity
from saml2.mdstore import destinations
from saml2.profile import paos, ecp
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.samlp import AuthnQuery
from saml2.samlp import NameIDMappingRequest
from saml2.samlp import AttributeQuery
from saml2.samlp import AuthzDecisionQuery
from saml2.samlp import AuthnRequest
import saml2
import time
from saml2.soap import make_soap_enveloped_saml_thingy
from urllib.parse import parse_qs
from saml2.s_utils import signature, UnravelError
from saml2.s_utils import do_attributes
from saml2 import samlp, BINDING_SOAP, SAMLError
from saml2 import saml
from saml2 import soap
from saml2.population import Population
from saml2.response import AttributeResponse, StatusError
from saml2.response import AuthzResponse
from saml2.response import AssertionIDResponse
from saml2.response import AuthnQueryResponse
from saml2.response import NameIDMappingResponse
from saml2.response import AuthnResponse
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_PAOS
import logging
logger = logging.getLogger(__name__)
SSO_BINDING = saml2.BINDING_HTTP_REDIRECT
FORM_SPEC = """<form method="post" action="%s">
<input type="hidden" name="SAMLRequest" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
<input type="submit" value="Submit" />
</form>"""
LAX = False
ECP_SERVICE = "urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp"
ACTOR = "http://schemas.xmlsoap.org/soap/actor/next"
MIME_PAOS = "application/vnd.paos+xml"
class IdpUnspecified(SAMLError):
pass
class VerifyError(SAMLError):
pass
class LogoutError(SAMLError):
pass
class NoServiceDefined(SAMLError):
pass
class Base(Entity):
""" The basic pySAML2 service provider class """
def __init__(self, config=None, identity_cache=None, state_cache=None,
virtual_organization="", config_file=""):
"""
:param config: A saml2.config.Config instance
:param identity_cache: Where the class should store identity information
:param state_cache: Where the class should keep state information
:param virtual_organization: A specific virtual organization
"""
Entity.__init__(self, "sp", config, config_file, virtual_organization)
self.users = Population(identity_cache)
# for server state storage
if state_cache is None:
self.state = {} # in memory storage
else:
self.state = state_cache
self.logout_requests_signed = False
self.allow_unsolicited = False
self.authn_requests_signed = False
self.want_assertions_signed = False
self.want_response_signed = False
for foo in ["allow_unsolicited", "authn_requests_signed",
"logout_requests_signed", "want_assertions_signed",
"want_response_signed"]:
v = self.config.getattr(foo, "sp")
if v is True or v == 'true':
setattr(self, foo, True)
self.artifact2response = {}
self.lock = None
#
# Private methods
#
def _relay_state(self, session_id):
vals = [session_id, str(int(time.time()))]
if self.config.secret is None:
vals.append(signature("", vals))
else:
vals.append(signature(self.config.secret, vals))
return "|".join(vals)
def _sso_location(self, entityid=None, binding=BINDING_HTTP_REDIRECT):
if entityid:
# verify that it's in the metadata
srvs = self.metadata.single_sign_on_service(entityid, binding)
if srvs:
return destinations(srvs)[0]
else:
logger.info("_sso_location: %s, %s" % (entityid, binding))
raise IdpUnspecified("No IdP to send to given the premises")
# get the idp location from the metadata. If there is more than one
# IdP in the configuration raise exception
eids = self.metadata.with_descriptor("idpsso")
if len(eids) > 1:
raise IdpUnspecified("Too many IdPs to choose from: %s" % eids)
try:
srvs = self.metadata.single_sign_on_service(list(eids.keys())[0], binding)
return destinations(srvs)[0]
except IndexError:
raise IdpUnspecified("No IdP to send to given the premises")
def _my_name(self):
return self.config.name
#
# Public API
#
def add_vo_information_about_user(self, name_id):
""" Add information to the knowledge I have about the user. This is
for Virtual organizations.
:param name_id: The subject identifier
:return: A possibly extended knowledge.
"""
ava = {}
try:
(ava, _) = self.users.get_identity(name_id)
except KeyError:
pass
# is this a Virtual Organization situation
if self.vorg:
if self.vorg.do_aggregation(name_id):
# Get the extended identity
ava = self.users.get_identity(name_id)[0]
return ava
#noinspection PyUnusedLocal
def is_session_valid(self, _session_id):
""" Place holder. Supposed to check if the session is still valid.
"""
return True
def service_urls(self, binding=BINDING_HTTP_POST):
_res = self.config.endpoint("assertion_consumer_service", binding, "sp")
if _res:
return _res
else:
return None
def create_authn_request(self, destination, vorg="", scoping=None,
binding=saml2.BINDING_HTTP_POST,
nameid_format=NAMEID_FORMAT_TRANSIENT,
service_url_binding=None, message_id=0,
consent=None, extensions=None, sign=None,
allow_create=False, sign_prepare=False, **kwargs):
""" Creates an authentication request.
:param destination: Where the request should be sent.
:param vorg: The virtual organization the service belongs to.
:param scoping: The scope of the request
:param binding: The protocol to use for the Response !!
:param nameid_format: Format of the NameID
:param service_url_binding: Where the reply should be sent dependent
on reply binding.
:param message_id: The identifier for this request
        :param consent: Whether the principal has given her consent
:param extensions: Possible extensions
:param sign: Whether the request should be signed or not.
:param sign_prepare: Whether the signature should be prepared or not.
:param allow_create: If the identity provider is allowed, in the course
of fulfilling the request, to create a new identifier to represent
the principal.
:param kwargs: Extra key word arguments
:return: tuple of request ID and <samlp:AuthnRequest> instance
"""
client_crt = None
if "client_crt" in kwargs:
client_crt = kwargs["client_crt"]
args = {}
try:
args["assertion_consumer_service_url"] = kwargs[
"assertion_consumer_service_urls"][0]
del kwargs["assertion_consumer_service_urls"]
except KeyError:
try:
args["assertion_consumer_service_url"] = kwargs[
"assertion_consumer_service_url"]
del kwargs["assertion_consumer_service_urls"]
except KeyError:
try:
args["attribute_consuming_service_index"] = str(kwargs[
"attribute_consuming_service_index"])
del kwargs["attribute_consuming_service_index"]
except KeyError:
if service_url_binding is None:
service_urls = self.service_urls(binding)
else:
service_urls = self.service_urls(service_url_binding)
args["assertion_consumer_service_url"] = service_urls[0]
try:
args["provider_name"] = kwargs["provider_name"]
except KeyError:
if binding == BINDING_PAOS:
pass
else:
args["provider_name"] = self._my_name()
try:
args["name_id_policy"] = kwargs["name_id_policy"]
del kwargs["name_id_policy"]
except KeyError:
if allow_create:
allow_create = "true"
else:
allow_create = "false"
# Profile stuff, should be configurable
if nameid_format is None:
name_id_policy = samlp.NameIDPolicy(
allow_create=allow_create, format=NAMEID_FORMAT_TRANSIENT)
elif nameid_format == "":
name_id_policy = None
else:
name_id_policy = samlp.NameIDPolicy(allow_create=allow_create,
format=nameid_format)
if name_id_policy and vorg:
try:
name_id_policy.sp_name_qualifier = vorg
name_id_policy.format = saml.NAMEID_FORMAT_PERSISTENT
except KeyError:
pass
args["name_id_policy"] = name_id_policy
if kwargs:
_args, extensions = self._filter_args(AuthnRequest(), extensions,
**kwargs)
args.update(_args)
try:
del args["id"]
except KeyError:
pass
if (sign and self.sec.cert_handler.generate_cert()) or \
client_crt is not None:
with self.lock:
self.sec.cert_handler.update_cert(True, client_crt)
if client_crt is not None:
sign_prepare = True
return self._message(AuthnRequest, destination, message_id,
consent, extensions, sign, sign_prepare,
protocol_binding=binding,
scoping=scoping, **args)
return self._message(AuthnRequest, destination, message_id, consent,
extensions, sign, sign_prepare,
protocol_binding=binding,
scoping=scoping, **args)
def create_attribute_query(self, destination, name_id=None,
attribute=None, message_id=0, consent=None,
extensions=None, sign=False, sign_prepare=False,
**kwargs):
""" Constructs an AttributeQuery
:param destination: To whom the query should be sent
:param name_id: The identifier of the subject
        :param attribute: A dictionary of attributes and values that are
            asked for. Each key is one of 4 variants:
3-tuple of name_format,name and friendly_name,
2-tuple of name_format and name,
1-tuple with name or
just the name as a string.
:param sp_name_qualifier: The unique identifier of the
service provider or affiliation of providers for whom the
identifier was generated.
:param name_qualifier: The unique identifier of the identity
provider that generated the identifier.
:param format: The format of the name ID
:param message_id: The identifier of the session
        :param consent: Whether the principal has given her consent
:param extensions: Possible extensions
:param sign: Whether the query should be signed or not.
:param sign_prepare: Whether the Signature element should be added.
:return: Tuple of request ID and an AttributeQuery instance
"""
if name_id is None:
if "subject_id" in kwargs:
name_id = saml.NameID(text=kwargs["subject_id"])
for key in ["sp_name_qualifier", "name_qualifier",
"format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
else:
raise AttributeError("Missing required parameter")
elif isinstance(name_id, str):
name_id = saml.NameID(text=name_id)
for key in ["sp_name_qualifier", "name_qualifier", "format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
subject = saml.Subject(name_id=name_id)
if attribute:
attribute = do_attributes(attribute)
return self._message(AttributeQuery, destination, message_id, consent,
extensions, sign, sign_prepare, subject=subject,
attribute=attribute)
# MUST use SOAP for
# AssertionIDRequest, SubjectQuery,
# AuthnQuery, AttributeQuery, or AuthzDecisionQuery
def create_authz_decision_query(self, destination, action,
evidence=None, resource=None, subject=None,
message_id=0, consent=None, extensions=None,
sign=None, **kwargs):
""" Creates an authz decision query.
:param destination: The IdP endpoint
:param action: The action you want to perform (has to be at least one)
:param evidence: Why you should be able to perform the action
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
"""
return self._message(AuthzDecisionQuery, destination, message_id,
consent, extensions, sign, action=action,
evidence=evidence, resource=resource,
subject=subject)
def create_authz_decision_query_using_assertion(self, destination,
assertion, action=None,
resource=None,
subject=None, message_id=0,
consent=None,
extensions=None,
sign=False):
""" Makes an authz decision query based on a previously received
Assertion.
:param destination: The IdP endpoint to send the request to
:param assertion: An Assertion instance
:param action: The action you want to perform (has to be at least one)
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
"""
if action:
if isinstance(action, str):
_action = [saml.Action(text=action)]
else:
_action = [saml.Action(text=a) for a in action]
else:
_action = None
return self.create_authz_decision_query(
destination, _action, saml.Evidence(assertion=assertion),
resource, subject, message_id=message_id, consent=consent,
extensions=extensions, sign=sign)
def create_assertion_id_request(self, assertion_id_refs, **kwargs):
"""
:param assertion_id_refs:
:return: One ID ref
"""
if isinstance(assertion_id_refs, str):
return 0, assertion_id_refs
else:
return 0, assertion_id_refs[0]
def create_authn_query(self, subject, destination=None, authn_context=None,
session_index="", message_id=0, consent=None,
extensions=None, sign=False):
"""
:param subject: The subject its all about as a <Subject> instance
:param destination: The IdP endpoint to send the request to
:param authn_context: list of <RequestedAuthnContext> instances
:param session_index: a specified session index
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return:
"""
return self._message(AuthnQuery, destination, message_id, consent, extensions,
sign, subject=subject, session_index=session_index,
requested_authn_context=authn_context)
def create_name_id_mapping_request(self, name_id_policy,
name_id=None, base_id=None,
encrypted_id=None, destination=None,
message_id=0, consent=None, extensions=None,
sign=False):
"""
:param name_id_policy:
:param name_id:
:param base_id:
:param encrypted_id:
:param destination:
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return:
"""
# One of them must be present
assert name_id or base_id or encrypted_id
if name_id:
return self._message(NameIDMappingRequest, destination, message_id,
consent, extensions, sign,
name_id_policy=name_id_policy, name_id=name_id)
elif base_id:
return self._message(NameIDMappingRequest, destination, message_id,
consent, extensions, sign,
name_id_policy=name_id_policy, base_id=base_id)
else:
return self._message(NameIDMappingRequest, destination, message_id,
consent, extensions, sign,
name_id_policy=name_id_policy,
encrypted_id=encrypted_id)
# ======== response handling ===========
def parse_authn_request_response(self, xmlstr, binding, outstanding=None,
outstanding_certs=None):
""" Deal with an AuthnResponse
:param xmlstr: The reply as a xml string
:param binding: Which binding that was used for the transport
:param outstanding: A dictionary with session IDs as keys and
the original web request from the user before redirection
as values.
        :return: A response.AuthnResponse or None
"""
try:
_ = self.config.entityid
except KeyError:
raise SAMLError("Missing entity_id specification")
resp = None
if xmlstr:
kwargs = {
"outstanding_queries": outstanding,
"outstanding_certs": outstanding_certs,
"allow_unsolicited": self.allow_unsolicited,
"want_assertions_signed": self.want_assertions_signed,
"want_response_signed": self.want_response_signed,
"return_addrs": self.service_urls(),
"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters,
"allow_unknown_attributes":
self.config.allow_unknown_attributes,
}
try:
resp = self._parse_response(xmlstr, AuthnResponse,
"assertion_consumer_service",
binding, **kwargs)
except StatusError as err:
logger.error("SAML status error: %s" % err)
raise
except UnravelError:
return None
except Exception as exc:
logger.error("%s" % exc)
raise
#logger.debug(">> %s", resp)
if resp is None:
return None
elif isinstance(resp, AuthnResponse):
self.users.add_information_about_person(resp.session_info())
logger.info("--- ADDED person info ----")
pass
else:
logger.error("Response type not supported: %s" % (
saml2.class_name(resp),))
return resp
# ------------------------------------------------------------------------
# SubjectQuery, AuthnQuery, RequestedAuthnContext, AttributeQuery,
# AuthzDecisionQuery all get Response as response
def parse_authz_decision_query_response(self, response,
binding=BINDING_SOAP):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AuthzResponse, "", binding,
**kwargs)
def parse_authn_query_response(self, response, binding=BINDING_SOAP):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AuthnQueryResponse, "", binding,
**kwargs)
def parse_assertion_id_request_response(self, response, binding):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
res = self._parse_response(response, AssertionIDResponse, "", binding,
**kwargs)
return res
# ------------------------------------------------------------------------
def parse_attribute_query_response(self, response, binding):
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AttributeResponse,
"attribute_consuming_service", binding,
**kwargs)
def parse_name_id_mapping_request_response(self, txt, binding=BINDING_SOAP):
"""
:param txt: SOAP enveloped SAML message
:param binding: Just a placeholder, it's always BINDING_SOAP
:return: parsed and verified <NameIDMappingResponse> instance
"""
return self._parse_response(txt, NameIDMappingResponse, "", binding)
# ------------------- ECP ------------------------------------------------
def create_ecp_authn_request(self, entityid=None, relay_state="",
sign=False, **kwargs):
""" Makes an authentication request.
:param entityid: The entity ID of the IdP to send the request to
:param relay_state: A token that can be used by the SP to know
where to continue the conversation with the client
:param sign: Whether the request should be signed or not.
:return: SOAP message with the AuthnRequest
"""
# ----------------------------------------
# <paos:Request>
# ----------------------------------------
my_url = self.service_urls(BINDING_PAOS)[0]
# must_understand and act according to the standard
#
paos_request = paos.Request(must_understand="1", actor=ACTOR,
response_consumer_url=my_url,
service=ECP_SERVICE)
# ----------------------------------------
# <ecp:RelayState>
# ----------------------------------------
relay_state = ecp.RelayState(actor=ACTOR, must_understand="1",
text=relay_state)
# ----------------------------------------
# <samlp:AuthnRequest>
# ----------------------------------------
try:
authn_req = kwargs["authn_req"]
try:
req_id = authn_req.id
except AttributeError:
req_id = 0 # Unknown but since it's SOAP it doesn't matter
except KeyError:
try:
_binding = kwargs["binding"]
except KeyError:
_binding = BINDING_SOAP
kwargs["binding"] = _binding
logger.debug("entityid: %s, binding: %s" % (entityid, _binding))
# The IDP publishes support for ECP by using the SOAP binding on
# SingleSignOnService
_, location = self.pick_binding("single_sign_on_service",
[_binding], entity_id=entityid)
req_id, authn_req = self.create_authn_request(
location, service_url_binding=BINDING_PAOS, **kwargs)
# ----------------------------------------
# The SOAP envelope
# ----------------------------------------
soap_envelope = make_soap_enveloped_saml_thingy(authn_req,
[paos_request,
relay_state])
return req_id, "%s" % soap_envelope
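# --- Illustrative usage (comment only; not part of the original module) ---
# A minimal sketch of driving create_ecp_authn_request() from an SP, assuming
# `client` is an already-configured instance (e.g. saml2.client.Saml2Client,
# which derives from this class) and the IdP entity ID is a placeholder:
#
#   req_id, soap_env = client.create_ecp_authn_request(
#       entityid="https://idp.example.org/idp.xml",
#       relay_state="opaque-sp-state")
#
# `soap_env` is the SOAP string handed back to the ECP-capable user agent and
# `req_id` should be kept so the eventual response can be matched as
# outstanding by parse_ecp_authn_response().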
def parse_ecp_authn_response(self, txt, outstanding=None):
rdict = soap.class_instances_from_soap_enveloped_saml_thingies(txt,
[paos,
ecp,
samlp])
_relay_state = None
for item in rdict["header"]:
if item.c_tag == "RelayState" and\
item.c_namespace == ecp.NAMESPACE:
_relay_state = item
response = self.parse_authn_request_response(rdict["body"],
BINDING_PAOS, outstanding)
return response, _relay_state
@staticmethod
def can_handle_ecp_response(response):
try:
accept = response.headers["accept"]
except KeyError:
try:
accept = response.headers["Accept"]
except KeyError:
return False
if MIME_PAOS in accept:
return True
else:
return False
# ----------------------------------------------------------------------
# IDP discovery
# ----------------------------------------------------------------------
@staticmethod
def create_discovery_service_request(url, entity_id, **kwargs):
"""
Creates the HTTP redirect URL needed to send the user to the
discovery service.
:param url: The URL of the discovery service
:param entity_id: The unique identifier of the service provider
:param return: The discovery service MUST redirect the user agent
to this location in response to this request
:param policy: A parameter name used to indicate the desired behavior
controlling the processing of the discovery service
:param returnIDParam: A parameter name used to return the unique
identifier of the selected identity provider to the original
requester.
:param isPassive: A boolean value True/False that controls
whether the discovery service is allowed to visibly interact with
the user agent.
:return: A URL
"""
args = {"entityID": entity_id}
for key in ["policy", "returnIDParam"]:
try:
args[key] = kwargs[key]
except KeyError:
pass
try:
args["return"] = kwargs["return_url"]
except KeyError:
try:
args["return"] = kwargs["return"]
except KeyError:
pass
if "isPassive" in kwargs:
if kwargs["isPassive"]:
args["isPassive"] = "true"
else:
args["isPassive"] = "false"
params = urlencode(args)
return "%s?%s" % (url, params)
@staticmethod
def parse_discovery_service_response(url="", query="",
returnIDParam="entityID"):
"""
Deal with the response url from a Discovery Service
:param url: the URL the user was redirected back to, or
:param query: just the query part of the URL.
:param returnIDParam: This is where the identifier of the IdP is
placed if it was specified in the query. Default is 'entityID'.
:return: The IdP identifier or "" if none was given
"""
if url:
part = urlparse(url)
qsd = parse_qs(part[4])
elif query:
qsd = parse_qs(query)
else:
qsd = {}
try:
return qsd[returnIDParam][0]
except KeyError:
return ""
|
rohe/pysaml2-3
|
src/saml2/client_base.py
|
Python
|
bsd-2-clause
| 30,922
|
# coding: utf-8
class Semantics:
def _default(self, ast):
return ast
def clean_label(self, label):
return label.replace(':', '')
def conjugated_verb(self, ast):
return {'t': ast[0],
'v': {'t': ast[1], 'v': ast[2]}}
def formatted(self, ast):
return {'t': ast[0], 'v': ast[1]}
def is_action(self, ptype):
"Return True if this ptype is verb tense."
parts = ptype.split('_')
return ('v' in parts) or ('tdy' in parts) or ('tmw' in parts) or ('ydy' in parts)
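# Comment-only examples (not in the original): with the split on '_' above,
#   Semantics().is_action('v_p')   -> True   (verb marker)
#   Semantics().is_action('tdy_p') -> True   (tense marker)
#   Semantics().is_action('n_p')   -> False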
def token(self, ast):
try:
float(ast)
return {'t':'number', 'v': ast }
except ValueError:
return {'t':'alpha', 'v': ast }
def raw(self, ast):
"A special primitive type like token()."
fun, arg = ast.split('(')
return {'t':fun, 'v': arg[:-1]}
def triple_phrase(self, ast):
return self.tuple(ast)
def tuple(self, ast):
return self.tuple_kwl(ast)
def tuple_verb(self, ast):
return self.tuple_kwl(ast)
def tuple_entry_preposition_p_kwl(self, ast):
return self.tuple_kwl(ast)
def tuple_kwl(self, ast):
return {'t': '%s_%s' % (ast[0]['t'], ast[1]['t']),
'v': ast}
def get_tuple_type(self, label):
label = label.replace(':', '') # Remove trailing ':'
ptypes = {
'act': 'v_p', # Verb phrase
'adj': 'a_p', # Adjective-headed phrase
'det': 'd_p', # Determiner-headed phrase
'nom': 'n_p', # Noun phrase
'pro': 'n_p', # Noun phrase
'pre': 'p_p', # Prepositional phrase
'raw': 'n_p', # Noun phrase
'sci': 'n_p', # Noun phrase
}
if label in ptypes:
return ptypes[label]
else:
return '%s_p' % label
def fun_word(self, ast):
new_ast = []
if len(ast) == 2:
new_ast.append(ast[0])
new_ast.append(self.phrase(ast[1]))
else:
new_ast = ast
return new_ast
def phrase(self, words):
ptypes = {
'act': 'v_p', # Verb phrase
'adj': 'a_p', # Adjective-headed phrase
'det': 'd_p', # Determiner-headed phrase
'nom': 'n_p', # Noun phrase
'pro': 'n_p', # Noun phrase
'pre': 'p_p', # Prepositional phrase
'raw': 'n_p', # Noun phrase
'sci': 'n_p', # Noun phrase
}
if len(words) == 0:
return None
elif len(words) == 1:
return self._phrase(words, ptypes)
elif len(words) == 2:
return {'t': '%s_%s' % (words[0]['t'], words[1]['t']), 'v': words }
elif len(words) > 2:
return self._phrase(words, ptypes)
def _phrase(self, words, ptypes):
if words[0]['t'] in ptypes:
return {'t': ptypes[words[0]['t']], 'v': words }
else:
return {'t': '%s_p' % words[0]['t'], 'v': words }
def sub_expression(self, ast):
new_ast = ast[1] # Remove left_paren (ast[0]), and right_paren (ast[2])
return new_ast
def subject_verb(self, ast):
return {'t': 's_v',
'v': [{'t': 'subject', 'v': ast['subject']},
{'t':'verb', 'v': ast['verb']}]}
def subject_verb_object(self, ast):
return {'t': 's_v_o',
'v': [{'t': 'subject', 'v': ast['subject']},
{'t':'verb', 'v': ast['verb']},
{'t':'object', 'v': ast['object']}]}
def verb_object(self, ast):
return {'t': 'v_o',
'v': [{'t': 'verb', 'v': ast['verb']},
{'t':'object', 'v': ast['object']}]}
def statement(self, clauses):
"Sentence type."
return {'t': 'statement', 'v': clauses }
def question(self, clauses):
"Sentence type."
return {'t': 'question', 'v': clauses }
def command(self, clauses):
"Sentence type."
return {'t': 'command', 'v': clauses }
def conjunction(self, ast):
"A conjunction is a triplet, the type is the middle element."
if 'v' in ast:
return ast
else: # base case
return {'t': ast[1], 'v': [ast[0], ast[2]]}
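# Comment-only example (not in the original): on the base case above,
#   Semantics().conjunction([left, 'and', right])
# returns {'t': 'and', 'v': [left, right]}; an ast that already contains a
# 'v' entry is passed through unchanged.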
def kwl2text(self, sentences):
ns = []
for s in sentences:
if 't' in s: # Ignore semicolon parses
ns.append(s)
return {'t': 'story', 'v': ns}
|
kasahorow/kwl
|
kwl2text/semantics.py
|
Python
|
bsd-2-clause
| 4,068
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import pprint
from subprocess import Popen, PIPE
import os
from future.tests.base import CodeHandler, unittest
class TestFuturizeSimple(CodeHandler):
"""
This class contains snippets of Python 2 code (invalid Python 3) and
tests for whether they can be passed to ``futurize`` and immediately
run under both Python 2 again and Python 3.
"""
@unittest.expectedFailure
def test_problematic_string(self):
""" This string generates a SyntaxError on Python 3 unless it has
an r prefix.
"""
before = r"""
s = 'The folder is "C:\Users"'.
"""
after = r"""
s = r'The folder is "C:\Users"'.
"""
self.convert_check(before, after)
def test_import_builtins(self):
before = """
a = raw_input()
b = open(a, b, c)
c = filter(a, b)
d = map(a, b)
e = isinstance(a, str)
f = bytes(a, encoding='utf-8')
for g in xrange(10**10):
pass
super(MyClass, self)
"""
after = """
from __future__ import unicode_literals
from future.builtins import bytes
from future.builtins import filter
from future.builtins import input
from future.builtins import map
from future.builtins import open
from future.builtins import range
from future.builtins import super
a = input()
b = open(a, b, c)
c = list(filter(a, b))
d = list(map(a, b))
e = isinstance(a, str)
f = bytes(a, encoding='utf-8')
for g in range(10**10):
pass
super(MyClass, self)
"""
self.convert_check(before, after, ignore_imports=False, run=False)
def test_xrange(self):
code = '''
for i in xrange(10):
pass
'''
self.convert(code)
@unittest.expectedFailure
def test_source_coding_utf8(self):
"""
Tests to ensure that the source coding line is not corrupted or
removed. It must be left as the first line in the file (including
before any __future__ imports). Also tests whether the unicode
characters in this encoding are parsed correctly and left alone.
"""
code = """
# -*- coding: utf-8 -*-
icons = [u"◐", u"◓", u"◑", u"◒"]
"""
self.unchanged(code)
def test_exception_syntax(self):
"""
Test of whether futurize handles the old-style exception syntax
"""
before = """
try:
pass
except IOError, e:
val = e.errno
"""
after = """
try:
pass
except IOError as e:
val = e.errno
"""
self.convert_check(before, after)
def test_super(self):
"""
This tests whether futurize keeps the old two-argument super() calls the
same as before. It should, because this still works in Py3.
"""
code = '''
class VerboseList(list):
def append(self, item):
print('Adding an item')
super(VerboseList, self).append(item)
'''
self.unchanged(code)
@unittest.expectedFailure
def test_file(self):
"""
file() as a synonym for open() is obsolete and invalid on Python 3.
"""
before = '''
f = file(__file__)
data = f.read()
f.close()
'''
after = '''
f = open(__file__)
data = f.read()
f.close()
'''
self.convert_check(before, after)
def test_apply(self):
before = '''
def addup(*x):
return sum(x)
assert apply(addup, (10,20)) == 30
'''
after = """
def addup(*x):
return sum(x)
assert addup(*(10,20)) == 30
"""
self.convert_check(before, after)
@unittest.skip('not implemented yet')
def test_download_pypi_package_and_test(self, package_name='future'):
URL = 'http://pypi.python.org/pypi/{0}/json'
import requests
r = requests.get(URL.format(package_name))
pprint.pprint(r.json())
download_url = r.json()['urls'][0]['url']
filename = r.json()['urls'][0]['filename']
# r2 = requests.get(download_url)
# with open('/tmp/' + filename, 'w') as tarball:
# tarball.write(r2.content)
def test_raw_input(self):
"""
Passes in a string to the waiting input() after futurize
conversion.
The code is the first snippet from these docs:
http://docs.python.org/2/library/2to3.html
"""
before = """
def greet(name):
print "Hello, {0}!".format(name)
print "What's your name?"
name = raw_input()
greet(name)
"""
desired = """
def greet(name):
print("Hello, {0}!".format(name))
print("What's your name?")
name = input()
greet(name)
"""
self.convert_check(before, desired, run=False)
for interpreter in self.interpreters:
p1 = Popen([interpreter, self.tempdir + 'mytestscript.py'],
stdout=PIPE, stdin=PIPE, stderr=PIPE, env=self.env)
(stdout, stderr) = p1.communicate(b'Ed')
self.assertEqual(stdout, b"What's your name?\nHello, Ed!\n")
def test_literal_prefixes_are_not_stripped(self):
"""
Tests to ensure that the u'' and b'' prefixes on unicode strings and
byte strings are not removed by the futurize script. Removing the
prefixes on Py3.3+ is unnecessary and loses some information -- namely,
that the strings have explicitly been marked as unicode or bytes,
rather than just e.g. a guess by some automated tool about what they
are.
"""
code = '''
s = u'unicode string'
b = b'byte string'
'''
self.unchanged(code)
@unittest.expectedFailure
def test_division(self):
"""
TODO: implement this!
"""
before = """
x = 1 / 2
"""
after = """
from future.utils import old_div
x = old_div(1, 2)
"""
self.convert_check(before, after, stages=[1])
class TestFuturizeRenamedStdlib(CodeHandler):
def test_renamed_modules(self):
before = """
import ConfigParser
import copy_reg
import cPickle
import cStringIO
s = cStringIO.StringIO('blah')
"""
after = """
import configparser
import copyreg
import pickle
import io
s = io.StringIO('blah')
"""
self.convert_check(before, after)
@unittest.expectedFailure
def test_urllib_refactor(self):
# Code like this using urllib is refactored by futurize --stage2 to use
# the new Py3 module names, but ``future`` doesn't support urllib yet.
before = """
import urllib
URL = 'http://pypi.python.org/pypi/future/json'
package_name = 'future'
r = urllib.urlopen(URL.format(package_name))
data = r.read()
"""
after = """
import urllib.request
URL = 'http://pypi.python.org/pypi/future/json'
package_name = 'future'
r = urllib.request.urlopen(URL.format(package_name))
data = r.read()
"""
self.convert_check(before, after)
def test_renamed_copy_reg_and_cPickle_modules(self):
"""
Example from docs.python.org/2/library/copy_reg.html
"""
before = """
import copy_reg
import copy
import cPickle
class C(object):
def __init__(self, a):
self.a = a
def pickle_c(c):
print('pickling a C instance...')
return C, (c.a,)
copy_reg.pickle(C, pickle_c)
c = C(1)
d = copy.copy(c)
p = cPickle.dumps(c)
"""
after = """
import copyreg
import copy
import pickle
class C(object):
def __init__(self, a):
self.a = a
def pickle_c(c):
print('pickling a C instance...')
return C, (c.a,)
copyreg.pickle(C, pickle_c)
c = C(1)
d = copy.copy(c)
p = pickle.dumps(c)
"""
self.convert_check(before, after)
@unittest.expectedFailure
def test_Py2_StringIO_module(self):
"""
Ideally, there would be a fixer for this. For now:
TODO: add the Py3 equivalent for this to the docs
"""
before = """
import cStringIO
s = cStringIO.StringIO('my string')
assert isinstance(s, cStringIO.InputType)
"""
after = """
import io
s = io.StringIO('my string')
# assert isinstance(s, io.InputType)
# There is no io.InputType in Python 3. What should we change this to
# instead?
"""
self.convert_check(before, after)
class TestFuturizeStage1(CodeHandler):
"""
Tests "stage 1": safe optimizations: modernizing Python 2 code so that it
uses print functions, new-style exception syntax, etc.
The behaviour should not change and this should introduce no dependency on
the ``future`` package. It produces more modern Python 2-only code. The
goal is to reduce the size of the real porting patch-set by performing
the uncontroversial patches first.
"""
def test_apply(self):
"""
apply() should be changed by futurize --stage1
"""
before = '''
def f(a, b):
return a + b
args = (1, 2)
assert apply(f, args) == 3
assert apply(f, ('a', 'b')) == 'ab'
'''
after = '''
def f(a, b):
return a + b
args = (1, 2)
assert f(*args) == 3
assert f(*('a', 'b')) == 'ab'
'''
self.convert_check(before, after, stages=[1])
def test_xrange(self):
"""
xrange should not be changed by futurize --stage1
"""
code = '''
for i in xrange(10):
pass
'''
self.unchanged(code, stages=[1])
@unittest.expectedFailure
def test_absolute_import_changes(self):
"""
Implicit relative imports should be converted to absolute or explicit
relative imports correctly.
Issue #16 (with porting bokeh/bbmodel.py)
"""
with open('specialmodels.py', 'w') as f:
f.write('pass')
before = """
import specialmodels.pandasmodel
specialmodels.pandasmodel.blah()
"""
after = """
from __future__ import absolute_import
from .specialmodels import pandasmodel
pandasmodel.blah()
"""
self.convert_check(before, after, stages=[1])
def test_safe_futurize_imports(self):
"""
The standard library module names should not be changed until stage 2
"""
before = """
import ConfigParser
import HTMLParser
import collections
ConfigParser.ConfigParser
HTMLParser.HTMLParser
d = collections.OrderedDict()
"""
self.unchanged(before, stages=[1])
def test_print(self):
before = """
print 'Hello'
"""
after = """
print('Hello')
"""
self.convert_check(before, after, stages=[1])
before = """
import sys
print >> sys.stderr, 'Hello', 'world'
"""
after = """
import sys
print('Hello', 'world', file=sys.stderr)
"""
self.convert_check(before, after, stages=[1])
def test_print_already_function(self):
"""
Running futurize --stage1 should not add a second set of parentheses
"""
before = """
print('Hello')
"""
self.unchanged(before, stages=[1])
@unittest.expectedFailure
def test_print_already_function_complex(self):
"""
Running futurize --stage1 does add a second set of parentheses
in this case. This is because the underlying lib2to3 has two distinct
grammars -- with a print statement and with a print function -- and,
when going forwards (2 to both), futurize assumes print is a statement,
which raises a ParseError.
"""
before = """
import sys
print('Hello', 'world', file=sys.stderr)
"""
self.unchanged(before, stages=[1])
def test_exceptions(self):
before = """
try:
raise AttributeError('blah')
except AttributeError, e:
pass
"""
after = """
try:
raise AttributeError('blah')
except AttributeError as e:
pass
"""
self.convert_check(before, after, stages=[1])
@unittest.expectedFailure
def test_string_exceptions(self):
"""
2to3 does not convert string exceptions: see
http://python3porting.com/differences.html.
"""
before = """
try:
raise "old string exception"
except Exception, e:
pass
"""
after = """
try:
raise Exception("old string exception")
except Exception as e:
pass
"""
self.convert_check(before, after, stages=[1])
@unittest.expectedFailure
def test_oldstyle_classes(self):
"""
We don't convert old-style classes to new-style automatically. Should we?
"""
before = """
class Blah:
pass
"""
after = """
class Blah(object):
pass
"""
self.convert_check(before, after, stages=[1])
@unittest.expectedFailure
def test_all(self):
"""
Standard library module names should not be changed in stage 1
"""
before = """
import ConfigParser
import HTMLParser
import collections
print 'Hello'
try:
raise AttributeError('blah')
except AttributeError, e:
pass
print 'Number is', 1 / 2
"""
after = """
from future.utils import old_div
import Configparser
import HTMLParser
import collections
print('Hello')
try:
raise AttributeError('blah')
except AttributeError as e:
pass
print('Number is', old_div(1, 2))
"""
self.convert_check(before, after, stages=[1])
def test_octal_literals(self):
before = """
mode = 0644
"""
after = """
mode = 0o644
"""
self.convert_check(before, after)
def test_long_int_literals(self):
before = """
bignumber = 12345678901234567890L
"""
after = """
bignumber = 12345678901234567890
"""
self.convert_check(before, after)
def test___future___import_position(self):
"""
Issue #4: __future__ imports inserted too low in file: SyntaxError
"""
code = """
# Comments here
# and here
__version__=''' $Id$ '''
__doc__="A Sequencer class counts things. It aids numbering and formatting lists."
__all__='Sequencer getSequencer setSequencer'.split()
#
# another comment
#
CONSTANTS = [ 0, 01, 011, 0111, 012, 02, 021, 0211, 02111, 013 ]
_RN_LETTERS = "IVXLCDM"
def my_func(value):
pass
''' Docstring-like comment here '''
"""
self.convert(code)
if __name__ == '__main__':
unittest.main()
|
talishte/ctigre
|
env/lib/python2.7/site-packages/future/tests/test_futurize.py
|
Python
|
bsd-2-clause
| 16,106
|
"""Core control stuff for Coverage."""
import atexit, os, random, socket, sys
from coverage.annotate import AnnotateReporter
from coverage.backward import string_class
from coverage.codeunit import code_unit_factory, CodeUnit
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData
from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
from coverage.files import find_python_files
from coverage.html import HtmlReporter
from coverage.misc import CoverageException, bool_or_none, join_regex
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
class coverage(object):
"""Programmatic access to Coverage.
To use::
from coverage import coverage
cov = coverage()
cov.start()
#.. blah blah (run your code) blah blah ..
cov.stop()
cov.html_report(directory='covhtml')
"""
def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, omit=None, include=None):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
create the final file name. If `data_suffix` is simply True, then a
suffix is created with the machine and process identity included.
`cover_pylib` is a boolean determining whether Python code installed
with the Python interpreter is measured. This includes the Python
standard library and any packages installed with the interpreter.
If `auto_data` is true, then any existing data file will be read when
coverage measurement starts, and data will be saved automatically when
measurement stops.
If `timid` is true, then a slower and simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions breaks the faster trace function.
If `branch` is true, then branch coverage will be measured in addition
to the usual statement coverage.
`config_file` determines what config file to read. If it is a string,
it is the name of the config file to read. If it is True, then a
standard file is read (".coveragerc"). If it is False, then no file is
read.
`source` is a list of file paths or package names. Only code located
in the trees indicated by the file paths or package names will be
measured.
`include` and `omit` are lists of filename patterns. Files that match
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
"""
from coverage import __version__
# A record of all the warnings that have been issued.
self._warnings = []
# Build our configuration from a number of sources:
# 1: defaults:
self.config = CoverageConfig()
# 2: from the coveragerc file:
if config_file:
if config_file is True:
config_file = ".coveragerc"
try:
self.config.from_file(config_file)
except ValueError:
_, err, _ = sys.exc_info()
raise CoverageException(
"Couldn't read config file %s: %s" % (config_file, err)
)
# 3: from environment variables:
self.config.from_environment('COVERAGE_OPTIONS')
env_data_file = os.environ.get('COVERAGE_FILE')
if env_data_file:
self.config.data_file = env_data_file
# 4: from constructor arguments:
if isinstance(omit, string_class):
omit = [omit]
if isinstance(include, string_class):
include = [include]
self.config.from_args(
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
source=source, omit=omit, include=include
)
self.auto_data = auto_data
self.atexit_registered = False
# _exclude_re is a dict mapping exclusion list names to compiled
# regexes.
self._exclude_re = {}
self._exclude_regex_stale()
self.file_locator = FileLocator()
# The source argument can be directories or package names.
self.source = []
self.source_pkgs = []
for src in self.config.source or []:
if os.path.exists(src):
self.source.append(self.file_locator.canonical_filename(src))
else:
self.source_pkgs.append(src)
self.omit = self._prep_patterns(self.config.omit)
self.include = self._prep_patterns(self.config.include)
self.collector = Collector(
self._should_trace, timid=self.config.timid,
branch=self.config.branch, warn=self._warn
)
# Suffixes are a bit tricky. We want to use the data suffix only when
# collecting data, not when combining data. So we save it as
# `self.run_suffix` now, and promote it to `self.data_suffix` if we
# find that we are collecting data later.
if data_suffix or self.config.parallel:
if not isinstance(data_suffix, string_class):
# if data_suffix=True, use .machinename.pid.random
data_suffix = True
else:
data_suffix = None
self.data_suffix = None
self.run_suffix = data_suffix
# Create the data file. We do this at construction time so that the
# data file will be written into the directory where the process
# started rather than wherever the process eventually chdir'd to.
self.data = CoverageData(
basename=self.config.data_file,
collector="coverage v%s" % __version__
)
# The dirs for files considered "installed with the interpreter".
self.pylib_dirs = []
if not self.config.cover_pylib:
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
for m in (atexit, os, random, socket):
if hasattr(m, "__file__"):
m_dir = self._canonical_dir(m.__file__)
if m_dir not in self.pylib_dirs:
self.pylib_dirs.append(m_dir)
# To avoid tracing the coverage code itself, we skip anything located
# where we are.
self.cover_dir = self._canonical_dir(__file__)
# The matchers for _should_trace, created when tracing starts.
self.source_match = None
self.pylib_match = self.cover_match = None
self.include_match = self.omit_match = None
# Only _harvest_data once per measurement cycle.
self._harvested = False
# Set the reporting precision.
Numbers.set_precision(self.config.precision)
# When tearing down the coverage object, modules can become None.
# Saving the modules as object attributes avoids problems, but it is
# quite ad-hoc which modules need to be saved and which references
# need to use the object attributes.
self.socket = socket
self.os = os
self.random = random
def _canonical_dir(self, f):
"""Return the canonical directory of the file `f`."""
return os.path.split(self.file_locator.canonical_filename(f))[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`."""
if not filename.endswith(".py"):
if filename[-4:-1] == ".py":
filename = filename[:-1]
return filename
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
Returns a canonicalized filename if it should be traced, False if it
should not.
"""
if os is None:
return False
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# filenames like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
return False
if filename.endswith(".html"):
# Jinja and maybe other templating systems compile templates into
# Python code, but use the template filename as the filename in
# the compiled code. Of course, those filenames are useless later
# so don't bother collecting. TODO: How should we really separate
# out good file extensions from bad?
return False
self._check_for_packages()
# Compiled Python files have two filenames: frame.f_code.co_filename is
# the filename at the time the .pyc was compiled. The second name is
# __file__, which is where the .pyc was actually loaded from. Since
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
dunder_file = frame.f_globals.get('__file__')
if dunder_file:
filename = self._source_for_file(dunder_file)
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = self.file_locator.canonical_filename(filename)
# If the user specified source, then that's authoritative about what to
# measure. If they didn't, then we have to exclude the stdlib and
# coverage.py directories.
if self.source_match:
if not self.source_match.match(canonical):
return False
else:
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(canonical):
return False
# We exclude the coverage code itself, since a little of it will be
# measured otherwise.
if self.cover_match and self.cover_match.match(canonical):
return False
# Check the file against the include and omit patterns.
if self.include_match and not self.include_match.match(canonical):
return False
if self.omit_match and self.omit_match.match(canonical):
return False
return canonical
# To log what should_trace returns, change this to "if 1:"
if 0:
_real_should_trace = _should_trace
def _should_trace(self, filename, frame): # pylint: disable=E0102
"""A logging decorator around the real _should_trace function."""
ret = self._real_should_trace(filename, frame)
print("should_trace: %r -> %r" % (filename, ret))
return ret
def _warn(self, msg):
"""Use `msg` as a warning."""
self._warnings.append(msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
def _prep_patterns(self, patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
patterns = patterns or []
prepped = []
for p in patterns or []:
if p.startswith("*") or p.startswith("?"):
prepped.append(p)
else:
prepped.append(self.file_locator.abs_file(p))
return prepped
def _check_for_packages(self):
"""Update the source_match matcher with latest imported packages."""
# Our self.source_pkgs attribute is a list of package names we want to
# measure. Each time through here, we see if we've imported any of
# them yet. If so, we add its file to source_match, and we don't have
# to look for that package any more.
if self.source_pkgs:
found = []
for pkg in self.source_pkgs:
try:
mod = sys.modules[pkg]
except KeyError:
continue
found.append(pkg)
try:
pkg_file = mod.__file__
except AttributeError:
self._warn("Module %s has no Python source." % pkg)
else:
d, f = os.path.split(pkg_file)
if f.startswith('__init__.'):
# This is actually a package, return the directory.
pkg_file = d
else:
pkg_file = self._source_for_file(pkg_file)
pkg_file = self.file_locator.canonical_filename(pkg_file)
self.source.append(pkg_file)
self.source_match.add(pkg_file)
for pkg in found:
self.source_pkgs.remove(pkg)
def use_cache(self, usecache):
"""Control the use of a data file (incorrectly called a cache).
`usecache` is true or false, whether to read and write data on disk.
"""
self.data.usefile(usecache)
def load(self):
"""Load previously-collected coverage data from the data file."""
self.collector.reset()
self.data.read()
def start(self):
"""Start measuring code coverage."""
if self.run_suffix:
# Calling start() means we're running code, so use the run_suffix
# as the data_suffix when we eventually save the data.
self.data_suffix = self.run_suffix
if self.auto_data:
self.load()
# Save coverage data when Python exits.
if not self.atexit_registered:
atexit.register(self.save)
self.atexit_registered = True
# Create the matchers we need for _should_trace
if self.source or self.source_pkgs:
self.source_match = TreeMatcher(self.source)
else:
if self.cover_dir:
self.cover_match = TreeMatcher([self.cover_dir])
if self.pylib_dirs:
self.pylib_match = TreeMatcher(self.pylib_dirs)
if self.include:
self.include_match = FnmatchMatcher(self.include)
if self.omit:
self.omit_match = FnmatchMatcher(self.omit)
self._harvested = False
self.collector.start()
def stop(self):
"""Stop measuring code coverage."""
self.collector.stop()
self._harvest_data()
def erase(self):
"""Erase previously-collected coverage data.
This removes the in-memory data collected in this session as well as
discarding the data file.
"""
self.collector.reset()
self.data.erase()
def clear_exclude(self, which='exclude'):
"""Clear the exclude list."""
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
def exclude(self, regex, which='exclude'):
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
selects lines that are treated differently during reporting.
`which` determines which list is modified. The "exclude" list selects
lines that are not considered executable at all. The "partial" list
indicates lines with branches that are not taken.
`regex` is a regular expression. The regex is added to the specified
list. If any of the regexes in the list is found in a line, the line
is marked for special treatment during reporting.
"""
excl_list = getattr(self.config, which + "_list")
excl_list.append(regex)
self._exclude_regex_stale()
def _exclude_regex_stale(self):
"""Drop all the compiled exclusion regexes, a list was modified."""
self._exclude_re.clear()
def _exclude_regex(self, which):
"""Return a compiled regex for the given exclusion list."""
if which not in self._exclude_re:
excl_list = getattr(self.config, which + "_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
def get_exclude_list(self, which='exclude'):
"""Return a list of excluded regex patterns.
`which` indicates which list is desired. See `exclude` for the lists
that are available, and their meaning.
"""
return getattr(self.config, which + "_list")
def save(self):
"""Save the collected coverage data to the data file."""
data_suffix = self.data_suffix
if data_suffix is True:
# If data_suffix was a simple true value, then make a suffix with
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
data_suffix = "%s.%s.%06d" % (
self.socket.gethostname(), self.os.getpid(),
self.random.randint(0, 99999)
)
self._harvest_data()
self.data.write(suffix=data_suffix)
def combine(self):
"""Combine together a number of similarly-named coverage data files.
All coverage data files whose name starts with `data_file` (from the
coverage() constructor) will be read, and combined together into the
current measurements.
"""
self.data.combine_parallel_data()
def _harvest_data(self):
"""Get the collected data and reset the collector.
Also warn about various problems collecting data.
"""
if not self._harvested:
self.data.add_line_data(self.collector.get_line_data())
self.data.add_arc_data(self.collector.get_arc_data())
self.collector.reset()
# If there are still entries in the source_pkgs list, then we never
# encountered those packages.
for pkg in self.source_pkgs:
self._warn("Module %s was never imported." % pkg)
# Find out if we got any data.
summary = self.data.summary()
if not summary:
self._warn("No data was collected.")
# Find files that were never executed at all.
for src in self.source:
for py_file in find_python_files(src):
self.data.touch_file(py_file)
self._harvested = True
# Backward compatibility with version 1.
def analysis(self, morf):
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
"""Analyze a module.
`morf` is a module or a filename. It will be analyzed to determine
its coverage statistics. The return value is a 5-tuple:
* The filename for the module.
* A list of line numbers of executable statements.
* A list of line numbers of excluded statements.
* A list of line numbers of statements not run (missing from
execution).
* A readable formatted string of the missing line numbers.
The analysis uses the source file itself and the current measured
coverage data.
"""
analysis = self._analyze(morf)
return (
analysis.filename, analysis.statements, analysis.excluded,
analysis.missing, analysis.missing_formatted()
)
def _analyze(self, it):
"""Analyze a single morf or code unit.
Returns an `Analysis` object.
"""
if not isinstance(it, CodeUnit):
it = code_unit_factory(it, self.file_locator)[0]
return Analysis(self, it)
def report(self, morfs=None, show_missing=True, ignore_errors=None,
file=None, # pylint: disable=W0622
omit=None, include=None
):
"""Write a summary report to `file`.
Each module in `morfs` is listed, with counts of statements, executed
statements, missing statements, and a list of lines missed.
`include` is a list of filename patterns. Modules whose filenames
match those patterns will be included in the report. Modules matching
`omit` will not be included in the report.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
reporter = SummaryReporter(
self, show_missing, self.config.ignore_errors
)
reporter.report(morfs, outfile=file, config=self.config)
def annotate(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
"""Annotate a list of modules.
Each module in `morfs` is annotated. The source is written to a new
file, named with a ",cover" suffix, with each line prefixed with a
marker to indicate the coverage of the line. Covered lines have ">",
excluded lines have "-", and missing lines have "!".
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
reporter = AnnotateReporter(self, self.config.ignore_errors)
reporter.report(morfs, config=self.config, directory=directory)
def html_report(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
"""Generate an HTML report.
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
html_dir=directory,
)
reporter = HtmlReporter(self, self.config.ignore_errors)
reporter.report(morfs, config=self.config)
def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None):
"""Generate an XML report of coverage results.
The report is compatible with Cobertura reports.
Each module in `morfs` is included in the report. `outfile` is the
path to write the file to, "-" will write to stdout.
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
xml_output=outfile,
)
file_to_close = None
if self.config.xml_output:
if self.config.xml_output == '-':
outfile = sys.stdout
else:
outfile = open(self.config.xml_output, "w")
file_to_close = outfile
try:
reporter = XmlReporter(self, self.config.ignore_errors)
reporter.report(morfs, outfile=outfile, config=self.config)
finally:
if file_to_close:
file_to_close.close()
def sysinfo(self):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
import platform, re
info = [
('version', covmod.__version__),
('coverage', covmod.__file__),
('cover_dir', self.cover_dir),
('pylib_dirs', self.pylib_dirs),
('tracer', self.collector.tracer_name()),
('data_path', self.data.filename),
('python', sys.version.replace('\n', '')),
('platform', platform.platform()),
('cwd', os.getcwd()),
('path', sys.path),
('environment', [
("%s = %s" % (k, v)) for k, v in os.environ.items()
if re.search("^COV|^PY", k)
]),
]
return info
def process_startup():
"""Call this at Python startup to perhaps measure coverage.
If the environment variable COVERAGE_PROCESS_START is defined, coverage
measurement is started. The value of the variable is the config file
to use.
There are two ways to configure your Python installation to invoke this
function when Python starts:
#. Create or append to sitecustomize.py to add these lines::
import coverage
coverage.process_startup()
#. Create a .pth file in your Python installation containing::
import coverage; coverage.process_startup()
"""
cps = os.environ.get("COVERAGE_PROCESS_START")
if cps:
cov = coverage(config_file=cps, auto_data=True)
if os.environ.get("COVERAGE_COVERAGE"):
# Measuring coverage within coverage.py takes yet more trickery.
cov.cover_dir = "Please measure coverage.py!"
cov.start()
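# Illustrative comment (not part of the original module): with one of the hooks
# described in the docstring installed, measurement is enabled per process by
# exporting the variable before launching Python, e.g.
#   COVERAGE_PROCESS_START=/path/to/.coveragerc python myscript.py
# where the config path and script name are placeholders.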
|
akiokio/centralfitestoque
|
src/.pycharm_helpers/coverage/control.py
|
Python
|
bsd-2-clause
| 25,880
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#=======================================================================
#
# hash_tester.py
# --------------
# This program sends several commands to the coretest_hashed subsystem
# in order to verify the SHA-1, SHA-256 and SHA-512/x hash function
# cores. The program will use the built in hash implementations in
# Python to do functional comparison and validation.
#
# Note: This program requires the PySerial module.
# http://pyserial.sourceforge.net/
#
# The single and dual block test cases are taken from the
# NIST KAT document:
# http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA_All.pdf
#
#
# Author: Joachim Strömbergson
# Copyright (c) 2014, Secworks Sweden AB
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#=======================================================================
#-------------------------------------------------------------------
# Python module imports.
#-------------------------------------------------------------------
import sys
import serial
import os
import time
import threading
import hashlib
#-------------------------------------------------------------------
# Defines.
#-------------------------------------------------------------------
# Serial port defines.
# CONFIGURE YOUR DEVICE HERE!
SERIAL_DEVICE = '/dev/cu.usbserial-A801SA6T'
BAUD_RATE = 9600
BIT_RATE = int(50E6 / BAUD_RATE)
BAUD_RATE2 = 256000
BIT_RATE2 = int(50E6 / BAUD_RATE2)
DATA_BITS = 8
STOP_BITS = 1
# Verbose operation on/off
VERBOSE = False
# Delay times we wait
PROC_DELAY_TIME = 0.0001
COMM_DELAY_TIME = 0.01
# Command codes.
SOC = '\x55'
EOC = '\xaa'
READ_CMD = '\x10'
WRITE_CMD = '\x11'
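# Comment added for illustration (not in the original file): the framing that
# these command codes imply, as used by the test functions further down, is
#   write: [SOC, WRITE_CMD, addr_prefix, addr, d3, d2, d1, d0, EOC]  (9 bytes)
#   read:  [SOC, READ_CMD,  addr_prefix, addr, EOC]                  (5 bytes)
# with the 32-bit data word transmitted most significant byte first.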
# Memory map.
UART_ADDR_PREFIX = '\x00'
UART_ADDR_BIT_RATE = '\x10'
UART_ADDR_DATA_BITS = '\x11'
UART_ADDR_STOP_BITS = '\x12'
SHA1_ADDR_PREFIX = '\x10'
SHA1_ADDR_NAME0 = '\x00'
SHA1_ADDR_NAME1 = '\x01'
SHA1_ADDR_VERSION = '\x02'
SHA1_ADDR_CTRL = '\x08'
SHA1_CTRL_INIT_CMD = '\x01'
SHA1_CTRL_NEXT_CMD = '\x02'
SHA1_ADDR_STATUS = '\x09'
SHA1_STATUS_READY_BIT = 0
SHA1_STATUS_VALID_BIT = 1
SHA1_ADDR_BLOCK0 = '\x10'
SHA1_ADDR_BLOCK1 = '\x11'
SHA1_ADDR_BLOCK2 = '\x12'
SHA1_ADDR_BLOCK3 = '\x13'
SHA1_ADDR_BLOCK4 = '\x14'
SHA1_ADDR_BLOCK5 = '\x15'
SHA1_ADDR_BLOCK6 = '\x16'
SHA1_ADDR_BLOCK7 = '\x17'
SHA1_ADDR_BLOCK8 = '\x18'
SHA1_ADDR_BLOCK9 = '\x19'
SHA1_ADDR_BLOCK10 = '\x1a'
SHA1_ADDR_BLOCK11 = '\x1b'
SHA1_ADDR_BLOCK12 = '\x1c'
SHA1_ADDR_BLOCK13 = '\x1d'
SHA1_ADDR_BLOCK14 = '\x1e'
SHA1_ADDR_BLOCK15 = '\x1f'
SHA1_ADDR_DIGEST0 = '\x20'
SHA1_ADDR_DIGEST1 = '\x21'
SHA1_ADDR_DIGEST2 = '\x22'
SHA1_ADDR_DIGEST3 = '\x23'
SHA1_ADDR_DIGEST4 = '\x24'
SHA256_ADDR_PREFIX = '\x20'
SHA256_ADDR_NAME0 = '\x00'
SHA256_ADDR_NAME1 = '\x01'
SHA256_ADDR_VERSION = '\x02'
SHA256_ADDR_CTRL = '\x08'
SHA256_CTRL_INIT_CMD = '\x01'
SHA256_CTRL_NEXT_CMD = '\x02'
SHA256_ADDR_STATUS = '\x09'
SHA256_STATUS_READY_BIT = 0
SHA256_STATUS_VALID_BIT = 1
SHA256_ADDR_BLOCK0 = '\x10'
SHA256_ADDR_BLOCK1 = '\x11'
SHA256_ADDR_BLOCK2 = '\x12'
SHA256_ADDR_BLOCK3 = '\x13'
SHA256_ADDR_BLOCK4 = '\x14'
SHA256_ADDR_BLOCK5 = '\x15'
SHA256_ADDR_BLOCK6 = '\x16'
SHA256_ADDR_BLOCK7 = '\x17'
SHA256_ADDR_BLOCK8 = '\x18'
SHA256_ADDR_BLOCK9 = '\x19'
SHA256_ADDR_BLOCK10 = '\x1a'
SHA256_ADDR_BLOCK11 = '\x1b'
SHA256_ADDR_BLOCK12 = '\x1c'
SHA256_ADDR_BLOCK13 = '\x1d'
SHA256_ADDR_BLOCK14 = '\x1e'
SHA256_ADDR_BLOCK15 = '\x1f'
SHA256_ADDR_DIGEST0 = '\x20'
SHA256_ADDR_DIGEST1 = '\x21'
SHA256_ADDR_DIGEST2 = '\x22'
SHA256_ADDR_DIGEST3 = '\x23'
SHA256_ADDR_DIGEST4 = '\x24'
SHA256_ADDR_DIGEST5 = '\x25'
SHA256_ADDR_DIGEST6 = '\x26'
SHA256_ADDR_DIGEST7 = '\x27'
SHA512_ADDR_PREFIX = '\x30'
SHA512_ADDR_NAME0 = '\x00'
SHA512_ADDR_NAME1 = '\x01'
SHA512_ADDR_VERSION = '\x02'
SHA512_ADDR_CTRL = '\x08'
SHA512_CTRL_INIT_CMD = '\x01'
SHA512_CTRL_NEXT_CMD = '\x02'
SHA512_CTRL_MODE_LOW = 2
SHA512_CTRL_MODE_HIGH = 3
SHA512_ADDR_STATUS = '\x09'
SHA512_STATUS_READY_BIT = 0
SHA512_STATUS_VALID_BIT = 1
SHA512_ADDR_BLOCK0 = '\x10'
SHA512_ADDR_BLOCK1 = '\x11'
SHA512_ADDR_BLOCK2 = '\x12'
SHA512_ADDR_BLOCK3 = '\x13'
SHA512_ADDR_BLOCK4 = '\x14'
SHA512_ADDR_BLOCK5 = '\x15'
SHA512_ADDR_BLOCK6 = '\x16'
SHA512_ADDR_BLOCK7 = '\x17'
SHA512_ADDR_BLOCK8 = '\x18'
SHA512_ADDR_BLOCK9 = '\x19'
SHA512_ADDR_BLOCK10 = '\x1a'
SHA512_ADDR_BLOCK11 = '\x1b'
SHA512_ADDR_BLOCK12 = '\x1c'
SHA512_ADDR_BLOCK13 = '\x1d'
SHA512_ADDR_BLOCK14 = '\x1e'
SHA512_ADDR_BLOCK15 = '\x1f'
SHA512_ADDR_BLOCK16 = '\x20'
SHA512_ADDR_BLOCK17 = '\x21'
SHA512_ADDR_BLOCK18 = '\x22'
SHA512_ADDR_BLOCK19 = '\x23'
SHA512_ADDR_BLOCK20 = '\x24'
SHA512_ADDR_BLOCK21 = '\x25'
SHA512_ADDR_BLOCK22 = '\x26'
SHA512_ADDR_BLOCK23 = '\x27'
SHA512_ADDR_BLOCK24 = '\x28'
SHA512_ADDR_BLOCK25 = '\x29'
SHA512_ADDR_BLOCK26 = '\x2a'
SHA512_ADDR_BLOCK27 = '\x2b'
SHA512_ADDR_BLOCK28 = '\x2c'
SHA512_ADDR_BLOCK29 = '\x2d'
SHA512_ADDR_BLOCK30 = '\x2e'
SHA512_ADDR_BLOCK31 = '\x2f'
SHA512_ADDR_DIGEST0 = '\x40'
SHA512_ADDR_DIGEST1 = '\x41'
SHA512_ADDR_DIGEST2 = '\x42'
SHA512_ADDR_DIGEST3 = '\x43'
SHA512_ADDR_DIGEST4 = '\x44'
SHA512_ADDR_DIGEST5 = '\x45'
SHA512_ADDR_DIGEST6 = '\x46'
SHA512_ADDR_DIGEST7 = '\x47'
SHA512_ADDR_DIGEST8 = '\x48'
SHA512_ADDR_DIGEST9 = '\x49'
SHA512_ADDR_DIGEST10 = '\x4a'
SHA512_ADDR_DIGEST11 = '\x4b'
SHA512_ADDR_DIGEST12 = '\x4c'
SHA512_ADDR_DIGEST13 = '\x4d'
SHA512_ADDR_DIGEST14 = '\x4e'
SHA512_ADDR_DIGEST15 = '\x4f'
MODE_SHA_512_224 = '\x00'
MODE_SHA_512_256 = '\x01'
MODE_SHA_384 = '\x02'
MODE_SHA_512 = '\x03'
sha1_block_addr = [SHA1_ADDR_BLOCK0, SHA1_ADDR_BLOCK1,
SHA1_ADDR_BLOCK2, SHA1_ADDR_BLOCK3,
SHA1_ADDR_BLOCK4, SHA1_ADDR_BLOCK5,
SHA1_ADDR_BLOCK6, SHA1_ADDR_BLOCK7,
SHA1_ADDR_BLOCK8, SHA1_ADDR_BLOCK9,
SHA1_ADDR_BLOCK10, SHA1_ADDR_BLOCK11,
SHA1_ADDR_BLOCK12, SHA1_ADDR_BLOCK13,
SHA1_ADDR_BLOCK14, SHA1_ADDR_BLOCK15]
sha1_digest_addr = [SHA1_ADDR_DIGEST0, SHA1_ADDR_DIGEST1,
SHA1_ADDR_DIGEST2, SHA1_ADDR_DIGEST3,
SHA1_ADDR_DIGEST4]
sha256_block_addr = [SHA256_ADDR_BLOCK0, SHA256_ADDR_BLOCK1,
SHA256_ADDR_BLOCK2, SHA256_ADDR_BLOCK3,
SHA256_ADDR_BLOCK4, SHA256_ADDR_BLOCK5,
SHA256_ADDR_BLOCK6, SHA256_ADDR_BLOCK7,
SHA256_ADDR_BLOCK8, SHA256_ADDR_BLOCK9,
SHA256_ADDR_BLOCK10, SHA256_ADDR_BLOCK11,
SHA256_ADDR_BLOCK12, SHA256_ADDR_BLOCK13,
SHA256_ADDR_BLOCK14, SHA256_ADDR_BLOCK15]
sha256_digest_addr = [SHA256_ADDR_DIGEST0, SHA256_ADDR_DIGEST1,
SHA256_ADDR_DIGEST2, SHA256_ADDR_DIGEST3,
SHA256_ADDR_DIGEST4, SHA256_ADDR_DIGEST5,
SHA256_ADDR_DIGEST6, SHA256_ADDR_DIGEST7]
sha512_block_addr = [SHA512_ADDR_BLOCK0, SHA512_ADDR_BLOCK1,
SHA512_ADDR_BLOCK2, SHA512_ADDR_BLOCK3,
SHA512_ADDR_BLOCK4, SHA512_ADDR_BLOCK5,
SHA512_ADDR_BLOCK6, SHA512_ADDR_BLOCK7,
SHA512_ADDR_BLOCK8, SHA512_ADDR_BLOCK9,
SHA512_ADDR_BLOCK10, SHA512_ADDR_BLOCK11,
SHA512_ADDR_BLOCK12, SHA512_ADDR_BLOCK13,
SHA512_ADDR_BLOCK14, SHA512_ADDR_BLOCK15,
SHA512_ADDR_BLOCK16, SHA512_ADDR_BLOCK17,
SHA512_ADDR_BLOCK18, SHA512_ADDR_BLOCK19,
SHA512_ADDR_BLOCK20, SHA512_ADDR_BLOCK21,
SHA512_ADDR_BLOCK22, SHA512_ADDR_BLOCK23,
SHA512_ADDR_BLOCK24, SHA512_ADDR_BLOCK25,
SHA512_ADDR_BLOCK26, SHA512_ADDR_BLOCK27,
SHA512_ADDR_BLOCK28, SHA512_ADDR_BLOCK29,
SHA512_ADDR_BLOCK30, SHA512_ADDR_BLOCK31]
sha512_digest_addr = [SHA512_ADDR_DIGEST0, SHA512_ADDR_DIGEST1,
SHA512_ADDR_DIGEST2, SHA512_ADDR_DIGEST3,
SHA512_ADDR_DIGEST4, SHA512_ADDR_DIGEST5,
SHA512_ADDR_DIGEST6, SHA512_ADDR_DIGEST7,
SHA512_ADDR_DIGEST8, SHA512_ADDR_DIGEST9,
SHA512_ADDR_DIGEST10, SHA512_ADDR_DIGEST11,
SHA512_ADDR_DIGEST12, SHA512_ADDR_DIGEST13,
SHA512_ADDR_DIGEST14, SHA512_ADDR_DIGEST15]
NIST_512_SINGLE = ['\x61', '\x62', '\x63', '\x80', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x18']
NIST_512_DOUBLE0 = ['\x61', '\x62', '\x63', '\x64', '\x62', '\x63', '\x64', '\x65',
'\x63', '\x64', '\x65', '\x66', '\x64', '\x65', '\x66', '\x67',
'\x65', '\x66', '\x67', '\x68', '\x66', '\x67', '\x68', '\x69',
'\x67', '\x68', '\x69', '\x6A', '\x68', '\x69', '\x6A', '\x6B',
'\x69', '\x6A', '\x6B', '\x6C', '\x6A', '\x6B', '\x6C', '\x6D',
'\x6B', '\x6C', '\x6D', '\x6E', '\x6C', '\x6D', '\x6E', '\x6F',
'\x6D', '\x6E', '\x6F', '\x70', '\x6E', '\x6F', '\x70', '\x71',
'\x80', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00']
NIST_512_DOUBLE1 = ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x01', '\xC0']
NIST_1024_SINGLE = ['\x61', '\x62', '\x63', '\x80', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x18']
NIST_1024_DOUBLE0 = ['\x61', '\x62', '\x63', '\x64', '\x65', '\x66', '\x67', '\x68',
'\x62', '\x63', '\x64', '\x65', '\x66', '\x67', '\x68', '\x69',
'\x63', '\x64', '\x65', '\x66', '\x67', '\x68', '\x69', '\x6a',
'\x64', '\x65', '\x66', '\x67', '\x68', '\x69', '\x6a', '\x6b',
'\x65', '\x66', '\x67', '\x68', '\x69', '\x6a', '\x6b', '\x6c',
'\x66', '\x67', '\x68', '\x69', '\x6a', '\x6b', '\x6c', '\x6d',
'\x67', '\x68', '\x69', '\x6a', '\x6b', '\x6c', '\x6d', '\x6e',
'\x68', '\x69', '\x6a', '\x6b', '\x6c', '\x6d', '\x6e', '\x6f',
'\x69', '\x6a', '\x6b', '\x6c', '\x6d', '\x6e', '\x6f', '\x70',
'\x6a', '\x6b', '\x6c', '\x6d', '\x6e', '\x6f', '\x70', '\x71',
'\x6b', '\x6c', '\x6d', '\x6e', '\x6f', '\x70', '\x71', '\x72',
'\x6c', '\x6d', '\x6e', '\x6f', '\x70', '\x71', '\x72', '\x73',
'\x6d', '\x6e', '\x6f', '\x70', '\x71', '\x72', '\x73', '\x74',
'\x6e', '\x6f', '\x70', '\x71', '\x72', '\x73', '\x74', '\x75',
'\x80', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00']
NIST_1024_DOUBLE1 = ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x03', '\x80']
#-------------------------------------------------------------------
# print_response()
#
# Parses a received buffer and prints the response.
#-------------------------------------------------------------------
def print_response(buffer):
if VERBOSE:
print "Length of response: %d" % len(buffer)
if buffer[0] == '\xaa':
print "Response contains correct Start of Response (SOR)"
if buffer[-1] == '\x55':
print "Response contains correct End of Response (EOR)"
response_code = ord(buffer[1])
if response_code == 0xfe:
print "UNKNOWN response code received."
elif response_code == 0xfd:
print "ERROR response code received."
elif response_code == 0x7f:
read_addr = ord(buffer[2]) * 256 + ord(buffer[3])
read_data = (ord(buffer[4]) * 16777216) + (ord(buffer[5]) * 65536) +\
(ord(buffer[6]) * 256) + ord(buffer[7])
print "READ_OK. address 0x%02x = 0x%08x." % (read_addr, read_data)
elif response_code == 0x7e:
read_addr = ord(buffer[2]) * 256 + ord(buffer[3])
print "WRITE_OK. address 0x%02x." % (read_addr)
elif response_code == 0x7d:
print "RESET_OK."
else:
print "Response 0x%02x is unknown." % response_code
print buffer
#-------------------------------------------------------------------
# read_serial_thread()
#
# Function used in a thread to read from the serial port and
# collect response from coretest.
#-------------------------------------------------------------------
def read_serial_thread(serialport):
if VERBOSE:
print "Serial port response thread started. Waiting for response..."
buffer = []
while True:
if serialport.isOpen():
response = serialport.read()
buffer.append(response)
if ((response == '\x55') and len(buffer) > 7):
print_response(buffer)
buffer = []
else:
print "No open device yet."
time.sleep(COMM_DELAY_TIME)
#-------------------------------------------------------------------
# write_serial_bytes()
#
# Send the bytes in the buffer to coretest over the serial port.
#-------------------------------------------------------------------
def write_serial_bytes(tx_cmd, serialport):
if VERBOSE:
print "Command to be sent:", tx_cmd
for tx_byte in tx_cmd:
serialport.write(tx_byte)
# Allow the device to complete the transaction.
time.sleep(COMM_DELAY_TIME)
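# Comment-only usage sketch (not in the original): reading a core's name
# register with the helper above, assuming `ser` is an opened serial.Serial
# device,
#   write_serial_bytes([SOC, READ_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_NAME0, EOC], ser)
# after which the read_serial_thread() prints the corresponding READ_OK line.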
#-------------------------------------------------------------------
# single_block_test_sha512x()
#
# Write a given block to SHA-512/x and perform single block
# processing for the given mode.
#-------------------------------------------------------------------
def single_block_test_sha512x(block, mode, ser):
# Write block to SHA-512.
for i in range(len(block) / 4):
message = [SOC, WRITE_CMD, SHA512_ADDR_PREFIX,] + [sha512_block_addr[i]] +\
block[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start initial block hashing, wait and check status.
mode_cmd = chr(ord(SHA512_CTRL_INIT_CMD) + (ord(mode) << SHA512_CTRL_MODE_LOW))
write_serial_bytes([SOC, WRITE_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_CTRL,
'\x00', '\x00', '\x00', mode_cmd, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_STATUS, EOC], ser)
# Select the correct number of digest addresses to read.
if (mode == MODE_SHA_512_224):
mode_digest_addr = sha512_digest_addr[0 : 7]
elif (mode == MODE_SHA_512_256):
mode_digest_addr = sha512_digest_addr[0 : 8]
elif (mode == MODE_SHA_384):
mode_digest_addr = sha512_digest_addr[0 : 12]
elif (mode == MODE_SHA_512):
mode_digest_addr = sha512_digest_addr
# Extract the digest.
for digest_addr in mode_digest_addr:
message = [SOC, READ_CMD, SHA512_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
#-------------------------------------------------------------------
# dual_block_test_sha512x()
#
# Write two given blocks to SHA-512/x and perform dual block
# processing for the given mode.
#-------------------------------------------------------------------
def dual_block_test_sha512x(block0, block1, mode, ser):
# Write block0 to SHA-512.
for i in range(len(block0) / 4):
message = [SOC, WRITE_CMD, SHA512_ADDR_PREFIX,] + [sha512_block_addr[i]] +\
block0[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start initial block hashing, wait and check status.
mode_cmd = chr(ord(SHA512_CTRL_INIT_CMD) + (ord(mode) << SHA512_CTRL_MODE_LOW))
write_serial_bytes([SOC, WRITE_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_CTRL,
'\x00', '\x00', '\x00', mode_cmd, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_STATUS, EOC], ser)
# Write block1 to SHA-512.
for i in range(len(block1) / 4):
message = [SOC, WRITE_CMD, SHA512_ADDR_PREFIX,] + [sha512_block_addr[i]] +\
block1[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start next block hashing, wait and check status.
mode_cmd = chr(ord(SHA512_CTRL_NEXT_CMD) + (ord(mode) << SHA512_CTRL_MODE_LOW))
write_serial_bytes([SOC, WRITE_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_CTRL,
'\x00', '\x00', '\x00', mode_cmd, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_STATUS, EOC], ser)
# Select the correct number of digest addresses to read.
if (mode == MODE_SHA_512_224):
mode_digest_addr = sha512_digest_addr[0 : 7]
elif (mode == MODE_SHA_512_256):
mode_digest_addr = sha512_digest_addr[0 : 8]
elif (mode == MODE_SHA_384):
mode_digest_addr = sha512_digest_addr[0 : 12]
elif (mode == MODE_SHA_512):
mode_digest_addr = sha512_digest_addr
# Extract the digest.
for digest_addr in mode_digest_addr:
message = [SOC, READ_CMD, SHA512_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
#-------------------------------------------------------------------
# single_block_test_sha256()
#
# Write a given block to SHA-256 and perform single block
# processing.
#-------------------------------------------------------------------
def single_block_test_sha256(block, ser):
# Write block to SHA-2.
for i in range(len(block) / 4):
message = [SOC, WRITE_CMD, SHA256_ADDR_PREFIX,] + [sha256_block_addr[i]] +\
block[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start initial block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA256_CTRL_INIT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_STATUS, EOC], ser)
# Extract the digest.
for digest_addr in sha256_digest_addr:
message = [SOC, READ_CMD, SHA256_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
#-------------------------------------------------------------------
# double_block_test_sha256()
#
# Run double block message test.
#-------------------------------------------------------------------
def double_block_test_sha256(block1, block2, ser):
# Write block1 to SHA-256.
for i in range(len(block1) / 4):
message = [SOC, WRITE_CMD, SHA256_ADDR_PREFIX,] + [sha256_block_addr[i]] +\
block1[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start initial block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA256_CTRL_INIT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_STATUS, EOC], ser)
# Extract the first digest.
for digest_addr in sha256_digest_addr:
message = [SOC, READ_CMD, SHA256_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
# Write block2 to SHA-256.
for i in range(len(block2) / 4):
message = [SOC, WRITE_CMD, SHA256_ADDR_PREFIX,] + [sha256_block_addr[i]] +\
block2[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start next block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA256_CTRL_NEXT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_STATUS, EOC], ser)
# Extract the second digest.
for digest_addr in sha256_digest_addr:
message = [SOC, READ_CMD, SHA256_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
#-------------------------------------------------------------------
# huge_message_test_sha256()
#
# Test with a message consisting of a huge number (n) of blocks.
#-------------------------------------------------------------------
def huge_message_test_sha256(block, n, ser):
# Write block to SHA-256.
for i in range(len(block) / 4):
message = [SOC, WRITE_CMD, SHA256_ADDR_PREFIX,] + [sha256_block_addr[i]] +\
block[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start initial block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA256_CTRL_INIT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_STATUS, EOC], ser)
# Extract the first digest.
print "Digest for block 0000:"
for digest_addr in sha256_digest_addr:
message = [SOC, READ_CMD, SHA256_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
# First block done. Do the rest.
for i in range(n - 1):
# Start next block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA256_CTRL_NEXT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_STATUS, EOC], ser)
        # Extract the digest for this block.
print "Digest for block %04d" % (i + 1)
for digest_addr in sha256_digest_addr:
message = [SOC, READ_CMD, SHA256_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
#-------------------------------------------------------------------
# single_block_test_sha1()
#
# Write a given block to SHA-1 and perform single block
# processing.
#-------------------------------------------------------------------
def single_block_test_sha1(block, ser):
# Write block to SHA-1.
for i in range(len(block) / 4):
message = [SOC, WRITE_CMD, SHA1_ADDR_PREFIX,] + [sha1_block_addr[i]] +\
block[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start initial block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA1_CTRL_INIT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_STATUS, EOC], ser)
# Extract the digest.
for digest_addr in sha1_digest_addr:
message = [SOC, READ_CMD, SHA1_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
#-------------------------------------------------------------------
# double_block_test_sha1
#
# Run double block message test for SHA-1.
#-------------------------------------------------------------------
def double_block_test_sha1(block1, block2, ser):
# Write block1 to SHA-1.
for i in range(len(block1) / 4):
message = [SOC, WRITE_CMD, SHA1_ADDR_PREFIX,] + [sha1_block_addr[i]] +\
block1[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start initial block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA1_CTRL_INIT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_STATUS, EOC], ser)
# Extract the first digest.
for digest_addr in sha1_digest_addr:
message = [SOC, READ_CMD, SHA1_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
# Write block2 to SHA-1.
for i in range(len(block2) / 4):
message = [SOC, WRITE_CMD, SHA1_ADDR_PREFIX,] + [sha1_block_addr[i]] +\
block2[(i * 4) : ((i * 4 ) + 4)] + [EOC]
write_serial_bytes(message, ser)
# Start next block hashing, wait and check status.
write_serial_bytes([SOC, WRITE_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_CTRL,
'\x00', '\x00', '\x00', SHA1_CTRL_NEXT_CMD, EOC], ser)
time.sleep(PROC_DELAY_TIME)
write_serial_bytes([SOC, READ_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_STATUS, EOC], ser)
# Extract the second digest.
for digest_addr in sha1_digest_addr:
message = [SOC, READ_CMD, SHA1_ADDR_PREFIX] + [digest_addr] + [EOC]
write_serial_bytes(message, ser)
print""
#-------------------------------------------------------------------
# TC1: Read name and version from SHA-1 core.
#-------------------------------------------------------------------
def tc1(ser):
print "TC1: Reading name, type and version words from SHA-1 core."
write_serial_bytes([SOC, READ_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_NAME0, EOC], ser)
write_serial_bytes([SOC, READ_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_NAME1, EOC], ser)
write_serial_bytes([SOC, READ_CMD, SHA1_ADDR_PREFIX, SHA1_ADDR_VERSION, EOC], ser)
print""
#-------------------------------------------------------------------
# TC2: SHA-1 Single block message test as specified by NIST.
#-------------------------------------------------------------------
def tc2(ser):
print "TC2: Single block message test for SHA-1."
tc2_sha1_expected = [0xa9993e36, 0x4706816a, 0xba3e2571,
0x7850c26c, 0x9cd0d89d]
print "TC2: Expected digest values as specified by NIST:"
for i in tc2_sha1_expected:
print("0x%08x " % i)
print("")
single_block_test_sha1(NIST_512_SINGLE, ser)
#-------------------------------------------------------------------
# TC3: SHA-1 Double block message test as specified by NIST.
#-------------------------------------------------------------------
def tc3(ser):
print "TC3: Double block message test for SHA-1."
tc3_1_sha1_expected = [0xF4286818, 0xC37B27AE, 0x0408F581,
0x84677148, 0x4A566572]
tc3_2_sha1_expected = [0x84983E44, 0x1C3BD26E, 0xBAAE4AA1,
0xF95129E5, 0xE54670F1]
print "TC3: Expected digest values for first block as specified by NIST:"
for i in tc3_1_sha1_expected:
print("0x%08x " % i)
print("")
print "TC3: Expected digest values for second block as specified by NIST:"
for i in tc3_2_sha1_expected:
print("0x%08x " % i)
print("")
double_block_test_sha1(NIST_512_DOUBLE0, NIST_512_DOUBLE1, ser)
#-------------------------------------------------------------------
# TC4: Read name and version from SHA-256 core.
#-------------------------------------------------------------------
def tc4(ser):
print "TC4: Reading name, type and version words from SHA-256 core."
my_cmd = [SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_NAME0, EOC]
write_serial_bytes(my_cmd, ser)
my_cmd = [SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_NAME1, EOC]
write_serial_bytes(my_cmd, ser)
my_cmd = [SOC, READ_CMD, SHA256_ADDR_PREFIX, SHA256_ADDR_VERSION, EOC]
write_serial_bytes(my_cmd, ser)
print""
#-------------------------------------------------------------------
# TC5: SHA-256 Single block message test as specified by NIST.
#-------------------------------------------------------------------
def tc5(ser):
print "TC5: Single block message test for SHA-256."
tc5_sha256_expected = [0xBA7816BF, 0x8F01CFEA, 0x414140DE, 0x5DAE2223,
0xB00361A3, 0x96177A9C, 0xB410FF61, 0xF20015AD]
print "TC5: Expected digest values as specified by NIST:"
for i in tc5_sha256_expected:
print("0x%08x " % i)
print("")
single_block_test_sha256(NIST_512_SINGLE, ser)
#-------------------------------------------------------------------
# TC6: SHA-256 Double block message test as specified by NIST.
#-------------------------------------------------------------------
def tc6(ser):
print "TC6: Double block message test for SHA-256."
tc6_1_sha256_expected = [0x85E655D6, 0x417A1795, 0x3363376A, 0x624CDE5C,
0x76E09589, 0xCAC5F811, 0xCC4B32C1, 0xF20E533A]
tc6_2_sha256_expected = [0x248D6A61, 0xD20638B8, 0xE5C02693, 0x0C3E6039,
0xA33CE459, 0x64FF2167, 0xF6ECEDD4, 0x19DB06C1]
print "TC6: Expected digest values for first block as specified by NIST:"
for i in tc6_1_sha256_expected:
print("0x%08x " % i)
print("")
print "TC6: Expected digest values for second block as specified by NIST:"
for i in tc6_2_sha256_expected:
print("0x%08x " % i)
print("")
double_block_test_sha256(NIST_512_DOUBLE0, NIST_512_DOUBLE1, ser)
#-------------------------------------------------------------------
# TC7: SHA-256 Huge message test.
#-------------------------------------------------------------------
def tc7(ser):
n = 1000
print "TC7: Message with %d blocks test for SHA-256." % n
tc7_block = ['\xaa', '\x55', '\xaa', '\x55', '\xde', '\xad', '\xbe', '\xef',
'\x55', '\xaa', '\x55', '\xaa', '\xf0', '\x0f', '\xf0', '\x0f',
'\xaa', '\x55', '\xaa', '\x55', '\xde', '\xad', '\xbe', '\xef',
'\x55', '\xaa', '\x55', '\xaa', '\xf0', '\x0f', '\xf0', '\x0f',
'\xaa', '\x55', '\xaa', '\x55', '\xde', '\xad', '\xbe', '\xef',
'\x55', '\xaa', '\x55', '\xaa', '\xf0', '\x0f', '\xf0', '\x0f',
'\xaa', '\x55', '\xaa', '\x55', '\xde', '\xad', '\xbe', '\xef',
'\x55', '\xaa', '\x55', '\xaa', '\xf0', '\x0f', '\xf0', '\x0f']
tc7_expected = [0x7638f3bc, 0x500dd1a6, 0x586dd4d0, 0x1a1551af,
0xd821d235, 0x2f919e28, 0xd5842fab, 0x03a40f2a]
huge_message_test_sha256(tc7_block, n, ser)
print "TC7: Expected digest values after %d blocks:" %n
for i in tc7_expected:
print("0x%08x " % i)
print("")
#-------------------------------------------------------------------
# TC8: Read name and version from SHA-512 core.
#-------------------------------------------------------------------
def tc8(ser):
print "TC8: Reading name, type and version words from SHA-512 core."
my_cmd = [SOC, READ_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_NAME0, EOC]
write_serial_bytes(my_cmd, ser)
my_cmd = [SOC, READ_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_NAME1, EOC]
write_serial_bytes(my_cmd, ser)
my_cmd = [SOC, READ_CMD, SHA512_ADDR_PREFIX, SHA512_ADDR_VERSION, EOC]
write_serial_bytes(my_cmd, ser)
print""
#-------------------------------------------------------------------
# TC9: Single block tests of SHA-512/x
#
# We do this for all modes.
#-------------------------------------------------------------------
def tc9(ser):
print "TC9: Single block message test for SHA-512/x."
tc9_224_expected = [0x4634270f, 0x707b6a54, 0xdaae7530, 0x460842e2,
0x0e37ed26, 0x5ceee9a4, 0x3e8924aa]
tc9_256_expected = [0x53048e26, 0x81941ef9, 0x9b2e29b7, 0x6b4c7dab,
0xe4c2d0c6, 0x34fc6d46, 0xe0e2f131, 0x07e7af23]
tc9_384_expected = [0xcb00753f, 0x45a35e8b, 0xb5a03d69, 0x9ac65007,
0x272c32ab, 0x0eded163, 0x1a8b605a, 0x43ff5bed,
0x8086072b, 0xa1e7cc23, 0x58baeca1, 0x34c825a7]
tc9_512_expected = [0xddaf35a1, 0x93617aba, 0xcc417349, 0xae204131,
0x12e6fa4e, 0x89a97ea2, 0x0a9eeee6, 0x4b55d39a,
0x2192992a, 0x274fc1a8, 0x36ba3c23, 0xa3feebbd,
0x454d4423, 0x643ce80e, 0x2a9ac94f, 0xa54ca49f]
print "TC9-1: Expected digest values for SHA-512/224 as specified by NIST:"
for i in tc9_224_expected:
print("0x%08x " % i)
single_block_test_sha512x(NIST_1024_SINGLE, MODE_SHA_512_224, ser)
print("")
print "TC9-2: Expected digest values for SHA-512/256 as specified by NIST:"
for i in tc9_256_expected:
print("0x%08x " % i)
single_block_test_sha512x(NIST_1024_SINGLE, MODE_SHA_512_256, ser)
print("")
print "TC9-3: Expected digest values for SHA-384 as specified by NIST:"
for i in tc9_384_expected:
print("0x%08x " % i)
single_block_test_sha512x(NIST_1024_SINGLE, MODE_SHA_384, ser)
print("")
print "TC9-4: Expected digest values for SHA-512 as specified by NIST:"
for i in tc9_512_expected:
print("0x%08x " % i)
single_block_test_sha512x(NIST_1024_SINGLE, MODE_SHA_512, ser)
print("")
#-------------------------------------------------------------------
# TC10: Dual block tests of SHA-512/x
#
# We do this for all modes.
#-------------------------------------------------------------------
def tc10(ser):
print "TC9: Single block message test for SHA-512/x."
tc10_224_expected = [0x23fec5bb, 0x94d60b23, 0x30819264, 0x0b0c4533,
0x35d66473, 0x4fe40e72, 0x68674af9]
tc10_256_expected = [0x3928e184, 0xfb8690f8, 0x40da3988, 0x121d31be,
0x65cb9d3e, 0xf83ee614, 0x6feac861, 0xe19b563a]
tc10_384_expected = [0x09330c33, 0xf71147e8, 0x3d192fc7, 0x82cd1b47,
0x53111b17, 0x3b3b05d2, 0x2fa08086, 0xe3b0f712,
0xfcc7c71a, 0x557e2db9, 0x66c3e9fa, 0x91746039]
tc10_512_expected = [0x8e959b75, 0xdae313da, 0x8cf4f728, 0x14fc143f,
0x8f7779c6, 0xeb9f7fa1, 0x7299aead, 0xb6889018,
0x501d289e, 0x4900f7e4, 0x331b99de, 0xc4b5433a,
0xc7d329ee, 0xb6dd2654, 0x5e96e55b, 0x874be909]
print "TC10-1: Expected digest values for SHA-512/224 as specified by NIST:"
for i in tc10_224_expected:
print("0x%08x " % i)
dual_block_test_sha512x(NIST_1024_DOUBLE0, NIST_1024_DOUBLE1, MODE_SHA_512_224, ser)
print("")
print "TC10-2: Expected digest values for SHA-512/256 as specified by NIST:"
for i in tc10_256_expected:
print("0x%08x " % i)
dual_block_test_sha512x(NIST_1024_DOUBLE0, NIST_1024_DOUBLE1, MODE_SHA_512_256, ser)
print("")
print "TC10-3: Expected digest values for SHA-384 as specified by NIST:"
for i in tc10_384_expected:
print("0x%08x " % i)
dual_block_test_sha512x(NIST_1024_DOUBLE0, NIST_1024_DOUBLE1, MODE_SHA_384, ser)
print("")
print "TC10-4: Expected digest values for SHA-512 as specified by NIST:"
for i in tc10_512_expected:
print("0x%08x " % i)
dual_block_test_sha512x(NIST_1024_DOUBLE0, NIST_1024_DOUBLE1, MODE_SHA_512, ser)
print("")
#-------------------------------------------------------------------
# main()
#
# Set up the serial port, start the receive thread and run the enabled tests.
#-------------------------------------------------------------------
def main():
# Open device
ser = serial.Serial()
ser.port=SERIAL_DEVICE
ser.baudrate=BAUD_RATE
ser.bytesize=DATA_BITS
ser.parity='N'
ser.stopbits=STOP_BITS
ser.timeout=1
ser.writeTimeout=0
if VERBOSE:
print "Setting up a serial port and starting a receive thread."
try:
ser.open()
except:
print "Error: Can't open serial device."
sys.exit(1)
    # Try to switch the baud rate in the FPGA and then on the host side.
bit_rate_high = chr((BIT_RATE2 >> 8) & 0xff)
bit_rate_low = chr(BIT_RATE2 & 0xff)
if VERBOSE:
print("Changing to new baud rate.")
print("Baud rate: %d" % BAUD_RATE2)
print("Bit rate high byte: 0x%02x" % ord(bit_rate_high))
print("Bit rate low byte: 0x%02x" % ord(bit_rate_low))
write_serial_bytes([SOC, WRITE_CMD, UART_ADDR_PREFIX, UART_ADDR_BIT_RATE,
'\x00', '\x00', bit_rate_high, bit_rate_low, EOC], ser)
ser.baudrate=BAUD_RATE2
try:
my_thread = threading.Thread(target=read_serial_thread, args=(ser,))
except:
print "Error: Can't start thread."
sys.exit()
my_thread.daemon = True
my_thread.start()
# Run the enabled test cases.
tc_list = [(tc1, False), (tc2, False), (tc3, False), (tc4, False),
(tc5, False), (tc6, False), (tc7, True), (tc8, False),
(tc9, False), (tc10, False)]
for (test_case, action) in tc_list:
if action:
test_case(ser)
# Exit nicely.
time.sleep(50 * COMM_DELAY_TIME)
if VERBOSE:
print "Done. Closing device."
ser.close()
#-------------------------------------------------------------------
# __name__
# Standard Python idiom that allows the file to be run standalone as
# well as imported as a module.
#-------------------------------------------------------------------
if __name__=="__main__":
# Run the main function.
sys.exit(main())
#=======================================================================
# EOF hash_tester.py
#=======================================================================
|
secworks/coretest_hashes
|
src/sw/hash_tester.py
|
Python
|
bsd-2-clause
| 42,650
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0046_auto_20160428_1703'),
]
operations = [
migrations.AddField(
model_name='language',
name='sortRankInClade',
field=models.IntegerField(default=0),
),
]
|
lingdb/CoBL-public
|
ielex/lexicon/migrations/0047_language_sortrankinclade.py
|
Python
|
bsd-2-clause
| 410
|
import os
import json
import time
from datetime import datetime
from django.conf import settings
from django import template
from django.template.loader import get_template
from django.template import TemplateSyntaxError
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.serializers.json import DjangoJSONEncoder
from ..gizmo_dependencies import global_dependencies
register = template.Library()
CSS_OUTPUT_TYPE = 'css'
JS_OUTPUT_TYPE = 'js'
CSS_EXTENSION = 'css'
JS_EXTENSION = 'js'
EXTERNAL_INDICATOR = '://'
VALID_OUTPUT_TYPES = (CSS_OUTPUT_TYPE, JS_OUTPUT_TYPE)
class HighchartsDateEncoder(DjangoJSONEncoder):
"""
Special Json Encoder for Tethys
"""
def default(self, obj):
# Highcharts date serializer
if isinstance(obj, datetime):
return time.mktime(obj.timetuple()) * 1000
return super(HighchartsDateEncoder, self).default(obj)
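# Example (illustrative only, not used elsewhere in this module):
#   json.dumps({'x': datetime(2015, 1, 1)}, cls=HighchartsDateEncoder)
# serializes the datetime as milliseconds since the epoch, the numeric format
# Highcharts expects on datetime axes.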
@register.filter(is_safe=True)
def isstring(value):
"""
    Filter that returns True only when the given value is the str type itself
"""
if value is str:
return True
else:
return False
@register.filter
def return_item(l, i):
try:
return l[i]
except:
return None
def json_date_handler(obj):
if isinstance(obj, datetime):
return time.mktime(obj.timetuple()) * 1000
else:
return obj
@register.filter
def jsonify(data):
"""
Convert python data structures into a JSON string
"""
return json.dumps(data, default=json_date_handler)
@register.filter
def divide(value, divisor):
"""
Divide value by divisor
"""
v = float(value)
d = float(divisor)
return v/d
class TethysGizmoIncludeNode(template.Node):
"""
Custom template include node that returns Tethys gizmos
"""
def __init__(self, gizmo_name, options, *args, **kwargs):
super(TethysGizmoIncludeNode, self).__init__(*args, **kwargs)
self.gizmo_name = gizmo_name
self.options = template.Variable(options)
def render(self, context):
try:
# Get the name of the gizmo to load
gizmo_name = self.gizmo_name
gizmo_templates_root = os.path.join('tethys_gizmos', 'gizmos')
# Handle case where gizmo_name is a string literal
if self.gizmo_name[0] in ('"', "'"):
gizmo_name = self.gizmo_name.replace("'", '')
gizmo_name = gizmo_name.replace('"', '')
            # Add gizmo name to 'gizmos_rendered' context variable (used to load static libraries)
if 'gizmos_rendered' not in context:
context.update({'gizmos_rendered': []})
if gizmo_name not in context['gizmos_rendered']:
context['gizmos_rendered'].append(gizmo_name)
# Determine path to gizmo template
gizmo_file_name = '{0}.html'.format(gizmo_name)
template_name = os.path.join(gizmo_templates_root, gizmo_file_name)
# Retrieve the gizmo template and render
t = get_template(template_name)
c = context.new(self.options.resolve(context))
return t.render(c)
except:
if settings.TEMPLATE_DEBUG:
raise
return ''
@register.tag
def gizmo(parser, token):
"""
Similar to the include tag, gizmo loads special templates called gizmos that come with the django-tethys_gizmo
app. Gizmos provide tools for developing user interface elements with minimal code. Examples include date pickers,
maps, and interactive plots.
To insert a gizmo, use the "gizmo" tag and give it the name of a gizmo and a dictionary of configuration parameters.
Example::
{% load tethys_gizmos %}
{% gizmo example_gizmo options %}
{% gizmo "example_gizmo" options %}
NOTE: the "options" dictionary must be a template context variable.
    ALSO NOTE: All supporting css and javascript libraries are loaded using the gizmo_dependencies tag (see below).
"""
try:
tag_name, gizmo_name, options_literal = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('"%s" tag requires exactly two arguments' % token.contents.split()[0])
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError('"{0}" tag takes at least one argument: the name of the '
'template to be included.'.format(bits[0]))
return TethysGizmoIncludeNode(gizmo_name, options_literal)
class TethysGizmoDependenciesNode(template.Node):
"""
Loads gizmo dependencies and renders in "script" or "link" tag appropriately.
"""
def __init__(self, output_type, *args, **kwargs):
super(TethysGizmoDependenciesNode, self).__init__(*args, **kwargs)
self.output_type = output_type
def render(self, context):
# Get the gizmos rendered from the context
gizmos_rendered = context['gizmos_rendered']
# Compile list of unique gizmo dependencies
dependencies = []
# Add gizmo dependencies
for rendered_gizmo in gizmos_rendered:
try:
# Retrieve the "gizmo_dependencies" module and find the appropriate function
dependencies_module = __import__('tethys_gizmos.gizmo_dependencies', fromlist=[rendered_gizmo])
dependencies_function = getattr(dependencies_module, rendered_gizmo)
# Retrieve a list of dependencies for the gizmo
gizmo_deps = dependencies_function(context)
# Only append dependencies if they do not already exist
for dependency in gizmo_deps:
if EXTERNAL_INDICATOR in dependency:
static_url = dependency
else:
static_url = static(dependency)
if static_url not in dependencies:
# Lookup the static url given the path
dependencies.append(static_url)
except AttributeError:
# Skip those that do not have dependencies
pass
# Add the global dependencies last
for dependency in global_dependencies(context):
if EXTERNAL_INDICATOR in dependency:
static_url = dependency
else:
static_url = static(dependency)
if static_url not in dependencies:
# Lookup the static url given the path
dependencies.append(static_url)
# Create markup tags
script_tags = []
style_tags = []
for dependency in dependencies:
# Only process Script tags if the dependency has a ".js" extension and the output type is JS or not specified
if JS_EXTENSION in dependency and (self.output_type == JS_OUTPUT_TYPE or self.output_type is None):
script_tags.append('<script src="{0}" type="text/javascript"></script>'.format(dependency))
# Only process Style tags if the dependency has a ".css" extension and the output type is CSS or not specified
elif CSS_EXTENSION in dependency and (self.output_type == CSS_OUTPUT_TYPE or self.output_type is None):
style_tags.append('<link href="{0}" rel="stylesheet" />'.format(dependency))
# Combine all tags
tags = style_tags + script_tags
tags_string = '\n'.join(tags)
return tags_string
@register.tag
def gizmo_dependencies(parser, token):
"""
Load all gizmo dependencies (JavaScript and CSS).
Example::
{% gizmo_dependencies css %}
{% gizmo_dependencies js %}
"""
output_type = None
bits = token.split_contents()
if len(bits) > 2:
raise TemplateSyntaxError('"{0}" takes at most one argument: the type of dependencies to output '
'(either "js" or "css")'.format(token.split_contents()[0]))
elif len(bits) == 2:
output_type = bits[1]
# Validate output_type
if output_type:
# Remove quotes
if output_type[0] in ('"', "'"):
output_type = output_type.replace("'", '')
output_type = output_type.replace('"', '')
# Lowercase
output_type = output_type.lower()
# Check for valid values
if output_type not in VALID_OUTPUT_TYPES:
raise TemplateSyntaxError('Invalid output type specified: only "js" and "css" are '
'allowed, "{0}" given.'.format(output_type))
return TethysGizmoDependenciesNode(output_type)
|
CI-WATER/django-tethys_gizmos
|
tethys_gizmos/templatetags/tethys_gizmos.py
|
Python
|
bsd-2-clause
| 8,658
|
import copy
import functools
import logging
import archinfo
from ..errors import SimIRSBError, SimIRSBNoDecodeError, SimValueError
from .engine import SuccessorsMixin
from .. import sim_options as o
from ..misc.ux import once
from ..state_plugins.inspect import BP_AFTER, BP_BEFORE
from ..state_plugins.unicorn_engine import STOP, _UC_NATIVE, unicorn as uc_module
#pylint: disable=arguments-differ
l = logging.getLogger(name=__name__)
class SimEngineUnicorn(SuccessorsMixin):
"""
Concrete execution in the Unicorn Engine, a fork of qemu.
Responds to the following parameters in the step stack:
- step: How many basic blocks we want to execute
- extra_stop_points: A collection of addresses at which execution should halt
"""
def __check(self, num_inst=None, **kwargs): # pylint: disable=unused-argument
state = self.state
if o.UNICORN not in state.options:
l.debug('Unicorn-engine is not enabled.')
return False
if uc_module is None or _UC_NATIVE is None:
if once('unicorn_install_warning'):
l.error("You are attempting to use unicorn engine support even though it or the angr native layer "
"isn't installed")
return False
self.__countdown(state)
# should the countdown still be updated if we're not stepping a whole block?
# current decision: leave it updated, since we are moving forward
if num_inst is not None:
# we don't support single stepping with unicorn
return False
unicorn = state.unicorn # shorthand
        # If we have a concrete target we want to synchronize the segment registers
        # before executing, otherwise undefined behavior could happen.
if state.project.concrete_target and self.project.arch.name in ('x86', 'x86_64'):
if not state.concrete.segment_registers_initialized:
l.debug("segment register must be synchronized with the concrete target before using unicorn engine")
return False
if state.regs.ip.symbolic:
l.debug("symbolic IP!")
return False
if unicorn.countdown_symbolic_stop > 0:
l.info("not enough blocks since symbolic stop (%d more)", unicorn.countdown_symbolic_stop)
return False
if unicorn.countdown_unsupported_stop > 0:
l.info("not enough blocks since unsupported VEX statement/expression stop (%d more)",
unicorn.countdown_unsupported_stop)
return False
if unicorn.countdown_nonunicorn_blocks > 0:
l.info("not enough runs since last unicorn (%d)", unicorn.countdown_nonunicorn_blocks)
return False
if unicorn.countdown_stop_point > 0:
l.info("not enough blocks since stop point (%d more)", unicorn.countdown_stop_point)
elif o.UNICORN_SYM_REGS_SUPPORT not in state.options and not unicorn._check_registers():
l.info("failed register check")
return False
return True
@staticmethod
def __countdown(state):
state.unicorn.countdown_nonunicorn_blocks -= 1
state.unicorn.countdown_symbolic_stop -= 1
state.unicorn.countdown_unsupported_stop -= 1
state.unicorn.countdown_stop_point -= 1
@staticmethod
def __reset_countdowns(state, next_state):
next_state.unicorn.countdown_symbolic_stop = 0
next_state.unicorn.countdown_unsupported_stop = 0
next_state.unicorn.countdown_nonunicorn_blocks = state.unicorn.countdown_nonunicorn_blocks
next_state.unicorn.countdown_stop_point = state.unicorn.countdown_stop_point
def _execute_block_instrs_in_vex(self, block_details):
if block_details["block_addr"] not in self.block_details_cache:
vex_block_details = self._get_vex_block_details(block_details["block_addr"], block_details["block_size"])
self.block_details_cache[block_details["block_addr"]] = vex_block_details
else:
vex_block_details = self.block_details_cache[block_details["block_addr"]]
# Save breakpoints for restoring later
saved_mem_read_breakpoints = copy.copy(self.state.inspect._breakpoints["mem_read"])
vex_block = vex_block_details["block"]
for reg_name, reg_value in block_details["registers"]:
self.state.registers.store(reg_name, reg_value, inspect=False, disable_actions=True)
# VEX statements to ignore when re-executing instructions that touched symbolic data
ignored_statement_tags = ["Ist_AbiHint", "Ist_IMark", "Ist_MBE", "Ist_NoOP"]
self.state.scratch.set_tyenv(vex_block.tyenv)
for instr_entry in block_details["instrs"]:
self._instr_mem_reads = list(instr_entry["mem_dep"]) # pylint:disable=attribute-defined-outside-init
if self._instr_mem_reads:
# Insert breakpoint to set the correct memory read address
self.state.inspect.b('mem_read', when=BP_BEFORE, action=self._set_correct_mem_read_addr)
instr_vex_stmt_indices = vex_block_details["stmt_indices"][instr_entry["instr_addr"]]
start_index = instr_vex_stmt_indices["start"]
end_index = instr_vex_stmt_indices["end"]
for vex_stmt_idx in range(start_index, end_index + 1):
# Execute handler from HeavyVEXMixin for the statement
vex_stmt = vex_block.statements[vex_stmt_idx]
if vex_stmt.tag not in ignored_statement_tags:
self.stmt_idx = vex_stmt_idx # pylint:disable=attribute-defined-outside-init
super()._handle_vex_stmt(vex_stmt) # pylint:disable=no-member
# Restore breakpoints
self.state.inspect._breakpoints["mem_read"] = copy.copy(saved_mem_read_breakpoints)
del self._instr_mem_reads
del self.stmt_idx
def _execute_symbolic_instrs(self):
for block_details in self.state.unicorn._get_details_of_blocks_with_symbolic_instrs():
try:
if self.state.os_name == "CGC" and block_details["block_addr"] == self.state.unicorn.cgc_receive_addr:
# Re-execute receive syscall
reg_vals = dict(block_details["registers"])
curr_regs = self.state.regs
# If any regs are not present in the block details for re-execute, they are probably symbolic and so
# were not saved in native interface. Use current register values in those cases: they should have
# correct values right now. rx_bytes argument is set to 0 since we care about updating symbolic
# values only
syscall_args = [reg_vals.get("ebx", curr_regs.ebx), reg_vals.get("ecx", curr_regs.ecx),
reg_vals.get("edx", curr_regs.edx), 0]
syscall_simproc = self.state.project.simos.syscall_from_number(3, abi=None)
syscall_simproc.arch = self.state.arch
syscall_simproc.project = self.state.project
syscall_simproc.state = self.state
syscall_simproc.cc = self.state.project.simos.syscall_cc(self.state)
ret_val = getattr(syscall_simproc, syscall_simproc.run_func)(*syscall_args)
self.state.registers.store("eax", ret_val, inspect=False, disable_actions=True)
else:
self._execute_block_instrs_in_vex(block_details)
except SimValueError as e:
l.error(e)
def _get_vex_block_details(self, block_addr, block_size):
# Mostly based on the lifting code in HeavyVEXMixin
# pylint:disable=no-member
irsb = super().lift_vex(addr=block_addr, state=self.state, size=block_size, cross_insn_opt=False)
if irsb.size == 0:
if irsb.jumpkind == 'Ijk_NoDecode':
if not self.state.project.is_hooked(irsb.addr):
raise SimIRSBNoDecodeError(f"IR decoding error at 0x{irsb.addr:02x}. You can hook this instruction"
" with a python replacement using project.hook"
f"(0x{irsb.addr:02x}, your_function, length=length_of_instruction).")
raise SimIRSBError("Block is hooked with custom code but original block was executed in unicorn")
raise SimIRSBError(f"Empty IRSB found at 0x{irsb.addr:02x}.")
instrs_stmt_indices = {}
curr_instr_addr = None
curr_instr_stmts_start_idx = 0
for idx, statement in enumerate(irsb.statements):
if statement.tag == "Ist_IMark":
if curr_instr_addr is not None:
instrs_stmt_indices[curr_instr_addr] = {"start": curr_instr_stmts_start_idx, "end": idx - 1}
curr_instr_addr = statement.addr
curr_instr_stmts_start_idx = idx
# Adding details of the last instruction
instrs_stmt_indices[curr_instr_addr] = {"start": curr_instr_stmts_start_idx, "end": len(irsb.statements) - 1}
block_details = {"block": irsb, "stmt_indices": instrs_stmt_indices}
return block_details
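    # Illustrative note (added): "stmt_indices" maps each instruction address in
    # the lifted block to the inclusive start/end range of VEX statement indices
    # it produced; _execute_block_instrs_in_vex() uses this to replay only the
    # statements of instructions that touched symbolic data.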
def _set_correct_mem_read_addr(self, state):
assert len(self._instr_mem_reads) != 0
mem_read_val = b""
mem_read_size = 0
mem_read_address = None
mem_read_taint_map = []
mem_read_symbolic = True
while mem_read_size != state.inspect.mem_read_length and self._instr_mem_reads:
next_val = self._instr_mem_reads.pop(0)
if not mem_read_address:
mem_read_address = next_val["address"]
if next_val["symbolic"]:
mem_read_taint_map.extend([1] * next_val["size"])
else:
mem_read_taint_map.extend([0] * next_val["size"])
mem_read_size += next_val["size"]
mem_read_symbolic &= next_val["symbolic"]
mem_read_val += next_val["value"]
assert state.inspect.mem_read_length == mem_read_size
state.inspect.mem_read_address = state.solver.BVV(mem_read_address, state.inspect.mem_read_address.size())
if not mem_read_symbolic:
# Since read is (partially) concrete, insert breakpoint to return the correct concrete value
self.state.inspect.b('mem_read', mem_read_address=mem_read_address, when=BP_AFTER,
action=functools.partial(self._set_correct_mem_read_val, value=mem_read_val,
taint_map=mem_read_taint_map))
def _set_correct_mem_read_val(self, state, value, taint_map): # pylint: disable=no-self-use
if taint_map.count(1) == 0:
# The value is completely concrete
if state.arch.memory_endness == archinfo.Endness.LE:
state.inspect.mem_read_expr = state.solver.BVV(value[::-1])
else:
state.inspect.mem_read_expr = state.solver.BVV(value)
else:
# The value is partially concrete. Use the bitmap to read the symbolic bytes from memory and construct the
# correct value
actual_value = []
for offset, taint in enumerate(taint_map):
if taint == 1:
# Byte is symbolic. Read the value from memory
actual_value.append(state.memory.load(state.inspect.mem_read_address + offset, 1, inspect=False,
disable_actions=True))
else:
actual_value.append(state.solver.BVV(value[offset], 8))
if state.arch.memory_endness == archinfo.Endness.LE:
actual_value = actual_value[::-1]
state.inspect.mem_read_expr = state.solver.Concat(*actual_value)
def process_successors(self, successors, **kwargs):
state = self.state
if not self.__check(**kwargs):
return super().process_successors(successors, **kwargs)
extra_stop_points = kwargs.get('extra_stop_points', None)
last_block_details = kwargs.get('last_block_details', None)
step = kwargs.get('step', None)
if extra_stop_points is None:
extra_stop_points = set(self.project._sim_procedures)
else:
# convert extra_stop_points to a set
extra_stop_points = set(extra_stop_points)
extra_stop_points.update(self.project._sim_procedures)
if successors.addr in extra_stop_points:
# trying to start unicorn execution on a stop point, fallback to next engine
return super().process_successors(successors, **kwargs)
successors.sort = 'Unicorn'
# add all instruction breakpoints as extra_stop_points
if state.supports_inspect:
for bp in state.inspect._breakpoints['instruction']:
# if there is an instruction breakpoint on every instruction, it does not make sense
# to use unicorn.
if "instruction" not in bp.kwargs:
l.info("disabling unicorn because of breakpoint on every instruction")
return super().process_successors(successors, **kwargs)
# add the breakpoint to extra_stop_points. We don't care if the breakpoint is BP_BEFORE or
# BP_AFTER, this is only to stop unicorn when we get near a breakpoint. The breakpoint itself
# will then be handled by another engine that can more accurately step instruction-by-instruction.
extra_stop_points.add(bp.kwargs["instruction"])
# VEX block cache for executing instructions skipped by native interface
self.block_details_cache = {} # pylint:disable=attribute-defined-outside-init
# initialize unicorn plugin
try:
state.unicorn.setup()
except SimValueError:
# it's trying to set a symbolic register somehow
# fail out, force fallback to next engine
self.__reset_countdowns(successors.initial_state, state)
return super().process_successors(successors, **kwargs)
try:
state.unicorn.set_stops(extra_stop_points)
if last_block_details is not None:
state.unicorn.set_last_block_details(last_block_details)
state.unicorn.set_tracking(track_bbls=o.UNICORN_TRACK_BBL_ADDRS in state.options,
track_stack=o.UNICORN_TRACK_STACK_POINTERS in state.options)
state.unicorn.hook()
state.unicorn.start(step=step)
self._execute_symbolic_instrs()
state.unicorn.finish()
finally:
state.unicorn.destroy()
if state.unicorn.steps == 0 or state.unicorn.stop_reason == STOP.STOP_NOSTART:
# fail out, force fallback to next engine
self.__reset_countdowns(successors.initial_state, state)
# TODO: idk what the consequences of this might be. If this failed step can actually change non-unicorn
# state then this is bad news.
return super().process_successors(successors, **kwargs)
description = f'Unicorn ({STOP.name_stop(state.unicorn.stop_reason)} after {state.unicorn.steps} steps)'
state.history.recent_block_count += state.unicorn.steps
state.history.recent_description = description
# this can be expensive, so check first
if state.supports_inspect:
for bp in state.inspect._breakpoints['irsb']:
if bp.check(state, BP_AFTER):
for bbl_addr in state.history.recent_bbl_addrs:
state._inspect('irsb', BP_AFTER, address=bbl_addr)
break
if (state.unicorn.stop_reason in (STOP.symbolic_stop_reasons + STOP.unsupported_reasons) or
state.unicorn.stop_reason in (STOP.STOP_UNKNOWN_MEMORY_WRITE_SIZE, STOP.STOP_VEX_LIFT_FAILED)):
l.info(state.unicorn.stop_message)
if state.unicorn.jumpkind.startswith('Ijk_Sys'):
state.ip = state.unicorn._syscall_pc
successors.add_successor(state, state.ip, state.solver.true, state.unicorn.jumpkind)
successors.description = description
successors.processed = True
|
angr/angr
|
angr/engines/unicorn.py
|
Python
|
bsd-2-clause
| 16,522
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import re
import logging
import requests
from indra.literature import pubmed_client
# Python3
try:
from functools import lru_cache
# Python2
except ImportError:
from functools32 import lru_cache
logger = logging.getLogger('crossref')
crossref_url = 'http://api.crossref.org/'
crossref_search_url = 'http://search.crossref.org/dois'
# THE CLICKTHROUGH KEY FILE REFERENCED BELOW IS NOT UNDER VERSION CONTROL
# For more information see:
# http://clickthroughsupport.crossref.org/click-through-service-for-researchers/
api_key_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'cr_clickthrough_key')
# Read the API key
api_key = None
try:
with open(api_key_file, 'rt') as fh:
api_key = fh.read().strip()
except IOError:
logger.warning('CrossRef Clickthrough API key could not be found at:')
logger.warning(api_key_file)
api_key = None
@lru_cache(maxsize=100)
def get_metadata(doi):
"""Returns the metadata of an article given its DOI from CrossRef
as a JSON dict"""
url = crossref_url + 'works/' + doi
res = requests.get(url)
if res.status_code != 200:
logger.info('Could not get CrossRef metadata for DOI %s, code %d' %
(doi, res.status_code))
return None
raw_message = res.json()
metadata = raw_message.get('message')
return metadata
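# Example (illustrative; the DOI below is only a sample identifier):
#   meta = get_metadata('10.1038/nature12373')
# returns the 'message' dict of the CrossRef works response, typically with
# keys such as 'title', 'ISSN' and 'link'; results are memoized by the
# lru_cache decorator above.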
def get_fulltext_links(doi):
"""Return a list of links to the full text of an article given its DOI.
Each list entry is a dictionary with keys:
- URL: the URL to the full text
- content-type: e.g. text/xml or text/plain
- content-version
- intended-application: e.g. text-mining
"""
metadata = get_metadata(doi)
if metadata is None:
return None
links = metadata.get('link')
return links
def get_publisher(doi):
metadata = get_metadata(doi)
if metadata is None:
return None
publisher = metadata.get('publisher')
return publisher
def get_url(doi):
metadata = get_metadata(doi)
if metadata is None:
return None
url = metadata.get('URL')
return url
def get_license_links(doi):
metadata = get_metadata(doi)
if metadata is None:
return None
licenses = metadata.get('license')
if licenses is None:
return None
urls = [l.get('URL') for l in licenses]
return urls
def doi_query(pmid, search_limit=10):
"""Get the DOI for a PMID by matching CrossRef and Pubmed metadata.
    Searches CrossRef using the article title and accepts a search hit only if
    its journal ISSN and first page number match those obtained from the
    Pubmed database.
"""
# Get article metadata from PubMed
pubmed_meta_dict = pubmed_client.get_metadata_for_ids([pmid],
get_issns_from_nlm=True)
if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None:
logger.warning('No metadata found in Pubmed for PMID%s' % pmid)
return None
# The test above ensures we've got this now
pubmed_meta = pubmed_meta_dict[pmid]
# Check if we already got a DOI from Pubmed itself!
if pubmed_meta.get('doi'):
return pubmed_meta.get('doi')
# Check for the title, which we'll need for the CrossRef search
pm_article_title = pubmed_meta.get('title')
if pm_article_title is None:
logger.warning('No article title found in Pubmed for PMID%s' % pmid)
return None
# Get the ISSN list
pm_issn_list = pubmed_meta.get('issn_list')
if not pm_issn_list:
logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid)
return None
# Get the page number
pm_page = pubmed_meta.get('page')
if not pm_page:
logger.debug('No page number found in Pubmed for PMID%s' % pmid)
return None
# Now query CrossRef using the title we've got
url = crossref_search_url
params = {'q': pm_article_title, 'sort': 'score'}
try:
res = requests.get(crossref_search_url, params)
except requests.exceptions.ConnectionError as e:
logger.error('CrossRef service could not be reached.')
logger.error(e)
return None
if res.status_code != 200:
logger.info('PMID%s: no search results from CrossRef, code %d' %
(pmid, res.status_code))
return None
raw_message = res.json()
mapped_doi = None
# Iterate over the search results, looking up XREF metadata
for result_ix, result in enumerate(raw_message):
if result_ix > search_limit:
logger.info('PMID%s: No match found within first %s results, '
'giving up!' % (pmid, search_limit))
break
xref_doi_url = result['doi']
# Strip the URL prefix off of the DOI
m = re.match('^http://dx.doi.org/(.*)$', xref_doi_url)
xref_doi = m.groups()[0]
# Get the XREF metadata using the DOI
xref_meta = get_metadata(xref_doi)
if xref_meta is None:
continue
xref_issn_list = xref_meta.get('ISSN')
xref_page = xref_meta.get('page')
# If there's no ISSN info for this article, skip to the next result
if not xref_issn_list:
logger.debug('No ISSN found for DOI %s, skipping' % xref_doi_url)
continue
# If there's no page info for this article, skip to the next result
if not xref_page:
logger.debug('No page number found for DOI %s, skipping' %
xref_doi_url)
continue
# Now check for an ISSN match by looking for the set intersection
# between the Pubmed ISSN list and the CrossRef ISSN list.
matching_issns = set(pm_issn_list).intersection(set(xref_issn_list))
# Before comparing page numbers, regularize the page numbers a bit.
# Note that we only compare the first page number, since frequently
# the final page number will simply be missing in one of the data
# sources. We also canonicalize page numbers of the form '14E' to
# 'E14' (which is the format used by Pubmed).
pm_start_page = pm_page.split('-')[0].upper()
xr_start_page = xref_page.split('-')[0].upper()
if xr_start_page.endswith('E'):
xr_start_page = 'E' + xr_start_page[:-1]
# Now compare the ISSN list and page numbers
if matching_issns and pm_start_page == xr_start_page:
# We found a match!
mapped_doi = xref_doi
break
# Otherwise, keep looking through the results...
# Return a DOI, or None if we didn't find one that met our matching
# criteria
return mapped_doi
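# Example usage (illustrative; the PMID below is hypothetical):
#   doi = doi_query('12345678')
# fetches the Pubmed metadata for the PMID, searches CrossRef by title, and
# returns the matching DOI string, or None if no hit passes the ISSN and
# first-page checks described above.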
|
jmuhlich/indra
|
indra/literature/crossref_client.py
|
Python
|
bsd-2-clause
| 6,819
|
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
ext = Extension("CyNewtonRaphson",sources=["CyNewtonRaphson.pyx"],include_dirs=[numpy.get_include()])
setup(ext_modules=[ext],cmdclass={'build_ext' :build_ext},include_dirs=[numpy.get_include()],compiler_directives={'boundscheck': False,'wraparound':False})
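# Illustrative build note (added): with this setup script the extension is
# typically compiled in place with
#   python setup.py build_ext --inplace
# which produces the compiled CyNewtonRaphson module next to CyNewtonRaphson.pyx.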
|
tmaiwald/OSIM
|
OSIM/Simulation/CyCircuitAnalysis/setup.py
|
Python
|
bsd-2-clause
| 491
|
import ctypes as ct
import sdl2 as sdl
from ed2d.platforms import sdl2keymap as keymap
from ed2d.events import Events
from ed2d import cursor
MOUSE_LEFT = 1
MOUSE_MIDDLE = 2
MOUSE_RIGHT = 3
MOUSE_EX1 = 4
MOUSE_EX2 = 5
class SystemEvents(object):
    ''' Polls and dispatches SDL2 system events. '''
def __init__(self):
sdl.SDL_InitSubSystem(sdl.SDL_INIT_EVENTS)
def process(self):
'''
Processes the events polled from sdl.
        Custom events might be a possibility if we need them.
'''
event = sdl.SDL_Event()
while sdl.SDL_PollEvent(ct.byref(event)):
eventName = None
data = None
if event.type == sdl.SDL_QUIT:
eventName = 'quit'
data = ()
elif event.type == sdl.SDL_MOUSEMOTION:
eventName = 'mouse_move'
if cursor.is_relative():
data = (event.motion.xrel, event.motion.yrel)
else:
data = (event.motion.x, event.motion.y)
elif event.type == sdl.SDL_WINDOWEVENT:
winEvent = event.window
wEventName = event.window.event
                # For now this only supports one window.
                # If we need more than one later, we can add support then.
if wEventName == sdl.SDL_WINDOWEVENT_CLOSE:
eventName = 'window_close'
data = (winEvent.windowID)
elif wEventName == sdl.SDL_WINDOWEVENT_RESIZED:
eventName = 'window_resized'
data = (winEvent.windowID, winEvent.data1, winEvent.data2)
elif event.type == sdl.SDL_KEYUP:
if not event.key.repeat:
eventName = 'key_up'
keyID = keymap.keymap[event.key.keysym.scancode]
keyName = keymap.process_key_char(event.key.keysym.sym)
modKeys = keymap.process_modkeys(event.key.keysym.mod)
data = (keyName, keyID, modKeys)
elif event.type == sdl.SDL_KEYDOWN:
if not event.key.repeat:
eventName = 'key_down'
keyID = keymap.keymap[event.key.keysym.scancode]
keyName = keymap.process_key_char(event.key.keysym.sym)
modKeys = keymap.process_modkeys(event.key.keysym.mod)
data = (keyName, keyID, modKeys)
elif event.type == sdl.SDL_MOUSEBUTTONUP:
eventName = 'mouse_button_up'
data = (event.button.button, event.button.clicks, event.button.x, event.button.y)
elif event.type == sdl.SDL_MOUSEBUTTONDOWN:
eventName = 'mouse_button_down'
data = (event.button.button, event.button.clicks, event.button.x, event.button.y)
else:
# Will add more event types later
pass
if eventName is not None:
Events.broadcast_event(eventName, data)
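# Illustrative usage sketch (added, not part of the original module): a host
# main loop would typically create one SystemEvents instance and poll it once
# per frame, e.g.
#   events = SystemEvents()
#   while running:
#       events.process()
# Handlers registered with Events then receive the broadcast name/data pairs.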
__all__ = ['Events']
|
explosiveduck/ed2d
|
ed2d/platforms/sdl2events.py
|
Python
|
bsd-2-clause
| 3,068
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
spider_list = """douyu LOL channel
Lagou data
NetEase open courses
Guokr MOOC open courses
NetEase University
leetcode
other oj
"""
|
sharkspeed/dororis
|
packages/python/scrapy/example0/example0/spiders/spider_list.py
|
Python
|
bsd-2-clause
| 167
|
"""
@package mi.dataset.driver.dosta_abcdjm.cspp.test.test_driver
@file marine-integrations/mi/dataset/driver/dosta_abcdjm/cspp/driver.py
@author Mark Worden
@brief Test cases for dosta_abcdjm_cspp driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'Mark Worden'
__license__ = 'Apache 2.0'
import hashlib
import os
from nose.plugins.attrib import attr
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
from mi.core.log import get_logger
log = get_logger()
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.dataset.dataset_driver import \
DataSourceConfigKey, \
DataSetDriverConfigKeys, \
DriverParameter
from mi.dataset.driver.dosta_abcdjm.cspp.driver import DostaAbcdjmCsppDataSetDriver, DataTypeKey
from mi.dataset.parser.cspp_base import StateKey
from mi.dataset.parser.dosta_abcdjm_cspp import \
DostaAbcdjmCsppInstrumentRecoveredDataParticle, \
DostaAbcdjmCsppInstrumentTelemeteredDataParticle, \
DostaAbcdjmCsppMetadataRecoveredDataParticle, \
DostaAbcdjmCsppMetadataTelemeteredDataParticle, \
DataParticleType
DIR_REC = '/tmp/dsatest_rec'
DIR_TEL = '/tmp/dsatest_tel'
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.dosta_abcdjm.cspp.driver',
driver_class='DostaAbcdjmCsppDataSetDriver',
agent_resource_id='123xyz',
agent_name='Agent007',
agent_packet_config=DostaAbcdjmCsppDataSetDriver.stream_config(),
startup_config={
DataSourceConfigKey.RESOURCE_ID: 'dosta_abcdjm_cspp',
DataSourceConfigKey.HARVESTER:
{
DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED:
{
DataSetDriverConfigKeys.DIRECTORY: DIR_REC,
DataSetDriverConfigKeys.PATTERN: '*_PPB_OPT.txt',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataTypeKey.DOSTA_ABCDJM_CSPP_TELEMETERED:
{
DataSetDriverConfigKeys.DIRECTORY: DIR_TEL,
DataSetDriverConfigKeys.PATTERN: '*_PPD_OPT.txt',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
},
DataSourceConfigKey.PARSER: {
DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED: {},
DataTypeKey.DOSTA_ABCDJM_CSPP_TELEMETERED: {},
}
}
)
REC_PARTICLES = (DostaAbcdjmCsppMetadataRecoveredDataParticle,
DostaAbcdjmCsppInstrumentRecoveredDataParticle)
TEL_PARTICLES = (DostaAbcdjmCsppMetadataTelemeteredDataParticle,
DostaAbcdjmCsppInstrumentTelemeteredDataParticle)
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def get_file_state(self, path, ingested=False, position=None, metadata_extracted=False):
"""
Create a parser state object for a file and return it.
"""
if position is None:
position = 0
mod_time = os.path.getmtime(path)
file_size = os.path.getsize(path)
with open(path) as filehandle:
md5_checksum = hashlib.md5(filehandle.read()).hexdigest()
parser_state = {
StateKey.POSITION: position,
StateKey.METADATA_EXTRACTED: metadata_extracted
}
return {
'ingested': ingested,
'file_mod_date': mod_time,
'file_checksum': md5_checksum,
'file_size': file_size,
'parser_state': parser_state
}
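    # Illustrative note (added): the dict built above mirrors the driver memento
    # format, so a test can pass it to self._get_driver_object(memento=...) to
    # simulate resuming partway through a file, as test_midstate_start() does.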
def test_get(self):
"""
Test that we can get data from files.
"""
# Clear the asynchronous callback results
self.clear_async_data()
# Notify the driver to start sampling
self.driver.start_sampling()
# Test simple recovered data handling
self.create_sample_data_set_dir('11079419_PPB_OPT.txt', DIR_REC)
self.assert_data(REC_PARTICLES, '11079419_PPB_OPT.yml', count=20, timeout=10)
# Test simple telemetered data handling
self.create_sample_data_set_dir('11194982_PPD_OPT.txt', DIR_TEL)
self.assert_data(TEL_PARTICLES, '11194982_PPD_OPT.yml', count=18, timeout=10)
def test_midstate_start(self):
"""
        Test the ability to start the driver from a saved mid-file parser state
"""
recovered_file_one = '11079419_PPB_OPT.txt'
telemetered_file_one = '11194982_PPD_OPT.txt'
# Clear any existing sampling
self.clear_sample_data()
recovered_path_1 = self.create_sample_data_set_dir(recovered_file_one, DIR_REC)
telemetered_path_1 = self.create_sample_data_set_dir(telemetered_file_one, DIR_TEL)
state = {
DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED: {
recovered_file_one: self.get_file_state(recovered_path_1,
ingested=False,
position=521,
metadata_extracted=True),
},
DataTypeKey.DOSTA_ABCDJM_CSPP_TELEMETERED: {
telemetered_file_one: self.get_file_state(telemetered_path_1,
ingested=False,
position=433,
metadata_extracted=True),
}
}
driver = self._get_driver_object(memento=state)
# create some data to parse
self.clear_async_data()
driver.start_sampling()
# verify data is produced
self.assert_data(REC_PARTICLES, 'test_recovered_midstate_start.yml', count=1, timeout=10)
self.assert_data(TEL_PARTICLES, 'test_telemetered_midstate_start.yml', count=2, timeout=10)
def test_start_stop_resume(self):
"""
Test the ability to stop and restart the process
"""
recovered_file_one = '11079419_PPB_OPT.txt'
telemetered_file_one = '11194982_PPD_OPT.txt'
# Clear any existing sampling
self.clear_sample_data()
self.create_sample_data_set_dir(recovered_file_one, DIR_REC)
self.create_sample_data_set_dir(telemetered_file_one, DIR_TEL)
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data(REC_PARTICLES, 'test_recovered_start_stop_resume_one.yml', count=1, timeout=10)
self.assert_data(TEL_PARTICLES, 'test_telemetered_start_stop_resume_one.yml', count=1, timeout=10)
self.driver.stop_sampling()
self.driver.start_sampling()
# verify data is produced
self.assert_data(REC_PARTICLES, 'test_recovered_start_stop_resume_two.yml', count=4, timeout=10)
self.assert_data(TEL_PARTICLES, 'test_telemetered_start_stop_resume_two.yml', count=4, timeout=10)
self.driver.stop_sampling()
def test_sample_exception(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs
"""
# Start sampling.
self.driver.start_sampling()
self.clear_async_data()
self.create_sample_data_set_dir('BadDataRecord_PPB_OPT.txt', DIR_REC)
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def test_publish_path(self):
"""
Setup an agent/driver/harvester/parser and verify that data is
published out the agent
"""
log.info("=========== START QUAL TEST PUBLISH PATH =================")
self.create_sample_data_set_dir('11079419_PPB_OPT.txt', DIR_REC)
self.create_sample_data_set_dir('11194982_PPD_OPT.txt', DIR_TEL)
self.assert_initialize()
# get the recovered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED, 1, 10)
#get the recovered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_RECOVERED, 19, 40)
# combine the results
result1.extend(result2)
# check the results
self.assert_data_values(result1, '11079419_PPB_OPT.yml')
# get the telemetered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED, 1, 10)
        # get the telemetered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_TELEMETERED, 17, 40)
# combine the results
result1.extend(result2)
# check the results
self.assert_data_values(result1, '11194982_PPD_OPT.yml')
def test_large_import(self):
"""
Test importing a large number of samples from the file at once
"""
log.info("=========== START QUAL TEST LARGE IMPORT =================")
# using the same file for both telemetered and recovered because
# there are no large telemetered files available at this time
self.create_sample_data_set_dir('11079364_PPB_OPT.txt', DIR_REC)
self.create_sample_data_set_dir('11079364_PPB_OPT.txt', DIR_TEL, '11079364_PPD_OPT.txt')
self.assert_initialize()
# get the recovered metadata particle
self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED, 1, 10)
#get the recovered instrument particles
self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_RECOVERED, 271, 40)
# get the telemetered metadata particle
self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED, 1, 20)
# get the telemetered instrument particles
self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_TELEMETERED, 271, 40)
def test_stop_start(self):
"""
Test the agent's ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("=========== START QUAL TEST STOP START =================")
self.create_sample_data_set_dir('11079419_PPB_OPT.txt', DIR_REC)
self.create_sample_data_set_dir('11194982_PPD_OPT.txt', DIR_TEL)
#put the driver in command mode so it can be started and stopped
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.dataset_agent_client.set_resource(
{DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# get the recovered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED, 1, 10)
#get the first 5 recovered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_RECOVERED, 5, 40)
# combine the results
result1.extend(result2)
# check the results
self.assert_data_values(result1, 'test_recovered_stop_start_one.yml')
# get the telemetered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED, 1, 10)
# get the first 9 telemetered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_TELEMETERED, 9, 40)
# combine the results
result1.extend(result2)
# check the results
self.assert_data_values(result1, 'test_telemetered_stop_start_one.yml')
# stop sampling
self.assert_stop_sampling()
#restart sampling
self.assert_start_sampling()
#get the next 8 recovered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_RECOVERED, 8, 40)
self.assert_data_values(result2, 'test_recovered_stop_start_two.yml')
# get the next 4 telemetered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_TELEMETERED, 4, 40)
self.assert_data_values(result2, 'test_telemetered_stop_start_two.yml')
def test_shutdown_restart(self):
"""
Test a full stop of the dataset agent, then restart the agent
and confirm it restarts at the correct spot.
"""
log.info("========== START QUAL TEST SHUTDOWN RESTART ===============")
self.create_sample_data_set_dir('11079419_PPB_OPT.txt', DIR_REC)
self.create_sample_data_set_dir('11194982_PPD_OPT.txt', DIR_TEL)
#put the driver in command mode so it can be started and stopped
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.dataset_agent_client.set_resource(
{DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# get the recovered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED, 1, 10)
#get the first 5 recovered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_RECOVERED, 5, 40)
# combine the results
result1.extend(result2)
# check the results
self.assert_data_values(result1, 'test_recovered_stop_start_one.yml')
# get the telemetered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED, 1, 10)
# get the first 9 telemetered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_TELEMETERED, 9, 40)
# combine the results
result1.extend(result2)
# check the results
self.assert_data_values(result1, 'test_telemetered_stop_start_one.yml')
# stop sampling
self.assert_stop_sampling()
self.stop_dataset_agent_client()
# Re-start the agent
self.init_dataset_agent_client()
# Re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
#restart sampling
self.assert_start_sampling()
#get the next 8 recovered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_RECOVERED, 8, 40)
self.assert_data_values(result2, 'test_recovered_stop_start_two.yml')
# get the next 4 telemetered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_TELEMETERED, 4, 40)
self.assert_data_values(result2, 'test_telemetered_stop_start_two.yml')
def test_shutdown_restart_2(self):
"""
Test a full stop of the dataset agent, then restart the agent
and confirm it restarts at the correct spot.
This test verifies that the parser restarts correctly if only the metadata particle
was retrieved before the shutdown
"""
log.info("========== START QUAL TEST SHUTDOWN RESTART 2 ===============")
self.create_sample_data_set_dir('11079419_PPB_OPT.txt', DIR_REC)
self.create_sample_data_set_dir('11194982_PPD_OPT.txt', DIR_TEL)
#put the driver in command mode so it can be started and stopped
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.dataset_agent_client.set_resource(
{DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# get the recovered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED, 1, 10)
# check the results
self.assert_data_values(result1, 'test_recovered_stop_meta_one.yml')
# get the telemetered metadata particle
result1 = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED, 1, 10)
# check the results
self.assert_data_values(result1, 'test_telemetered_stop_meta_one.yml')
# stop sampling
self.assert_stop_sampling()
self.stop_dataset_agent_client()
# Re-start the agent
self.init_dataset_agent_client()
# Re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
#restart sampling
self.assert_start_sampling()
# verify the metadata queues are empty
self.assert_sample_queue_size(DataParticleType.METADATA_RECOVERED, 0)
self.assert_sample_queue_size(DataParticleType.METADATA_TELEMETERED, 0)
#get the first 5 recovered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_RECOVERED, 5, 40)
# check the results
self.assert_data_values(result2, 'test_recovered_stop_meta_two.yml')
# get the first 9 telemetered instrument particles
result2 = self.data_subscribers.get_samples(DataParticleType.INSTRUMENT_TELEMETERED, 9, 40)
# check the results
self.assert_data_values(result2, 'test_telemetered_stop_meta_two.yml')
def test_parser_exception(self):
"""
Test an exception is raised after the driver is started during
record parsing.
"""
log.info("=========== START QUAL TEST PARSER EXCEPTION =================")
self.create_sample_data_set_dir('BadDataRecord_PPB_OPT.txt', DIR_REC)
self.assert_initialize()
self.assert_event_received(ResourceAgentErrorEvent, 10)
|
ooici/marine-integrations
|
mi/dataset/driver/dosta_abcdjm/cspp/test/test_driver.py
|
Python
|
bsd-2-clause
| 18,459
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, print_function, division,
absolute_import)
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'content', 'author')
|
roam/machete
|
tests/forms.py
|
Python
|
bsd-2-clause
| 306
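A hedged sketch of how a ModelForm like the one above is typically driven from a view; the view function, template name, and URL name are illustrative and not taken from the project itself.

from django.shortcuts import redirect, render

from .forms import PostForm  # assumed import path for the form above


def create_post(request):
    # bind to POST data on submit, otherwise render an unbound form
    form = PostForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()  # creates the Post from the validated title/content/author
        return redirect('post-list')  # illustrative URL name
    return render(request, 'post_form.html', {'form': form})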
|
# coding: utf-8
from django.db import migrations, models
import onadata.apps.logger.fields
class Migration(migrations.Migration):
dependencies = [
('logger', '0008_add_instance_is_synced_with_mongo_and_xform_has_kpi_hooks'),
]
operations = [
migrations.AddField(
model_name='instance',
name='posted_to_kpi',
field=onadata.apps.logger.fields.LazyDefaultBooleanField(default=False),
),
]
|
kobotoolbox/kobocat
|
onadata/apps/logger/migrations/0009_add_posted_to_kpi_field_to_logger_instance.py
|
Python
|
bsd-2-clause
| 466
|
# -*- coding: utf-8 -*-
"""
S-expr parser, a LISP front-end for Python
"""
import re
class Parser(object):
RE_NUM = re.compile(r'^[+-]?[0-9]+(\.[0-9]+)?')
RE_STR = re.compile(r'^\"(\\\\|\\\"|[^"])*\"', re.DOTALL)
RE_SYM = re.compile(r'^[^0-9\s\(\)\[\]{}\",][^\s\(\)\[\]{}\",]*')
RE_LITERAL = re.compile(r'^<!\[([a-zA-Z0-9-_]*)\[(.*)\]\]>?', re.DOTALL)
RE_BLANK = re.compile(r'^[\s,]*(;[^\r\n]*[\r\n\s,]+)*', re.DOTALL)
PARN = {
'(' : ('tuple', ')'),
'[' : ('list', ']'),
'{' : ('dict', '}')}
def __init__(self, prefixes, types):
self.prefixes = prefixes
self.types = types
@staticmethod
def next(src, pattern):
if isinstance(pattern, basestring):
if src.startswith(pattern):
return (pattern,), src[len(pattern):]
else:
return None, src
else:
m = pattern.search(src)
if m is not None and m.start() == 0:
return (src[:m.end()],) + m.groups(), src[m.end():]
else:
return None, src
def parse(self, src):
# Skip leading blanks and comments
unused_match, src = Parser.next(src, Parser.RE_BLANK)
if not src:
raise StopIteration
if src[0] in Parser.PARN:
typename, endch = Parser.PARN[src[0]]
unused, src = Parser.next(src[1:], Parser.RE_BLANK)
output = []
while src and src[0] != endch:
elim, src = self.parse(src)
output.append(elim)
unused, src = Parser.next(src, Parser.RE_BLANK)
if not src:
raise SyntaxError, src
return self.types[typename](output), src[1:]
# prefixes char (quote etc)
if src[0] in self.prefixes and (
len(src) > 1 and src[1] not in ' \t\r\n'):
prefix_func = self.prefixes[src[0]]
ret, src = self.parse(src[1:])
return prefix_func(ret), src
m, src = Parser.next(src, Parser.RE_LITERAL)
if m is not None:
unused, typename, content = m
return self.types[typename](content), src
m, src = Parser.next(src, Parser.RE_STR)
if m is not None:
return self.types['str'](m[0]), src
m, src = Parser.next(src, Parser.RE_NUM)
if m is not None:
return self.types['num'](m[0]), src
# Otherwise, treat it as a symbol
m, src = Parser.next(src, Parser.RE_SYM)
if m is not None:
return self.types['sym'](m[0]), src
raise SyntaxError, src[:80]
def parseall(self, src):
output = []
while True:
try:
obj, src = self.parse(src)
output.append(obj)
except StopIteration:
break
return output
if __name__ == '__main__':
import sys
parser = Parser(
prefixes = {'\'' : lambda x: ('QUOTE', x)},
types = {
'num' : eval,
'str' : eval,
'sym' : lambda x: '<SYM %s>' % (x,),
'' : lambda x: x,
'py' : lambda x: '<PY %s>' % (x,),
'tuple' : tuple,
'list' : lambda x: ('list',) + tuple(x),
'dict' : lambda x: ('dict',) + tuple(x)}
)
print parser.parseall(sys.stdin.read())
|
kuangyh/chord
|
src/solo/sexpr.py
|
Python
|
bsd-2-clause
| 3,554
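A minimal usage sketch for the S-expression parser above, assuming it is importable as solo.sexpr (matching the path shown) and run under Python 2, which its print statement and basestring check require; the prefix and type tables simply mirror the module's own __main__ block.

from solo.sexpr import Parser  # import path assumed from the file location above

parser = Parser(
    prefixes={"'": lambda x: ('QUOTE', x)},
    types={
        'num': eval,
        'str': eval,
        'sym': lambda x: '<SYM %s>' % (x,),
        '': lambda x: x,
        'py': lambda x: '<PY %s>' % (x,),
        'tuple': tuple,
        'list': lambda x: ('list',) + tuple(x),
        'dict': lambda x: ('dict',) + tuple(x)})

# parseall returns one converted object per top-level form
forms = parser.parseall('(add 1 2) [1 2 3] "hi" \'sym')
# -> [('<SYM add>', 1, 2), ('list', 1, 2, 3), 'hi', ('QUOTE', '<SYM sym>')]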
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
import socket
import sys
from pysnmp import error
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.carrier.asyncore.dgram import udp6
from pysnmp.hlapi.transport import AbstractTransportTarget
__all__ = ['Udp6TransportTarget', 'UdpTransportTarget']
class UdpTransportTarget(AbstractTransportTarget):
"""Represent UDP/IPv4 transport endpoint.
This object can be used for passing UDP/IPv4 configuration information
to the low-level SNMP applications.
See :RFC:`1906#section-3` for more information on the UDP transport mapping.
Parameters
----------
transportAddr: :py:class:`tuple`
Indicates remote address in Python :py:mod:`socket` module format
which is a tuple of FQDN, port where FQDN is a string representing
either hostname or IPv4 address in quad-dotted form, port is an
integer.
timeout: :py:class:`int`
Response timeout in seconds.
retries: :py:class:`int`
Maximum number of request retries, 0 retries means just a single
request.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import UdpTransportTarget
>>> UdpTransportTarget(('demo.snmplabs.com', 161))
UdpTransportTarget(('195.218.195.228', 161), timeout=1, retries=5)
>>>
"""
TRANSPORT_DOMAIN = udp.DOMAIN_NAME
PROTO_TRANSPORT = udp.UdpSocketTransport
def _resolveAddr(self, transportAddr):
try:
return socket.getaddrinfo(transportAddr[0],
transportAddr[1],
socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)[0][4][:2]
except socket.gaierror as exc:
raise error.PySnmpError('Bad IPv4/UDP transport address %s: %s' % (
'@'.join([str(x) for x in transportAddr]), exc))
class Udp6TransportTarget(AbstractTransportTarget):
"""Represent UDP/IPv6 transport endpoint.
This object can be used for passing UDP/IPv6 configuration information
to the low-level SNMP applications.
See :RFC:`1906#section-3`, :RFC:`2851#section-4` for more information
on the UDP and IPv6 transport mapping.
Parameters
----------
transportAddr : tuple
Indicates remote address in Python :py:mod:`socket` module format
which is a tuple of FQDN, port where FQDN is a string representing
either hostname or IPv6 address in one of three conventional forms
(:RFC:`1924#section-3`), port is an integer.
timeout : int
Response timeout in seconds.
retries : int
Maximum number of request retries, 0 retries means just a single
request.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import Udp6TransportTarget
>>> Udp6TransportTarget(('google.com', 161))
Udp6TransportTarget(('2a00:1450:4014:80a::100e', 161), timeout=1, retries=5)
>>> Udp6TransportTarget(('FEDC:BA98:7654:3210:FEDC:BA98:7654:3210', 161))
Udp6TransportTarget(('fedc:ba98:7654:3210:fedc:ba98:7654:3210', 161), timeout=1, retries=5)
>>> Udp6TransportTarget(('1080:0:0:0:8:800:200C:417A', 161))
Udp6TransportTarget(('1080::8:800:200c:417a', 161), timeout=1, retries=5)
>>> Udp6TransportTarget(('::0', 161))
Udp6TransportTarget(('::', 161), timeout=1, retries=5)
>>> Udp6TransportTarget(('::', 161))
Udp6TransportTarget(('::', 161), timeout=1, retries=5)
>>>
"""
TRANSPORT_DOMAIN = udp6.DOMAIN_NAME
PROTO_TRANSPORT = udp6.Udp6SocketTransport
def _resolveAddr(self, transportAddr):
try:
return socket.getaddrinfo(transportAddr[0],
transportAddr[1],
socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)[0][4][:2]
except socket.gaierror as exc:
raise error.PySnmpError('Bad IPv6/UDP transport address %s: %s' % (
'@'.join([str(x) for x in transportAddr]), exc))
|
etingof/pysnmp
|
pysnmp/hlapi/v1arch/asyncore/transport.py
|
Python
|
bsd-2-clause
| 4,270
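A short construction example for the two transport classes above; the import line and the timeout/retries keywords come straight from the class docstrings, and resolving 'demo.snmplabs.com' of course needs working DNS.

from pysnmp.hlapi.v1arch.asyncore import Udp6TransportTarget, UdpTransportTarget

# IPv4 endpoint; timeout/retries are the keyword arguments documented above
ipv4_target = UdpTransportTarget(('demo.snmplabs.com', 161), timeout=2, retries=1)

# IPv6 endpoint; any conventional textual IPv6 form is accepted and normalized
ipv6_target = Udp6TransportTarget(('::1', 161))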
|
from django.conf import settings
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Drop and re-create the database"
def handle_noargs(self, **options):
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql_psycopg2':
self.pgsql_handle_noargs(**options)
else:
self.mysql_handle_noargs(**options)
def pgsql_handle_noargs(self, **options):
import psycopg2
db = psycopg2.connect(database='postgres', #settings.DATABASES['default']['NAME'],
host=settings.DATABASES['default']['HOST'],
user=settings.DATABASES['default']['USER'],
password=settings.DATABASES['default']['PASSWORD'])
cur = db.cursor()
cur.execute("drop database %s; create database %s;" % (
settings.DATABASES['default']['NAME'], settings.DATABASES['default']['NAME']))
print "Dropped"
def mysql_handle_noargs(self, **options):
import MySQLdb
print "Connecting..."
db = MySQLdb.connect(
host=settings.DATABASES['default']['HOST'] or "localhost",
user=settings.DATABASES['default']['USER'],
passwd=settings.DATABASES['default']['PASSWORD'],
port=int(settings.DATABASES['default']['PORT'] or 3306))
cursor = db.cursor()
print "Dropping database %s" % settings.DATABASES['default']['NAME']
cursor.execute("drop database %s; create database %s;" % (
settings.DATABASES['default']['NAME'], settings.DATABASES['default']['NAME']))
print "Dropped"
|
fergalmoran/dss
|
spa/management/commands/drop.py
|
Python
|
bsd-2-clause
| 1,735
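Commands like the one above are normally run as python manage.py drop; a hedged sketch of the programmatic equivalent (NoArgsCommand ties this code to older Django releases):

from django.core.management import call_command

# equivalent to running `python manage.py drop`; this drops and recreates
# the configured database, so it is only suitable for development setups
call_command('drop')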
|
#!/usr/bin/env python
from distutils.core import setup, Extension
setup(
name = "h264decode",
version = "0.1",
description = "Module for simple decoding of raw H264 streams with external avcC (ISO/IEC 14496:15)",
ext_modules = [Extension( "h264decode",
sources=["yuvframe.c", "decoder.c", "h264decode.c"],
libraries=["avcodec", "avutil"],
library_dirs=["/usr/local/lib", "/usr/lib"])]
)
|
tzwenn/pyh264decode
|
setup.py
|
Python
|
bsd-2-clause
| 417
|
from unipath import FSPath as Path
from djangobench.base_settings import *
INSTALLED_APPS = ['template_render']
TEMPLATE_DIRS = [Path(__file__).parent.child('templates').absolute()]
ROOT_URLCONF = 'template_render.urls'
|
alex/djangobench
|
djangobench/benchmarks/template_render/settings.py
|
Python
|
bsd-3-clause
| 221
|
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.compat import (
is_platform_arm,
is_platform_mac,
)
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
MultiIndex,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer
from pandas.core.window import Rolling
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.rolling(2).sum()
df.rolling(2, min_periods=1).sum()
def test_constructor(frame_or_series):
# GH 12669
c = frame_or_series(range(5)).rolling
# valid
c(0)
c(window=2)
c(window=2, min_periods=1)
c(window=2, min_periods=1, center=True)
c(window=2, min_periods=1, center=False)
# GH 13383
msg = "window must be an integer 0 or greater"
with pytest.raises(ValueError, match=msg):
c(-1)
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
def test_invalid_constructor(frame_or_series, w):
# not valid
c = frame_or_series(range(5)).rolling
msg = (
"window must be an integer|"
"passed window foo is not compatible with a datetimelike index"
)
with pytest.raises(ValueError, match=msg):
c(window=w)
msg = "min_periods must be an integer"
with pytest.raises(ValueError, match=msg):
c(window=2, min_periods=w)
msg = "center must be a boolean"
with pytest.raises(ValueError, match=msg):
c(window=2, min_periods=1, center=w)
@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3)])
def test_constructor_with_timedelta_window(window):
# GH 15440
n = 10
df = DataFrame(
{"value": np.arange(n)},
index=date_range("2015-12-24", periods=n, freq="D"),
)
expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))
result = df.rolling(window=window).sum()
expected = DataFrame(
{"value": expected_data},
index=date_range("2015-12-24", periods=n, freq="D"),
)
tm.assert_frame_equal(result, expected)
expected = df.rolling("3D").sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3), "3D"])
def test_constructor_timedelta_window_and_minperiods(window, raw):
# GH 15305
n = 10
df = DataFrame(
{"value": np.arange(n)},
index=date_range("2017-08-08", periods=n, freq="D"),
)
expected = DataFrame(
{"value": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))},
index=date_range("2017-08-08", periods=n, freq="D"),
)
result_roll_sum = df.rolling(window=window, min_periods=2).sum()
result_roll_generic = df.rolling(window=window, min_periods=2).apply(sum, raw=raw)
tm.assert_frame_equal(result_roll_sum, expected)
tm.assert_frame_equal(result_roll_generic, expected)
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(method):
# see gh-12811
r = Rolling(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, method)(dtype=np.float64)
def test_closed_fixed(closed, arithmetic_win_operators):
# GH 34315
func_name = arithmetic_win_operators
df_fixed = DataFrame({"A": [0, 1, 2, 3, 4]})
df_time = DataFrame({"A": [0, 1, 2, 3, 4]}, index=date_range("2020", periods=5))
result = getattr(
df_fixed.rolling(2, closed=closed, min_periods=1),
func_name,
)()
expected = getattr(
df_time.rolling("2D", closed=closed, min_periods=1),
func_name,
)().reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"closed, window_selections",
[
(
"both",
[
[True, True, False, False, False],
[True, True, True, False, False],
[False, True, True, True, False],
[False, False, True, True, True],
[False, False, False, True, True],
],
),
(
"left",
[
[True, False, False, False, False],
[True, True, False, False, False],
[False, True, True, False, False],
[False, False, True, True, False],
[False, False, False, True, True],
],
),
(
"right",
[
[True, True, False, False, False],
[False, True, True, False, False],
[False, False, True, True, False],
[False, False, False, True, True],
[False, False, False, False, True],
],
),
(
"neither",
[
[True, False, False, False, False],
[False, True, False, False, False],
[False, False, True, False, False],
[False, False, False, True, False],
[False, False, False, False, True],
],
),
],
)
def test_datetimelike_centered_selections(
closed, window_selections, arithmetic_win_operators
):
# GH 34315
func_name = arithmetic_win_operators
df_time = DataFrame(
{"A": [0.0, 1.0, 2.0, 3.0, 4.0]}, index=date_range("2020", periods=5)
)
expected = DataFrame(
{"A": [getattr(df_time["A"].iloc[s], func_name)() for s in window_selections]},
index=date_range("2020", periods=5),
)
if func_name == "sem":
kwargs = {"ddof": 0}
else:
kwargs = {}
result = getattr(
df_time.rolling("2D", closed=closed, min_periods=1, center=True),
func_name,
)(**kwargs)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_even_number_window_alignment():
# see discussion in GH 38780
s = Series(range(3), index=date_range(start="2020-01-01", freq="D", periods=3))
# behavior of index- and datetime-based windows differs here!
# s.rolling(window=2, min_periods=1, center=True).mean()
result = s.rolling(window="2D", min_periods=1, center=True).mean()
expected = Series([0.5, 1.5, 2], index=s.index)
tm.assert_series_equal(result, expected)
def test_closed_fixed_binary_col(center):
# GH 34315
data = [0, 1, 1, 0, 0, 1, 0, 1]
df = DataFrame(
{"binary_col": data},
index=date_range(start="2020-01-01", freq="min", periods=len(data)),
)
if center:
expected_data = [2 / 3, 0.5, 0.4, 0.5, 0.428571, 0.5, 0.571429, 0.5]
else:
expected_data = [np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571]
expected = DataFrame(
expected_data,
columns=["binary_col"],
index=date_range(start="2020-01-01", freq="min", periods=len(expected_data)),
)
rolling = df.rolling(window=len(df), closed="left", min_periods=1, center=center)
result = rolling.mean()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("closed", ["neither", "left"])
def test_closed_empty(closed, arithmetic_win_operators):
# GH 26005
func_name = arithmetic_win_operators
ser = Series(data=np.arange(5), index=date_range("2000", periods=5, freq="2D"))
roll = ser.rolling("1D", closed=closed)
result = getattr(roll, func_name)()
expected = Series([np.nan] * 5, index=ser.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_closed_one_entry(func):
# GH24718
ser = Series(data=[2], index=date_range("2000", periods=1))
result = getattr(ser.rolling("10D", closed="left"), func)()
tm.assert_series_equal(result, Series([np.nan], index=ser.index))
@pytest.mark.parametrize("func", ["min", "max"])
def test_closed_one_entry_groupby(func):
# GH24718
ser = DataFrame(
data={"A": [1, 1, 2], "B": [3, 2, 1]},
index=date_range("2000", periods=3),
)
result = getattr(
ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func
)()
exp_idx = MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], names=("A", None))
expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("input_dtype", ["int", "float"])
@pytest.mark.parametrize(
"func,closed,expected",
[
("min", "right", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),
("min", "both", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),
("min", "neither", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),
("min", "left", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),
("max", "right", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
("max", "both", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
("max", "neither", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),
("max", "left", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),
],
)
def test_closed_min_max_datetime(input_dtype, func, closed, expected):
# see gh-21704
ser = Series(
data=np.arange(10).astype(input_dtype),
index=date_range("2000", periods=10),
)
result = getattr(ser.rolling("3D", closed=closed), func)()
expected = Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
def test_closed_uneven():
# see gh-21704
ser = Series(data=np.arange(10), index=date_range("2000", periods=10))
# uneven
ser = ser.drop(index=ser.index[[1, 5]])
result = ser.rolling("3D", closed="left").min()
expected = Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func,closed,expected",
[
("min", "right", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),
("min", "both", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),
("min", "neither", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),
("min", "left", [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]),
("max", "right", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),
("max", "both", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),
("max", "neither", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),
("max", "left", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]),
],
)
def test_closed_min_max_minp(func, closed, expected):
# see gh-21704
ser = Series(data=np.arange(10), index=date_range("2000", periods=10))
ser[ser.index[-3:]] = np.nan
result = getattr(ser.rolling("3D", min_periods=2, closed=closed), func)()
expected = Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"closed,expected",
[
("right", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]),
("both", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),
("neither", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),
("left", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]),
],
)
def test_closed_median_quantile(closed, expected):
# GH 26005
ser = Series(data=np.arange(10), index=date_range("2000", periods=10))
roll = ser.rolling("3D", closed=closed)
expected = Series(expected, index=ser.index)
result = roll.median()
tm.assert_series_equal(result, expected)
result = roll.quantile(0.5)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("roller", ["1s", 1])
def tests_empty_df_rolling(roller):
# GH 15819 Verifies that datetime and integer rolling windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().rolling(roller).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer rolling windows can be applied to
# empty DataFrames with datetime index
expected = DataFrame(index=DatetimeIndex([]))
result = DataFrame(index=DatetimeIndex([])).rolling(roller).sum()
tm.assert_frame_equal(result, expected)
def test_empty_window_median_quantile():
# GH 26005
expected = Series([np.nan, np.nan, np.nan])
roll = Series(np.arange(3)).rolling(0)
result = roll.median()
tm.assert_series_equal(result, expected)
result = roll.quantile(0.1)
tm.assert_series_equal(result, expected)
def test_missing_minp_zero():
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = Series([np.nan])
result = x.rolling(1, min_periods=0).sum()
expected = Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.rolling(1, min_periods=1).sum()
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_missing_minp_zero_variable():
# https://github.com/pandas-dev/pandas/pull/18921
x = Series(
[np.nan] * 4,
index=DatetimeIndex(["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"]),
)
result = x.rolling(Timedelta("2d"), min_periods=0).sum()
expected = Series(0.0, index=x.index)
tm.assert_series_equal(result, expected)
def test_multi_index_names():
# GH 16789, 16825
cols = MultiIndex.from_product([["A", "B"], ["C", "D", "E"]], names=["1", "2"])
df = DataFrame(np.ones((10, 6)), columns=cols)
result = df.rolling(3).cov()
tm.assert_index_equal(result.columns, df.columns)
assert result.index.names == [None, "1", "2"]
def test_rolling_axis_sum(axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)})
else:
# axis == 1
expected = DataFrame([[np.nan] * 2 + [3.0] * 18] * 10)
result = df.rolling(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
def test_rolling_axis_count(axis_frame):
# see gh-26055
df = DataFrame({"x": range(3), "y": range(3)})
axis = df._get_axis_number(axis_frame)
if axis in [0, "index"]:
expected = DataFrame({"x": [1.0, 2.0, 2.0], "y": [1.0, 2.0, 2.0]})
else:
expected = DataFrame({"x": [1.0, 1.0, 1.0], "y": [2.0, 2.0, 2.0]})
result = df.rolling(2, axis=axis_frame, min_periods=0).count()
tm.assert_frame_equal(result, expected)
def test_readonly_array():
# GH-27766
arr = np.array([1, 3, np.nan, 3, 5])
arr.setflags(write=False)
result = Series(arr).rolling(2).mean()
expected = Series([np.nan, 2, np.nan, np.nan, 4])
tm.assert_series_equal(result, expected)
def test_rolling_datetime(axis_frame, tz_naive_fixture):
# GH-28192
tz = tz_naive_fixture
df = DataFrame(
{i: [1] * 2 for i in date_range("2019-8-01", "2019-08-03", freq="D", tz=tz)}
)
if axis_frame in [0, "index"]:
result = df.T.rolling("2D", axis=axis_frame).sum().T
else:
result = df.rolling("2D", axis=axis_frame).sum()
expected = DataFrame(
{
**{
i: [1.0] * 2
for i in date_range("2019-8-01", periods=1, freq="D", tz=tz)
},
**{
i: [2.0] * 2
for i in date_range("2019-8-02", "2019-8-03", freq="D", tz=tz)
},
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"center, expected_data",
[
(
True,
(
[88.0] * 7
+ [97.0] * 9
+ [98.0]
+ [99.0] * 21
+ [95.0] * 16
+ [93.0] * 5
+ [89.0] * 5
+ [96.0] * 21
+ [94.0] * 14
+ [90.0] * 13
+ [88.0] * 2
+ [90.0] * 9
+ [96.0] * 21
+ [95.0] * 6
+ [91.0]
+ [87.0] * 6
+ [92.0] * 21
+ [83.0] * 2
+ [86.0] * 10
+ [87.0] * 5
+ [98.0] * 21
+ [97.0] * 14
+ [93.0] * 7
+ [87.0] * 4
+ [86.0] * 4
+ [95.0] * 21
+ [85.0] * 14
+ [83.0] * 2
+ [76.0] * 5
+ [81.0] * 2
+ [98.0] * 21
+ [95.0] * 14
+ [91.0] * 7
+ [86.0]
+ [93.0] * 3
+ [95.0] * 29
+ [77.0] * 2
),
),
(
False,
(
[np.nan] * 2
+ [88.0] * 16
+ [97.0] * 9
+ [98.0]
+ [99.0] * 21
+ [95.0] * 16
+ [93.0] * 5
+ [89.0] * 5
+ [96.0] * 21
+ [94.0] * 14
+ [90.0] * 13
+ [88.0] * 2
+ [90.0] * 9
+ [96.0] * 21
+ [95.0] * 6
+ [91.0]
+ [87.0] * 6
+ [92.0] * 21
+ [83.0] * 2
+ [86.0] * 10
+ [87.0] * 5
+ [98.0] * 21
+ [97.0] * 14
+ [93.0] * 7
+ [87.0] * 4
+ [86.0] * 4
+ [95.0] * 21
+ [85.0] * 14
+ [83.0] * 2
+ [76.0] * 5
+ [81.0] * 2
+ [98.0] * 21
+ [95.0] * 14
+ [91.0] * 7
+ [86.0]
+ [93.0] * 3
+ [95.0] * 20
),
),
],
)
def test_rolling_window_as_string(center, expected_data):
# see gh-22590
date_today = datetime.now()
days = date_range(date_today, date_today + timedelta(365), freq="D")
npr = np.random.RandomState(seed=421)
data = npr.randint(1, high=100, size=len(days))
df = DataFrame({"DateCol": days, "metric": data})
df.set_index("DateCol", inplace=True)
result = df.rolling(window="21D", min_periods=2, closed="left", center=center)[
"metric"
].agg("max")
index = days.rename("DateCol")
index = index._with_freq(None)
expected = Series(expected_data, index=index, name="metric")
tm.assert_series_equal(result, expected)
def test_min_periods1():
# GH#6795
df = DataFrame([0, 1, 2, 1, 0], columns=["a"])
result = df["a"].rolling(3, center=True, min_periods=1).max()
expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name="a")
tm.assert_series_equal(result, expected)
def test_rolling_count_with_min_periods(frame_or_series):
# GH 26996
result = frame_or_series(range(5)).rolling(3, min_periods=3).count()
expected = frame_or_series([np.nan, np.nan, 3.0, 3.0, 3.0])
tm.assert_equal(result, expected)
def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
# GH 26996
values = [1, 2, 3, np.nan, 4, 5, 6]
expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]
# GH 31302
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = frame_or_series(values).rolling(3).count()
expected = frame_or_series(expected_counts)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"df,expected,window,min_periods",
[
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
3,
None,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [2, 3], "B": [5, 6]}, [1, 2]),
],
2,
1,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [2, 3], "B": [5, 6]}, [1, 2]),
],
2,
2,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [2], "B": [5]}, [1]),
({"A": [3], "B": [6]}, [2]),
],
1,
1,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [2], "B": [5]}, [1]),
({"A": [3], "B": [6]}, [2]),
],
1,
0,
),
(DataFrame({"A": [1], "B": [4]}), [], 2, None),
(DataFrame({"A": [1], "B": [4]}), [], 2, 1),
(DataFrame(), [({}, [])], 2, None),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
3,
2,
),
],
)
def test_iter_rolling_dataframe(df, expected, window, min_periods):
# GH 11704
expected = [DataFrame(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(
expected, df.rolling(window, min_periods=min_periods)
):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"expected,window",
[
(
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [2, 3], "B": [5, 6]}, [1, 2]),
],
"2D",
),
(
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
"3D",
),
(
[
({"A": [1], "B": [4]}, [0]),
({"A": [2], "B": [5]}, [1]),
({"A": [3], "B": [6]}, [2]),
],
"1D",
),
],
)
def test_iter_rolling_on_dataframe(expected, window):
# GH 11704, 40373
df = DataFrame(
{
"A": [1, 2, 3, 4, 5],
"B": [4, 5, 6, 7, 8],
"C": date_range(start="2016-01-01", periods=5, freq="D"),
}
)
expected = [
DataFrame(values, index=df.loc[index, "C"]) for (values, index) in expected
]
for (expected, actual) in zip(expected, df.rolling(window, on="C")):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"ser,expected,window, min_periods",
[
(
Series([1, 2, 3]),
[([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],
3,
None,
),
(
Series([1, 2, 3]),
[([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],
3,
1,
),
(
Series([1, 2, 3]),
[([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])],
2,
1,
),
(
Series([1, 2, 3]),
[([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])],
2,
2,
),
(Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 0),
(Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 1),
(Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2, 0),
(Series([], dtype="int64"), [], 2, 1),
],
)
def test_iter_rolling_series(ser, expected, window, min_periods):
# GH 11704
expected = [Series(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(
expected, ser.rolling(window, min_periods=min_periods)
):
tm.assert_series_equal(actual, expected)
@pytest.mark.parametrize(
"expected,expected_index,window",
[
(
[[0], [1], [2], [3], [4]],
[
date_range("2020-01-01", periods=1, freq="D"),
date_range("2020-01-02", periods=1, freq="D"),
date_range("2020-01-03", periods=1, freq="D"),
date_range("2020-01-04", periods=1, freq="D"),
date_range("2020-01-05", periods=1, freq="D"),
],
"1D",
),
(
[[0], [0, 1], [1, 2], [2, 3], [3, 4]],
[
date_range("2020-01-01", periods=1, freq="D"),
date_range("2020-01-01", periods=2, freq="D"),
date_range("2020-01-02", periods=2, freq="D"),
date_range("2020-01-03", periods=2, freq="D"),
date_range("2020-01-04", periods=2, freq="D"),
],
"2D",
),
(
[[0], [0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]],
[
date_range("2020-01-01", periods=1, freq="D"),
date_range("2020-01-01", periods=2, freq="D"),
date_range("2020-01-01", periods=3, freq="D"),
date_range("2020-01-02", periods=3, freq="D"),
date_range("2020-01-03", periods=3, freq="D"),
],
"3D",
),
],
)
def test_iter_rolling_datetime(expected, expected_index, window):
# GH 11704
ser = Series(range(5), index=date_range(start="2020-01-01", periods=5, freq="D"))
expected = [
Series(values, index=idx) for (values, idx) in zip(expected, expected_index)
]
for (expected, actual) in zip(expected, ser.rolling(window)):
tm.assert_series_equal(actual, expected)
@pytest.mark.parametrize(
"grouping,_index",
[
(
{"level": 0},
MultiIndex.from_tuples(
[(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None]
),
),
(
{"by": "X"},
MultiIndex.from_tuples(
[(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=["X", None]
),
),
],
)
def test_rolling_positional_argument(grouping, _index, raw):
# GH 34605
def scaled_sum(*args):
if len(args) < 2:
raise ValueError("The function needs two arguments")
array, scale = args
return array.sum() / scale
df = DataFrame(data={"X": range(5)}, index=[0, 0, 1, 1, 1])
expected = DataFrame(data={"X": [0.0, 0.5, 1.0, 1.5, 2.0]}, index=_index)
# GH 40341
if "by" in grouping:
expected = expected.drop(columns="X", errors="ignore")
result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("add", [0.0, 2.0])
def test_rolling_numerical_accuracy_kahan_mean(add):
# GH: 36031 implementing kahan summation
df = DataFrame(
{"A": [3002399751580331.0 + add, -0.0, -0.0]},
index=[
Timestamp("19700101 09:00:00"),
Timestamp("19700101 09:00:03"),
Timestamp("19700101 09:00:06"),
],
)
result = (
df.resample("1s").ffill().rolling("3s", closed="left", min_periods=3).mean()
)
dates = date_range("19700101 09:00:00", periods=7, freq="S")
expected = DataFrame(
{
"A": [
np.nan,
np.nan,
np.nan,
3002399751580330.5,
2001599834386887.25,
1000799917193443.625,
0.0,
]
},
index=dates,
)
tm.assert_frame_equal(result, expected)
def test_rolling_numerical_accuracy_kahan_sum():
# GH: 13254
df = DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=["x"])
result = df["x"].rolling(3).sum()
expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name="x")
tm.assert_series_equal(result, expected)
def test_rolling_numerical_accuracy_jump():
# GH: 32761
index = date_range(start="2020-01-01", end="2020-01-02", freq="60s").append(
DatetimeIndex(["2020-01-03"])
)
data = np.random.rand(len(index))
df = DataFrame({"data": data}, index=index)
result = df.rolling("60s").mean()
tm.assert_frame_equal(result, df[["data"]])
def test_rolling_numerical_accuracy_small_values():
# GH: 10319
s = Series(
data=[0.00012456, 0.0003, -0.0, -0.0],
index=date_range("1999-02-03", "1999-02-06"),
)
result = s.rolling(1).mean()
tm.assert_series_equal(result, s)
def test_rolling_numerical_too_large_numbers():
# GH: 11645
dates = date_range("2015-01-01", periods=10, freq="D")
ds = Series(data=range(10), index=dates, dtype=np.float64)
ds[2] = -9e33
result = ds.rolling(5).mean()
expected = Series(
[
np.nan,
np.nan,
np.nan,
np.nan,
-1.8e33,
-1.8e33,
-1.8e33,
5.0,
6.0,
7.0,
],
index=dates,
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
("func", "value"),
[("sum", 2.0), ("max", 1.0), ("min", 1.0), ("mean", 1.0), ("median", 1.0)],
)
def test_rolling_mixed_dtypes_axis_1(func, value):
# GH: 20649
df = DataFrame(1, index=[1, 2], columns=["a", "b", "c"])
df["c"] = 1.0
result = getattr(df.rolling(window=2, min_periods=1, axis=1), func)()
expected = DataFrame(
{"a": [1.0, 1.0], "b": [value, value], "c": [value, value]},
index=[1, 2],
)
tm.assert_frame_equal(result, expected)
def test_rolling_axis_one_with_nan():
# GH: 35596
df = DataFrame(
[
[0, 1, 2, 4, np.nan, np.nan, np.nan],
[0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[0, 2, 2, np.nan, 2, np.nan, 1],
]
)
result = df.rolling(window=7, min_periods=1, axis="columns").sum()
expected = DataFrame(
[
[0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0],
[0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0],
[0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0],
]
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"value",
["test", to_datetime("2019-12-31"), to_timedelta("1 days 06:05:01.00003")],
)
def test_rolling_axis_1_non_numeric_dtypes(value):
# GH: 20649
df = DataFrame({"a": [1, 2]})
df["b"] = value
result = df.rolling(window=2, min_periods=1, axis=1).sum()
expected = DataFrame({"a": [1.0, 2.0]})
tm.assert_frame_equal(result, expected)
def test_rolling_on_df_transposed():
# GH: 32724
df = DataFrame({"A": [1, None], "B": [4, 5], "C": [7, 8]})
expected = DataFrame({"A": [1.0, np.nan], "B": [5.0, 5.0], "C": [11.0, 13.0]})
result = df.rolling(min_periods=1, window=2, axis=1).sum()
tm.assert_frame_equal(result, expected)
result = df.T.rolling(min_periods=1, window=2).sum().T
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("index", "window"),
[
(
period_range(start="2020-01-01 08:00", end="2020-01-01 08:08", freq="T"),
"2T",
),
(
period_range(start="2020-01-01 08:00", end="2020-01-01 12:00", freq="30T"),
"1h",
),
],
)
@pytest.mark.parametrize(
("func", "values"),
[
("min", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6]),
("max", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7]),
("sum", [np.nan, 0, 1, 3, 5, 7, 9, 11, 13]),
],
)
def test_rolling_period_index(index, window, func, values):
# GH: 34225
ds = Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)
result = getattr(ds.rolling(window, closed="left"), func)()
expected = Series(values, index=index)
tm.assert_series_equal(result, expected)
def test_rolling_sem(frame_or_series):
# GH: 26476
obj = frame_or_series([0, 1, 2])
result = obj.rolling(2, min_periods=1).sem()
if isinstance(result, DataFrame):
result = Series(result[0].values)
expected = Series([np.nan] + [0.7071067811865476] * 2)
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(is_platform_arm() and not is_platform_mac(), reason="GH 38921")
@pytest.mark.parametrize(
("func", "third_value", "values"),
[
("var", 1, [5e33, 0, 0.5, 0.5, 2, 0]),
("std", 1, [7.071068e16, 0, 0.7071068, 0.7071068, 1.414214, 0]),
("var", 2, [5e33, 0.5, 0, 0.5, 2, 0]),
("std", 2, [7.071068e16, 0.7071068, 0, 0.7071068, 1.414214, 0]),
],
)
def test_rolling_var_numerical_issues(func, third_value, values):
# GH: 37051
ds = Series([99999999999999999, 1, third_value, 2, 3, 1, 1])
result = getattr(ds.rolling(2), func)()
expected = Series([np.nan] + values)
tm.assert_series_equal(result, expected)
def test_timeoffset_as_window_parameter_for_corr():
# GH: 28266
exp = DataFrame(
{
"B": [
np.nan,
np.nan,
0.9999999999999998,
-1.0,
1.0,
-0.3273268353539892,
0.9999999999999998,
1.0,
0.9999999999999998,
1.0,
],
"A": [
np.nan,
np.nan,
-1.0,
1.0000000000000002,
-0.3273268353539892,
0.9999999999999966,
1.0,
1.0000000000000002,
1.0,
1.0000000000000002,
],
},
index=MultiIndex.from_tuples(
[
(Timestamp("20130101 09:00:00"), "B"),
(Timestamp("20130101 09:00:00"), "A"),
(Timestamp("20130102 09:00:02"), "B"),
(Timestamp("20130102 09:00:02"), "A"),
(Timestamp("20130103 09:00:03"), "B"),
(Timestamp("20130103 09:00:03"), "A"),
(Timestamp("20130105 09:00:05"), "B"),
(Timestamp("20130105 09:00:05"), "A"),
(Timestamp("20130106 09:00:06"), "B"),
(Timestamp("20130106 09:00:06"), "A"),
]
),
)
df = DataFrame(
{"B": [0, 1, 2, 4, 3], "A": [7, 4, 6, 9, 3]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130102 09:00:02"),
Timestamp("20130103 09:00:03"),
Timestamp("20130105 09:00:05"),
Timestamp("20130106 09:00:06"),
],
)
res = df.rolling(window="3d").corr()
tm.assert_frame_equal(exp, res)
@pytest.mark.parametrize("method", ["var", "sum", "mean", "skew", "kurt", "min", "max"])
def test_rolling_decreasing_indices(method):
"""
Make sure that decreasing indices give the same results as increasing indices.
GH 36933
"""
df = DataFrame({"values": np.arange(-15, 10) ** 2})
df_reverse = DataFrame({"values": df["values"][::-1]}, index=df.index[::-1])
increasing = getattr(df.rolling(window=5), method)()
decreasing = getattr(df_reverse.rolling(window=5), method)()
assert np.abs(decreasing.values[::-1][:-4] - increasing.values[4:]).max() < 1e-12
@pytest.mark.parametrize(
"method,expected",
[
(
"var",
[
float("nan"),
43.0,
float("nan"),
136.333333,
43.5,
94.966667,
182.0,
318.0,
],
),
(
"mean",
[float("nan"), 7.5, float("nan"), 21.5, 6.0, 9.166667, 13.0, 17.5],
),
(
"sum",
[float("nan"), 30.0, float("nan"), 86.0, 30.0, 55.0, 91.0, 140.0],
),
(
"skew",
[
float("nan"),
0.709296,
float("nan"),
0.407073,
0.984656,
0.919184,
0.874674,
0.842418,
],
),
(
"kurt",
[
float("nan"),
-0.5916711736073559,
float("nan"),
-1.0028993131317954,
-0.06103844629409494,
-0.254143227116194,
-0.37362637362637585,
-0.45439658241367054,
],
),
],
)
def test_rolling_non_monotonic(method, expected):
"""
Make sure the (rare) branch of non-monotonic indices is covered by a test.
output from 1.1.3 is assumed to be the expected output. Output of sum/mean has
manually been verified.
GH 36933.
"""
# Based on an example found in computation.rst
use_expanding = [True, False, True, False, True, True, True, True]
df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if self.use_expanding[i]:
start[i] = 0
end[i] = i + 1
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=4, use_expanding=use_expanding)
result = getattr(df.rolling(indexer), method)()
expected = DataFrame({"values": expected})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("index", "window"),
[
([0, 1, 2, 3, 4], 2),
(date_range("2001-01-01", freq="D", periods=5), "2D"),
],
)
def test_rolling_corr_timedelta_index(index, window):
# GH: 31286
x = Series([1, 2, 3, 4, 5], index=index)
y = x.copy()
x[0:2] = 0.0
result = x.rolling(window).corr(y)
expected = Series([np.nan, np.nan, 1, 1, 1], index=index)
tm.assert_almost_equal(result, expected)
def test_groupby_rolling_nan_included():
# GH 35542
data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]}
df = DataFrame(data)
result = df.groupby("group", dropna=False).rolling(1, min_periods=1).mean()
expected = DataFrame(
{"B": [0.0, 2.0, 3.0, 1.0, 4.0]},
# GH-38057 from_tuples puts the NaNs in the codes, result expects them
# to be in the levels, at the moment
# index=MultiIndex.from_tuples(
# [("g1", 0), ("g1", 2), ("g2", 3), (np.nan, 1), (np.nan, 4)],
# names=["group", None],
# ),
index=MultiIndex(
[["g1", "g2", np.nan], [0, 1, 2, 3, 4]],
[[0, 0, 1, 2, 2], [0, 2, 3, 1, 4]],
names=["group", None],
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["skew", "kurt"])
def test_rolling_skew_kurt_numerical_stability(method):
# GH#6929
ser = Series(np.random.rand(10))
ser_copy = ser.copy()
expected = getattr(ser.rolling(3), method)()
tm.assert_series_equal(ser, ser_copy)
ser = ser + 50000
result = getattr(ser.rolling(3), method)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
("method", "values"),
[
("skew", [2.0, 0.854563, 0.0, 1.999984]),
("kurt", [4.0, -1.289256, -1.2, 3.999946]),
],
)
def test_rolling_skew_kurt_large_value_range(method, values):
# GH: 37557
s = Series([3000000, 1, 1, 2, 3, 4, 999])
result = getattr(s.rolling(4), method)()
expected = Series([np.nan] * 3 + values)
tm.assert_series_equal(result, expected)
def test_invalid_method():
with pytest.raises(ValueError, match="method must be 'table' or 'single"):
Series(range(1)).rolling(1, method="foo")
@pytest.mark.parametrize("window", [1, "1d"])
def test_rolling_descending_date_order_with_offset(window, frame_or_series):
# GH#40002
idx = date_range(start="2020-01-01", end="2020-01-03", freq="1d")
obj = frame_or_series(range(1, 4), index=idx)
result = obj.rolling("1d", closed="left").sum()
expected = frame_or_series([np.nan, 1, 2], index=idx)
tm.assert_equal(result, expected)
result = obj.iloc[::-1].rolling("1d", closed="left").sum()
idx = date_range(start="2020-01-03", end="2020-01-01", freq="-1d")
expected = frame_or_series([np.nan, 3, 2], index=idx)
tm.assert_equal(result, expected)
def test_rolling_var_floating_artifact_precision():
# GH 37051
s = Series([7, 5, 5, 5])
result = s.rolling(3).var()
expected = Series([np.nan, np.nan, 4 / 3, 0])
tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15)
def test_rolling_std_small_values():
# GH 37051
s = Series(
[
0.00000054,
0.00000053,
0.00000054,
]
)
result = s.rolling(2).std()
expected = Series([np.nan, 7.071068e-9, 7.071068e-9])
tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15)
@pytest.mark.parametrize(
"start, exp_values",
[
(1, [0.03, 0.0155, 0.0155, 0.011, 0.01025]),
(2, [0.001, 0.001, 0.0015, 0.00366666]),
],
)
def test_rolling_mean_all_nan_window_floating_artifacts(start, exp_values):
# GH#41053
df = DataFrame(
[
0.03,
0.03,
0.001,
np.NaN,
0.002,
0.008,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
0.005,
0.2,
]
)
values = exp_values + [
0.00366666,
0.005,
0.005,
0.008,
np.NaN,
np.NaN,
0.005,
0.102500,
]
expected = DataFrame(
values,
index=list(range(start, len(values) + start)),
)
result = df.iloc[start:].rolling(5, min_periods=0).mean()
tm.assert_frame_equal(result, expected)
def test_rolling_sum_all_nan_window_floating_artifacts():
# GH#41053
df = DataFrame([0.002, 0.008, 0.005, np.NaN, np.NaN, np.NaN])
result = df.rolling(3, min_periods=0).sum()
expected = DataFrame([0.002, 0.010, 0.015, 0.013, 0.005, 0.0])
tm.assert_frame_equal(result, expected)
def test_rolling_zero_window():
# GH 22719
s = Series(range(1))
result = s.rolling(0).min()
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
|
gfyoung/pandas
|
pandas/tests/window/test_rolling.py
|
Python
|
bsd-3-clause
| 42,869
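For orientation, a small self-contained example of the offset-based windows many of these tests exercise; with closed="left" the right edge of each window (the current row) is excluded, which is why the first result is NaN.

import numpy as np
import pandas as pd

ser = pd.Series(np.arange(5.0), index=pd.date_range("2020-01-01", periods=5, freq="D"))

# "2D" window with the right endpoint excluded: each label only sees earlier rows
print(ser.rolling("2D", closed="left").sum())
# 2020-01-01    NaN
# 2020-01-02    0.0
# 2020-01-03    1.0
# 2020-01-04    3.0
# 2020-01-05    5.0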
|
from .permission import *
from .user import *
from .comment import *
from .post import *
from .tag import *
from .link import *
|
finron/finepy
|
fine/models/__init__.py
|
Python
|
bsd-3-clause
| 128
|
#! /usr/bin/python
'''
Find the kth largest element in an unsorted array. Note that it is the kth largest element in the sorted order, not the kth distinct element.
For example,
Given [3,2,1,5,6,4] and k = 2, return 5.
Note:
You may assume k is always valid, 1 <= k <= array's length.
'''
class Solution:
# @param {integer[]} nums
# @param {integer} k
# @return {integer}
def findKthLargest(self, nums, k):
import heapq
heap = list()
for ex in nums:
heapq.heappush(heap, ex)
if len(heap) > k:
heapq.heappop(heap)
return heapq.heappop(heap)
def findKthElement(self, nums, k):
size = len(nums)
k = size - k + 1
if size < k:
raise ValueError("size of list(%d) is smaller than k(%d)" % (size, k))
start = 0
end = size - 1
while True:
pivot = nums[start]
pivot_index = start
for index in range(start, end+1):
if nums[index] < pivot:
pivot_index += 1
nums[index], nums[pivot_index] = nums[pivot_index], nums[index]
nums[start], nums[pivot_index] = nums[pivot_index], nums[start]
left_size = pivot_index - start + 1
# search first half
if left_size > k:
end = pivot_index
# search second half
elif left_size < k:
start = pivot_index + 1
k -= left_size
else:
return nums[pivot_index]
if __name__ == '__main__':
import random
data = range(1, 11)
random.shuffle(data)
solution = Solution()
sort = sorted(data)
k = 4
print "Expected %d, got" % (sort[-k])
print solution.findKthElement(data, k)
|
shub0/algorithm-data-structure
|
python/find_kth_element.py
|
Python
|
bsd-3-clause
| 1,814
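As a quick cross-check of the heap variant above, the standard library packages the same size-k heap idea directly; a small sketch:

import heapq
import random

nums = list(range(1, 11))
random.shuffle(nums)
k = 4

# heapq.nlargest returns the k largest values in descending order, so its last
# element is the kth largest -- the value findKthLargest computes with its own heap
assert heapq.nlargest(k, nums)[-1] == sorted(nums)[-k]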
|
from datetime import timedelta
from celery.task import Task, PeriodicTask
from celery.registry import tasks
from .models import EnrolledDeployedQuestionsAnswered, create_record_enrolled_deployed_questions_answered
class CreateSystemReport(Task):
def run(self, **kwargs):
if kwargs.has_key('role'):
role = kwargs.get('role')
if role in ['DFO', 'DEO']:
pass
else:
pass
tasks.register(CreateSystemReport)
class CreateRecordEnrolledDeployedQuestionsAnswered(Task):
def run(self, **kwargs):
create_record_enrolled_deployed_questions_answered(model=EnrolledDeployedQuestionsAnswered)
tasks.register(CreateRecordEnrolledDeployedQuestionsAnswered)
class ProcessRecordCreation(PeriodicTask):
run_every = timedelta(minutes=5)
def run(self, **kwargs):
CreateRecordEnrolledDeployedQuestionsAnswered.delay()
logger = self.get_logger(**kwargs)
logger.info("Running CreateRecordEnrolledDeployedQuestionsAnswered")
return
tasks.register(ProcessRecordCreation)
|
unicefuganda/edtrac
|
edtrac_project/rapidsms_edtrac/education/tasks.py
|
Python
|
bsd-3-clause
| 1,089
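The Task/PeriodicTask/tasks.register pattern above is the legacy Celery task API; purely as an illustration, a roughly equivalent periodic task under a modern Celery application object might look like this sketch (app name, module path, and task name are assumptions).

from celery import Celery

app = Celery('education')


@app.task(name='education.create_record_task')
def create_record_task():
    # would call create_record_enrolled_deployed_questions_answered(...) as above
    pass


# run every five minutes, mirroring ProcessRecordCreation's run_every
app.conf.beat_schedule = {
    'process-record-creation': {
        'task': 'education.create_record_task',
        'schedule': 300.0,
    },
}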
|
"""
mweigert@mpi-cbg.de
"""
from __future__ import absolute_import, print_function
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from time import time
from spimagine import MainWidget, DemoData, NumpyData, DataModel, qt_exec
from spimagine.gui.glwidget import GLWidget
from spimagine import logger
#logger.setLevel(logger.DEBUG)
from tifffile import imread
def test_widget():
app = QtWidgets.QApplication(sys.argv)
win = MainWidget()
#win = GLWidget()
t = time()
win.setModel(DataModel(DemoData()))
print("time to set model: ", time()-t)
win.show()
# win.raise_()
QtCore.QTimer.singleShot(100,win.closeMe)
#QtCore.QTimer.singleShot(100,win.close)
app.exec_()
if __name__ == '__main__':
test_widget()
|
maweigert/spimagine
|
tests/test_gui/test_widget.py
|
Python
|
bsd-3-clause
| 771
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from PyQt4 import QtCore, QtGui
from itertools import izip
import os
import string
from vistrails.core import debug
from vistrails.core.configuration import get_vistrails_configuration
from vistrails.core.modules.basic_modules import identifier as basic_identifier
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.utils import create_port_spec_string
from vistrails.core.vistrail.port_spec import PortSpec
from vistrails.core.system import vistrails_root_directory
from vistrails.gui.modules.utils import get_widget_class
from vistrails.gui.common_widgets import QToolWindowInterface
from vistrails.gui.port_documentation import QPortDocumentation
from vistrails.gui.theme import CurrentTheme
def letterIcon(letter, crossed=False):
""" Creates icon with a specific letter
"""
pixmap = QtGui.QPixmap(48,48)
pixmap.fill(QtCore.Qt.transparent)
painter = QtGui.QPainter(pixmap)
painter.setPen(QtGui.QColor(0, 0, 0, 255))
font = painter.font()
font.setPointSize(40)
painter.setFont(font)
painter.drawText(0, 0, 48, 48, QtCore.Qt.AlignCenter, letter)
if crossed:
painter.drawLine(0,0,48,48)
painter.drawLine(0,48,48,0)
painter.end()
return QtGui.QIcon(pixmap)
class AliasLabel(QtGui.QLabel):
"""
AliasLabel is a QLabel that supports hover actions similar
to a hot link
"""
def __init__(self, alias='', text='', default_label='', parent=None):
""" AliasLabel(alias:str , text: str, default_label: str,
parent: QWidget) -> QHoverAliasLabel
Initialize the label with a text
"""
QtGui.QLabel.__init__(self, parent)
self.alias = alias
self.caption = text
# catch None
if default_label:
self.default_label = default_label
else:
self.default_label = ""
self.updateText()
self.setAttribute(QtCore.Qt.WA_Hover)
self.setCursor(QtCore.Qt.PointingHandCursor)
self.setToolTip(alias)
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
def updateText(self):
""" updateText() -> None
Update the label text to contain the alias name when appropriate
"""
if self.alias != '':
self.setText(self.alias + ': ' + self.caption)
elif self.default_label != '':
self.setText(self.default_label + ': ' + self.caption)
else:
self.setText(self.caption)
def event(self, event):
""" event(event: QEvent) -> Event Result
Override to handle hover enter and leave events for hot links
"""
if event.type()==QtCore.QEvent.HoverEnter:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_SELECT_COLOR)
if event.type()==QtCore.QEvent.HoverLeave:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
return QtGui.QLabel.event(self, event)
# return super(QHoverAliasLabel, self).event(event)
def mousePressEvent(self, event):
""" mousePressEvent(event: QMouseEvent) -> None
If mouse click on the label, show up a dialog to change/add
the alias name
"""
if event.button()==QtCore.Qt.LeftButton:
(text, ok) = QtGui.QInputDialog.getText(self,
'Set Parameter Alias',
'Enter the parameter alias',
QtGui.QLineEdit.Normal,
self.alias)
while ok and self.parent().check_alias(str(text)):
msg =" This alias is already being used.\
Please enter a different parameter alias "
(text, ok) = QtGui.QInputDialog.getText(self,
'Set Parameter Alias',
msg,
QtGui.QLineEdit.Normal,
text)
if ok and str(text)!=self.alias:
if not self.parent().check_alias(str(text)):
self.alias = str(text).strip()
self.updateText()
self.parent().updateMethod()
class Parameter(object):
def __init__(self, desc, psi=None):
self.type = desc.name
self.identifier = desc.identifier
self.namespace = None if not desc.namespace else desc.namespace
self.strValue = ''
self.alias = ''
self.queryMethod = None
self.port_spec_item = psi
self.param_exists = False
class Function(object):
def __init__(self, name, params, port_spec=None):
self.name = name
self.parameters = params
self.port_spec = port_spec
def get_spec(self, port_type):
return self.port_spec
class ParameterEntry(QtGui.QTreeWidgetItem):
plus_icon = QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/plus.png'))
minus_icon = QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/minus.png'))
def __init__(self, port_spec, function=None, types_visible=True, parent=None):
QtGui.QTreeWidgetItem.__init__(self, parent)
self.setFirstColumnSpanned(True)
self.port_spec = port_spec
self.function = function
self.types_visible = types_visible
def build_widget(self, widget_accessor, with_alias=True):
reg = get_module_registry()
# widget = QtGui.QDockWidget()
# widget.setFeatures(QtGui.QDockWidget.DockWidgetClosable |
# QtGui.QDockWidget.DockWidgetVerticalTitleBar)
widget = QtGui.QWidget()
h_layout = QtGui.QHBoxLayout()
h_layout.insertSpacing(0, 10)
h_layout.setMargin(2)
h_layout.setSpacing(2)
v_layout = QtGui.QVBoxLayout()
v_layout.setAlignment(QtCore.Qt.AlignVCenter)
delete_button = QtGui.QToolButton()
delete_button.setIconSize(QtCore.QSize(8,8))
delete_button.setIcon(ParameterEntry.minus_icon)
def delete_method():
if self.function is not None:
self.group_box.parent().parent().parent().delete_method(
self, self.port_spec.name, self.function.real_id)
else:
self.group_box.parent().parent().parent().delete_method(
self, self.port_spec.name, None)
QtCore.QObject.connect(delete_button, QtCore.SIGNAL("clicked()"),
delete_method)
v_layout.addWidget(delete_button)
add_button = QtGui.QToolButton()
add_button.setIcon(ParameterEntry.plus_icon)
add_button.setIconSize(QtCore.QSize(8,8))
def add_method():
self.group_box.parent().parent().parent().add_method(
self.port_spec.name)
QtCore.QObject.connect(add_button, QtCore.SIGNAL("clicked()"),
add_method)
v_layout.addWidget(add_button)
h_layout.addLayout(v_layout)
self.my_widgets = []
self.my_labels = []
self.group_box = QtGui.QGroupBox()
layout = QtGui.QGridLayout()
layout.setMargin(5)
layout.setSpacing(5)
layout.setColumnStretch(1,1)
self.group_box.setFocusPolicy(QtCore.Qt.ClickFocus)
self.group_box.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Fixed)
self.group_box.palette().setColor(QtGui.QPalette.Window,
CurrentTheme.METHOD_SELECT_COLOR)
if self.function is not None:
params = self.function.parameters
else:
params = [None,] * len(self.port_spec.descriptors())
for i, (psi, param) in enumerate(izip(self.port_spec.port_spec_items,
params)):
if psi.entry_type is not None:
# only keep the lowercase prefix; the options that follow are in CamelCase
prefix_end = len(psi.entry_type.lstrip(string.lowercase))
if prefix_end == 0:
entry_type = psi.entry_type
else:
entry_type = psi.entry_type[:-prefix_end]
else:
entry_type = None
widget_class = widget_accessor(psi.descriptor, entry_type)
if param is not None:
obj = param
else:
obj = Parameter(psi.descriptor)
obj.port_spec_item = psi
if self.types_visible:
if with_alias:
label = AliasLabel(obj.alias, obj.type, psi.label)
self.my_labels.append(label)
else:
label = QtGui.QLabel(obj.type)
layout.addWidget(label, i, 0)
layout.setAlignment(label, QtCore.Qt.AlignLeft)
param_widget = widget_class(obj, self.group_box)
self.my_widgets.append(param_widget)
layout.addWidget(param_widget, i, 1)
layout.addItem(QtGui.QSpacerItem(0,0, QtGui.QSizePolicy.MinimumExpanding), i, 2)
self.group_box.setLayout(layout)
def updateMethod():
if self.function is not None:
real_id = self.function.real_id
else:
real_id = -1
self.group_box.parent().parent().parent().update_method(
self, self.port_spec.name, self.my_widgets, self.my_labels, real_id)
def check_alias(name):
controller = self.group_box.parent().parent().parent().controller
if controller:
return controller.check_alias(name)
return False
self.group_box.updateMethod = updateMethod
self.group_box.check_alias = check_alias
h_layout.addWidget(self.group_box)
widget.setLayout(h_layout)
return widget
def get_widget(self):
return self.build_widget(get_widget_class, True)
class PortItem(QtGui.QTreeWidgetItem):
edit_show = QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/pencil.png'))
edit_hide = QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/pencil-disabled.png'))
eye_open_icon = \
QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/eye.png'))
eye_closed_icon = \
QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/eye_closed.png'))
eye_disabled_icon = \
QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/eye_gray.png'))
conn_icon = \
QtGui.QIcon(os.path.join(vistrails_root_directory(),
'gui/resources/images/connection.png'))
def __init__(self, port_spec, is_connected, is_optional, is_visible,
is_editable=False, parent=None):
QtGui.QTreeWidgetItem.__init__(self, parent)
# self.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
self.setFlags(QtCore.Qt.ItemIsEnabled)
# self.setCheckState(0, QtCore.Qt.Unchecked)
self.port_spec = port_spec
self.is_connected = is_connected
self.is_optional = is_optional
self.is_visible = is_visible
self.is_editable = is_editable
self.is_unset = False
self.build_item(port_spec, is_connected, is_optional, is_visible,
is_editable)
def visible(self):
return not self.is_optional or self.is_visible
def set_visible(self, visible):
self.is_visible = visible
if visible:
self.setIcon(0, PortItem.eye_open_icon)
else:
self.setIcon(0, PortItem.eye_closed_icon)
def set_editable(self, edit):
self.is_editable = edit
if edit:
self.setIcon(0, PortItem.edit_show)
else:
self.setIcon(0, PortItem.edit_hide)
def get_visible(self):
return self.visible_checkbox
def get_connected(self):
return self.connected_checkbox
def is_constant(self):
return (self.port_spec.is_valid and
get_module_registry().is_constant(self.port_spec))
def calcUnset(self):
self.is_unset = self.is_constant() and \
self.port_spec.is_mandatory() and \
not self.is_connected and \
not self.isExpanded()
if self.is_unset:
font = self.font(3)
font.setWeight(QtGui.QFont.Bold)
self.setFont(3, font)
def build_item(self, port_spec, is_connected, is_optional, is_visible, is_editable):
if not is_optional:
self.setIcon(1, PortItem.eye_disabled_icon)
elif is_visible:
self.setIcon(1, PortItem.eye_open_icon)
else:
self.setIcon(1, PortItem.eye_closed_icon)
if is_connected:
self.setIcon(2, PortItem.conn_icon)
self.setText(3, port_spec.name)
if self.is_constant():
if len(self.port_spec.port_spec_items)>0:
if is_editable:
self.setIcon(0, PortItem.edit_show)
else:
self.setIcon(0, PortItem.edit_hide)
else:
# if port_spec is not a method, make it gray
self.setForeground(3,
QtGui.QBrush(QtGui.QColor.fromRgb(128,128,128)))
self.visible_checkbox = QtGui.QCheckBox()
self.connected_checkbox = QtGui.QCheckBox()
def contextMenuEvent(self, event, widget):
if self.port_spec is None:
return
act = QtGui.QAction("View Documentation", widget)
act.setStatusTip("View method documentation")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
self.view_documentation)
menu = QtGui.QMenu(widget)
menu.addAction(act)
menu.exec_(event.globalPos())
def view_documentation(self):
# descriptor = self.treeWidget().module.module_descriptor
module = self.treeWidget().module
port_type = self.treeWidget().port_type
widget = QPortDocumentation(module,
port_type,
self.port_spec.name)
widget.setAttribute(QtCore.Qt.WA_DeleteOnClose)
widget.exec_()
def __lt__(self, other):
# put unset mandatory ports first
if self.is_unset != other.is_unset:
return self.is_unset and not other.is_unset
# put set (expanded) functions first
if self.isExpanded() != other.isExpanded():
return self.isExpanded() and not other.isExpanded()
# otherwise use port name
return self.port_spec.name < other.port_spec.name
class PortsList(QtGui.QTreeWidget):
def __init__(self, port_type, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
self.port_type = port_type
self.setColumnCount(4)
self.setColumnWidth(0,24)
self.setColumnWidth(1,24)
self.setColumnWidth(2,24)
self.setRootIsDecorated(False)
self.setIndentation(0)
self.setHeaderHidden(True)
self.connect(self, QtCore.SIGNAL("itemClicked(QTreeWidgetItem*, int)"),
self.item_clicked)
self.module = None
self.port_spec_items = {}
self.entry_klass = ParameterEntry
self.ports_visible = True
self.types_visible = True
def setReadOnly(self, read_only):
self.setEnabled(not read_only)
def set_entry_klass(self, entry_klass):
if self.entry_klass != entry_klass:
self.entry_klass = entry_klass
self.update_module(self.module)
def update_module(self, module):
""" update_module(module: Module) -> None
Setup this tree widget to show functions of module
"""
self.setColumnHidden(0, True)
self.setColumnHidden(1, not self.ports_visible)
# this is strange but if you try to clear the widget when the focus is
# in one of the items (after setting a parameter for example),
# VisTrails crashes on a Mac (Emanuele). This is probably a Qt bug
w = QtGui.QApplication.focusWidget()
if self.isAncestorOf(w):
w.clearFocus()
self.clear()
self.module = module
self.port_spec_items = {}
self.function_map = {}
if module and module.is_valid:
reg = get_module_registry()
descriptor = module.module_descriptor
if self.port_type == 'input':
self.setColumnHidden(0,not get_vistrails_configuration(
).check('showInlineParameterWidgets'))
port_specs = module.destinationPorts()
connected_ports = module.connected_input_ports
visible_ports = module.visible_input_ports
elif self.port_type == 'output':
port_specs = module.sourcePorts()
connected_ports = module.connected_output_ports
visible_ports = module.visible_output_ports
else:
raise TypeError("Unknown port type: '%s'" % self.port_type)
for port_spec in sorted(port_specs, key=lambda x: x.name):
connected = port_spec.name in connected_ports and \
connected_ports[port_spec.name] > 0
item = PortItem(port_spec,
connected,
port_spec.optional,
port_spec.name in visible_ports,
port_spec.name in module.editable_input_ports)
self.addTopLevelItem(item)
self.port_spec_items[port_spec.name] = (port_spec, item)
if self.port_type == 'input':
for function in module.functions:
if not function.is_valid:
continue
port_spec, item = self.port_spec_items[function.name]
subitem = self.entry_klass(port_spec, function,
self.types_visible)
self.function_map[function.real_id] = subitem
item.addChild(subitem)
subitem.setFirstColumnSpanned(True)
self.setItemWidget(subitem, 2, subitem.get_widget())
item.setExpanded(True)
# self.setItemWidget(item, 0, item.get_visible())
# self.setItemWidget(item, 1, item.get_connected())
# i = QTreeWidgetItem(self)
# self.addTopLevelItem(i)
# i.setText(2, port_spec.name)
# visible_checkbox = QtGui.QCheckBox()
# self.setItemWidget(i, 0, visible_checkbox)
# connceted_checkbox = QtGui.QCheckBox()
# connected_checkbox.setEnabled(False)
# self.setItemWidget(i, 1, connected_checkbox)
# Highlight unset ports
for _, item in self.port_spec_items.itervalues():
item.calcUnset()
self.sortItems(0, QtCore.Qt.AscendingOrder)
# base_items = {}
# # Create the base widget item for each descriptor
# for descriptor in moduleHierarchy:
# baseName = descriptor.name
# base_package = descriptor.identifier
# baseItem = QMethodTreeWidgetItem(None,
# None,
# self,
# ([]
# << baseName
# << ''))
# base_items[descriptor] = baseItem
# method_specs = {}
# # do this in reverse to ensure proper overloading
# # !!! NOTE: we have to use ***all*** input ports !!!
# # because a subclass can overload a port with a
# # type that isn't a method
# for descriptor in reversed(moduleHierarchy):
# method_specs.update((name, (descriptor, spec))
# for name, spec in \
# registry.module_ports('input',
# descriptor))
# # add local registry last so that it takes precedence
# method_specs.update((spec.name, (descriptor, spec))
# for spec in module.port_spec_list
# if spec.type == 'input')
# for _, (desc, method_spec) in sorted(method_specs.iteritems()):
# if registry.is_method(method_spec):
# baseItem = base_items[desc]
# sig = method_spec.short_sigstring
# QMethodTreeWidgetItem(module,
# method_spec,
# baseItem,
# ([]
# << method_spec.name
# << sig))
# self.expandAll()
# self.resizeColumnToContents(2)
# show invalid module attributes
if module and not module.is_valid and self.port_type == 'input':
for function in module.functions:
if function.name in self.port_spec_items:
port_spec, item = self.port_spec_items[function.name]
else:
sigstring = create_port_spec_string(
[(basic_identifier, "String")
for i in xrange(len(function.parameters))])
port_spec = PortSpec(name=function.name, type='input',
sigstring=sigstring)
item = PortItem(port_spec, False, False, False)
self.addTopLevelItem(item)
self.port_spec_items[port_spec.name] = (port_spec, item)
subitem = self.entry_klass(port_spec, function)
self.function_map[function.real_id] = subitem
item.addChild(subitem)
subitem.setFirstColumnSpanned(True)
self.setItemWidget(subitem, 2, subitem.get_widget())
item.setExpanded(True)
def item_clicked(self, item, col):
if item.parent() is not None:
return
if self.port_type == 'input':
visible_ports = self.module.visible_input_ports
editable_ports = self.module.editable_input_ports
elif self.port_type == 'output':
visible_ports = self.module.visible_output_ports
else:
raise TypeError("Unknown port type: '%s'" % self.port_type)
if col == 0:
if item.is_constant() and len(item.port_spec.port_spec_items)>0:
item.set_editable(not item.is_editable)
if item.is_editable:
editable_ports.add(item.port_spec.name)
else:
editable_ports.discard(item.port_spec.name)
self.controller.flush_delayed_actions()
self.controller.add_annotation((self.module.INLINE_WIDGET_ANNOTATION,
','.join(editable_ports)),
self.module.id)
self.controller.current_pipeline_scene.recreate_module(
self.controller.current_pipeline, self.module.id)
if col == 1:
if item.is_optional:
item.set_visible(not item.is_visible)
if item.is_visible:
visible_ports.add(item.port_spec.name)
else:
visible_ports.discard(item.port_spec.name)
self.controller.flush_delayed_actions()
self.controller.current_pipeline_scene.recreate_module(
self.controller.current_pipeline, self.module.id)
if col == 3:
if item.isExpanded():
item.setExpanded(False)
elif item.childCount() > 0:
item.setExpanded(True)
elif item.childCount() == 0 and item.is_constant():
self.do_add_method(item.port_spec, item)
def set_controller(self, controller):
self.controller = controller
def update_method(self, subitem, port_name, widgets, labels, real_id=-1):
#print 'updateMethod called', port_name
if self.controller:
_, item = self.port_spec_items[port_name]
str_values = []
query_methods = []
for w in widgets:
str_values.append(str(w.contents()))
if hasattr(w, 'query_method'):
query_methods.append(w.query_method())
if real_id < 0:
should_replace = False
else:
should_replace = True
self.controller.update_function(self.module,
port_name,
str_values,
real_id,
[str(label.alias)
for label in labels],
query_methods,
should_replace)
# FIXME need to get the function set on the item somehow
# HACK for now
for function in self.module.functions:
if function.real_id not in self.function_map:
self.function_map[function.real_id] = subitem
subitem.function = function
# make the scene display the fact that we have a parameter
# by dimming the port
# self.controller.flush_delayed_actions()
self.controller.current_pipeline_scene.update_module_functions(
self.controller.current_pipeline, self.module.id)
def delete_method(self, subitem, port_name, real_id=None):
_, item = self.port_spec_items[port_name]
item.removeChild(subitem)
if real_id is not None and self.controller:
#print "got to delete"
self.controller.delete_function(real_id, self.module.id)
# make the scene display the fact that we have lost the
# parameter by undimming the port
# self.controller.flush_delayed_actions()
self.controller.current_pipeline_scene.update_module_functions(
self.controller.current_pipeline, self.module.id)
# how to delete items...
# subitem.deleteLater()
def do_add_method(self, port_spec, item):
"""do_add_method(port_spec: PortSpec,
item: PortItem) -> None
Displays a new method for the port.
"""
subitem = self.entry_klass(port_spec)
item.addChild(subitem)
subitem.setFirstColumnSpanned(True)
self.setItemWidget(subitem, 2, subitem.get_widget())
item.setExpanded(True)
if len(port_spec.descriptors()) == 0:
self.update_method(subitem, port_spec.name, [], [])
def add_method(self, port_name):
port_spec, item = self.port_spec_items[port_name]
self.do_add_method(port_spec, item)
def contextMenuEvent(self, event):
# Just dispatches the menu event to the widget item
item = self.itemAt(event.pos())
if item:
item.contextMenuEvent(event, self)
class QPortsPane(QtGui.QWidget, QToolWindowInterface):
def __init__(self, port_type, parent=None, flags=QtCore.Qt.Widget):
QtGui.QWidget.__init__(self, parent, flags)
self.port_type = port_type
self.build_widget()
self.controller = None
def build_widget(self):
self.tree_widget = PortsList(self.port_type)
layout = QtGui.QHBoxLayout()
layout.setMargin(0)
layout.addWidget(self.tree_widget)
self.setLayout(layout)
self.setWindowTitle('%s Ports' % self.port_type.capitalize())
def set_controller(self, controller):
self.controller = controller
self.tree_widget.set_controller(controller)
def update_module(self, module):
self.tree_widget.update_module(module)
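# Minimal wiring sketch (not part of the original file): how a QPortsPane is
# typically hooked up to a VisTrails controller and a pipeline module. The
# `controller` and `module` objects are assumed to come from the running
# application; nothing below is a documented public API.
def _ports_pane_example(controller, module):
    pane = QPortsPane('input')        # or 'output'
    pane.set_controller(controller)   # required before editing parameters
    pane.update_module(module)        # rebuilds the PortsList for this module
    pane.show()
    return pane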
|
hjanime/VisTrails
|
vistrails/gui/ports_pane.py
|
Python
|
bsd-3-clause
| 31,553
|
import os
import logging
import signal
import time
import errno
import traceback
import sys
from ..logs import setup_logs
log = logging.getLogger(__name__)
def call_in_child(module_name, func_name='main'):
pid = os.fork()
if pid:
log.info('Started child %s:%s (%d)' % (module_name, func_name, pid))
return pid
# We cannot allow control to return to the parent.
try:
module = __import__(module_name, fromlist=['.'])
func = getattr(module, func_name)
func()
except Exception:
traceback.print_exc()
finally:
os._exit(1)
def main():
# The different modules will parse the CLI differently, so we must force
# the user to provide envvars.
if len(sys.argv) > 1:
print >> sys.stderr, os.path.basename(sys.argv[0]), 'takes no arguments; everything must be configured by envvars.'
exit(1)
setup_logs()
pids = {}
try:
pid = call_in_child('sgcache.commands.events')
pids[pid] = 'events'
pid = call_in_child('sgcache.commands.scanner')
pids[pid] = 'scanner'
pid = call_in_child('sgcache.commands.web')
pids[pid] = 'web'
log.debug('Waiting on children')
pid, code = os.wait()
log.error('Child sgcache-%s (%s) exited with code %d' % (pids.pop(pid), pid, code))
except:
code = 100
finally:
# Ask them to stop.
for pid, name in sorted(pids.iteritems()):
log.info('Stopping sgcache-%s (%d)' % (name, pid))
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH: # Process does not exist
raise
# Give them a second...
time.sleep(1)
# Force them to stop.
for pid, name in sorted(pids.iteritems()):
try:
os.kill(pid, signal.SIGKILL)
except OSError as e:
if e.errno != errno.ESRCH: # Process does not exist
raise
os._exit(code)
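# Sketch only (not part of the original module): the shutdown sequence used in
# main(), factored out for clarity. Each child is first asked to exit with
# SIGTERM, then force-killed after a short grace period; ESRCH means the child
# is already gone and is ignored.
def _terminate_children(pids, grace=1.0):
    # `pids` is assumed to be an iterable of child process ids.
    for pid in sorted(pids):
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            if e.errno != errno.ESRCH:
                raise
    time.sleep(grace)
    for pid in sorted(pids):
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError as e:
            if e.errno != errno.ESRCH:
                raise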
|
westernx/sgcache
|
sgcache/commands/auto.py
|
Python
|
bsd-3-clause
| 2,079
|
from dimagi.ext.couchdbkit import Document
from django.core.management.base import LabelCommand, CommandError
from corehq.apps.domain.models import OldDomain
from corehq.apps.domain.shortcuts import create_domain, create_user
from corehq.apps.domain.utils import normalize_domain_name
from corehq.apps.users.models import CouchUser
from dimagi.utils.couch.database import get_db
class Command(LabelCommand):
def handle(self, *args, **options):
django_domains = OldDomain.objects.all()
django_domain_names = set([domain.name for domain in django_domains])
couch_domain_names = set([x['key'][0] for x in get_db().view('domain/docs', group_level=1).all()])
couch_user_domain_names = set([x['key'] for x in get_db().view('users/by_domain', group=True).all()])
print get_db().view('users/by_domain').all()
normalized_names = {}
domains_that_need_to_change = set()
# print some warnings if things are fishy
for domain in couch_domain_names.union(couch_user_domain_names):
if domain not in django_domain_names:
print "Warning: domain '%s' not in SQL" % domain
normalized = normalize_domain_name(domain)
if normalized in normalized_names:
print "Warning: domains '%s' and '%s' both exist" % (domain, normalized_names[normalized])
normalized_names[normalized] = domain
if normalized != domain:
domains_that_need_to_change.add(domain)
print "Going to change the following domains:"
for domain in domains_that_need_to_change:
print " %s" % domain
print
if raw_input("Are you sure you want to continue? (Y/n)") != 'Y':
print "Mission aborted"
return
print "Migrating SQL domains"
for django_domain in django_domains:
django_domain.name = normalize_domain_name(django_domain.name)
django_domain.save()
print "Migrating domains in Couch docs"
class MyDoc(Document):
class Meta:
app_label = 'domain'
def get_docs(domain):
chunk_size = 500
i = 0
while True:
docs = MyDoc.view('domain/docs',
startkey=[domain], endkey=[domain, {}],
reduce=False, include_docs=True, skip=i*chunk_size, limit=chunk_size)
for doc in docs:
yield doc
if not len(docs):
break
i += 1
for domain in domains_that_need_to_change:
print "%s:" % domain
for doc in get_docs(domain):
print '.',
if 'domain' in doc:
doc['domain'] = normalize_domain_name(doc['domain'])
if 'domains' in doc:
for i,domain in enumerate(doc['domains']):
doc['domains'][i] = normalize_domain_name(doc['domains'][i])
doc.save()
print
print "Patching users"
for domain in domains_that_need_to_change:
print '.',
couch_users = CouchUser.view('users/by_domain', key=domain, include_docs=True, reduce=False)
for user in couch_users:
for dm in user.web_account.domain_memberships:
dm.domain = normalize_domain_name(dm.domain)
for account in user.commcare_accounts:
if account.domain:
account.domain = normalize_domain_name(account.domain)
user.save()
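# Generalized sketch (not part of the original command): the skip/limit
# pagination used by get_docs above, written against a hypothetical
# `fetch(skip, limit)` callable so the pattern is easier to reuse and test.
def _paginate(fetch, chunk_size=500):
    i = 0
    while True:
        rows = list(fetch(skip=i * chunk_size, limit=chunk_size))
        for row in rows:
            yield row
        if not rows:
            break
        i += 1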
|
puttarajubr/commcare-hq
|
corehq/apps/domain/management/commands/migrate_domain_names.py
|
Python
|
bsd-3-clause
| 3,679
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag.bzcleaner import BzCleaner
class LeaveOpen(BzCleaner):
def __init__(self):
super(LeaveOpen, self).__init__()
def description(self):
return "Closed bugs with leave-open keyword"
def get_bz_params(self, date):
start_date, end_date = self.get_dates(date)
params = {
"bug_status": ["RESOLVED", "VERIFIED", "CLOSED"],
"f1": "keywords",
"o1": "casesubstring",
"v1": "leave-open",
"f2": "resolution",
"o2": "changedafter",
"v2": start_date,
"f3": "resolution",
"o3": "changedbefore",
"v3": end_date,
}
return params
def get_autofix_change(self):
return {"keywords": {"remove": ["leave-open"]}}
if __name__ == "__main__":
LeaveOpen().run()
|
mozilla/bztools
|
auto_nag/scripts/leave_open.py
|
Python
|
bsd-3-clause
| 1,050
|
#pylint: disable=E1101,W0232,W0141,E1103,E1002,W0232,R0201,R0903,R0904,C0302
#pylint: disable=C0103
'''The overtime application encapsulates some common functionality
around the overtime approval queues in the timetracker application.
'''
import datetime
from django.db import models
from django.conf import settings
from django.core.mail import EmailMessage
from django.template import Context
from django.template.loader import get_template
from django.core.urlresolvers import reverse
from timetracker.tracker.models import TrackingEntry, Tbluser
class PendingApproval(models.Model):
'''PendingApproval is the model with which we store
Overtime/Undertime and Work at weekend approval requests.
When an agent is required to work under these specific conditions
we must be able to track this and have a proper approval
chain. The agent first creates their entry using the normal
methods and an approval request is generated. This approval
request is then available for their immediate manager to approve
or deny it.
Approving a request means that the agent will then receive the
normal follow-up e-mails and the notification plugin functions
will be run.
Denying a request simply deletes the entry and informs the agent
that the request was not successful.
'''
created_on = models.DateField(
auto_now_add=True
)
closed = models.BooleanField()
closed_on = models.DateField(null=True, blank=True)
entry = models.ForeignKey(
TrackingEntry
)
approver = models.ForeignKey(
Tbluser,
limit_choices_to={
"user_type__in": ["SUPER", "ADMIN"]
}
)
tl_approved = models.BooleanField()
def close(self, status):
'''Close, as the name implies, closes this PendingApproval request.
When we close the entry we make a timestamp for when it was
closed and send the appropriate follow-up e-mails.
If the entry is approved, status should be True and this will
keep the entry in the system and generate all the associated
forms.
If the entry is denied, then it will delete the associated
entry and any link days which are associated with *that* entry.
:param status: Boolean indicating whether this entry was approved.
'''
if self.closed:
return
self.closed = True
self.closed_on = datetime.datetime.now()
self.save()
if status:
if self.entry.daytype == "PENDI":
self.entry.daytype = "HOLIS"
self.entry.save()
self.approved()
else:
self.denied()
def tl_close(self, status):
'''Performs a 'soft close' on the PendingApproval entry by simply
marking the tl_approved as True. This allows us to know that
an entry can be approved by a one-up manager.
'''
if self.closed:
return
if status:
self.tl_approved = True
self.save()
else:
self.denied()
def approved(self):
'''approved fires off the notifications associated with this entry.'''
self.entry.send_notifications()
def denied(self):
'''denied will inform the user that their request was not successful.'''
tmpl = get_template("emails/denied.dhtml")
ctx = Context({
"entry_date": str(self.entry.entry_date),
"daytype": self.entry.daytype,
})
email = EmailMessage(from_email='timetracker@unmonitored.com')
email.body = tmpl.render(ctx)
email.to = [self.entry.user.user_id]
email.subject = "Request for Overtime: Denied."
email.send()
if self.entry.is_linked(): # pragma: no cover
self.entry.link.unlink()
self.entry.delete()
self.delete()
def __unicode__(self): # pragma: no cover
return u'%s - %s' % (self.entry.entry_date, self.entry.user.name())
def inform_manager(self):
'''When we create a PendingApproval we can inform the manager that a
new entry was created.
'''
if not self.entry.approval_required():
return
if settings.SENDING_APPROVAL_MANAGERS.get(self.approver.market):
managers = self.entry.user.get_manager_email()
else:
managers = []
if settings.SENDING_APPROVAL_TL.get(self.approver.market): # pragma: no cover
tls = self.entry.user.get_tl_email()
else:
tls = []
recipients = managers + tls
if len(recipients) == 0:
return
tmpl = get_template("emails/inform_manager.dhtml")
ctx = Context({
"username": self.entry.user.name(),
"entry_date": str(self.entry.entry_date),
"domain": settings.DOMAIN_NAME,
"daytype": self.entry.daytype,
"rest": reverse(
"timetracker.overtime.views.accept_edit",
kwargs={"entry": self.entry.id},
)[1:] # because it has the leading slash
})
email = EmailMessage(from_email='timetracker@unmonitored.com')
email.body = tmpl.render(ctx)
email.to = recipients
email.subject = "Request for Overtime: %s" % self.entry.user.name()
email.send()
def is_holiday_request(self):
'''checks whether this entry is a holiday entry or not.'''
return self.entry.daytype == "PENDI"
|
AeroNotix/django-timetracker
|
overtime/models.py
|
Python
|
bsd-3-clause
| 5,530
|
"""
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Random sampling.
"""
import numpy as np
from smt.sampling_methods.sampling_method import SamplingMethod
class Random(SamplingMethod):
def _compute(self, nt):
"""
Compute the requested number of sampling points.
Arguments
---------
nt : int
Number of points requested.
Returns
-------
ndarray[nt, nx]
The sampling locations in the input space.
"""
xlimits = self.options["xlimits"]
nx = xlimits.shape[0]
return np.random.rand(nt, nx)
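# Usage sketch (not shipped with this module): option handling and any scaling
# to xlimits are assumed to be provided by the SamplingMethod base class;
# _compute above only draws the raw uniform samples.
def _random_sampling_example():
    xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])  # two inputs with their bounds
    sampling = Random(xlimits=xlimits)
    return sampling(50)  # ndarray of shape (50, 2)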
|
bouhlelma/smt
|
smt/sampling_methods/random.py
|
Python
|
bsd-3-clause
| 665
|
import pandas as pd
import numpy as np
import pyaf.HierarchicalForecastEngine as hautof
import pyaf.Bench.TS_datasets as tsds
import datetime
#get_ipython().magic('matplotlib inline')
def train_and_force_fail(b , error_message):
try:
df = b.mPastData;
lEngine = hautof.cHierarchicalForecastEngine()
lEngine.mOptions.mHierarchicalCombinationMethod = "TD";
lEngine.mOptions.set_active_autoregressions([]);
lEngine
H = b.mHorizon;
lEngine.train(df , b.mTimeVar , b.mSignalVar, H, b.mHierarchy, None);
raise Exception("NOT_OK")
except Exception as e:
# should fail
print(str(e));
assert(str(e) == error_message)
if(str(e) == "NOT_OK"):
raise
pass
b1 = tsds.load_AU_hierarchical_dataset();
b1.mPastData[b1.mTimeVar] = None
train_and_force_fail(b1 , "PYAF_ERROR_TIME_COLUMN_TYPE_NOT_ALLOWED 'Date' 'object'")
|
antoinecarme/pyaf
|
tests/basic_checks/hierarchy_checks_date_column_bad_storage.py
|
Python
|
bsd-3-clause
| 960
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run Inspector's perf tests in perf mode."""
import os
import json
import logging
import optparse
import time
import datetime
from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
_log = logging.getLogger(__name__)
class PerfTestsRunner(object):
_default_branch = 'webkit-trunk'
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
_DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
def __init__(self, args=None, port=None):
self._options, self._args = PerfTestsRunner._parse_args(args)
if port:
self._port = port
self._host = self._port.host
else:
self._host = Host()
self._port = self._host.port_factory.get(self._options.platform, self._options)
self._webkit_base_dir_len = len(self._port.webkit_base())
self._base_path = self._port.perf_tests_dir()
self._timestamp = time.time()
self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
def _expand_path(option, opt_str, value, parser):
path = os.path.expandvars(os.path.expanduser(value))
setattr(parser.values, option.dest, path)
perf_option_list = [
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
help='Set the configuration to Release'),
optparse.make_option('-t', '--target', dest='configuration',
help='Specify the target build subdirectory under src/out/'),
optparse.make_option("--platform",
help="Specify port/platform being tested (e.g. mac)"),
optparse.make_option("--chromium",
action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
optparse.make_option("--android",
action="store_const", const='android', dest='platform', help='Alias for --platform=android'),
optparse.make_option("--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
optparse.make_option("--build-number",
help=("The build number of the builder running this script.")),
optparse.make_option("--build", dest="build", action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up to date (default)."),
optparse.make_option("--no-build", dest="build", action="store_false",
help="Don't check to see if the DumpRenderTree build is up to date."),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--time-out-ms", default=600 * 1000,
help="Set the timeout for each test"),
optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
help="Do no generate results JSON and results page."),
optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
help="Path to generate a JSON file at; may contain previous results if it already exists."),
optparse.make_option("--reset-results", action="store_true",
help="Clears the content in the generated JSON file before adding the results."),
optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
help="Only used on bots. Path to a slave configuration file."),
optparse.make_option("--description",
help="Add a description to the output JSON file if one is generated"),
optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
help="Don't launch a browser with results after the tests are done"),
optparse.make_option("--test-results-server",
help="Upload the generated JSON file to the specified server when --output-json-path is present."),
optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
help="Run all tests, including the ones in the Skipped list."),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
optparse.make_option("--profiler", action="store",
help="Output per-test profile information, using the specified profiler."),
optparse.make_option("--additional-driver-flag", action="append",
default=[], help="Additional command line flag to pass to DumpRenderTree "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative DumpRenderTree binary to use"),
optparse.make_option("--content-shell", action="store_true",
help="Use Content Shell instead of DumpRenderTree"),
optparse.make_option("--repeat", default=1, type="int",
help="Specify number of times to run test set (default: 1)."),
optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
help="Specify number of times to invoke test runner for each performance test."),
]
return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
test_extensions = ['.html', '.svg']
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in test_extensions
filesystem = self._host.filesystem
paths = []
for arg in self._args:
if filesystem.exists(filesystem.join(self._base_path, arg)):
paths.append(arg)
else:
relpath = filesystem.relpath(arg, self._base_path)
if filesystem.exists(filesystem.join(self._base_path, relpath)):
paths.append(filesystem.normpath(relpath))
else:
_log.warning('Path was not found: ' + arg)
skipped_directories = set(['.svn', 'resources'])
test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
tests = []
for path in test_files:
relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
if self._options.use_skipped_list and self._port.skips_perf_test(
relative_path) and filesystem.normpath(relative_path) not in paths:
continue
test = PerfTestFactory.create_perf_test(self._port, relative_path, path,
test_runner_count=self._options.test_runner_count)
tests.append(test)
return tests
def _start_http_servers(self):
self._port.acquire_http_lock()
self._port.start_http_server(number_of_servers=2)
def _stop_http_servers(self):
self._port.stop_http_server()
self._port.release_http_lock()
def run(self):
needs_http = self._port.requires_http_server()
class FakePrinter(object):
def write_update(self, msg):
print msg
def write_throttled_update(self, msg):
pass
if self._port.check_build(needs_http=needs_http, printer=FakePrinter()):
_log.error("Build not up to date for %s" % self._port._path_to_driver())
return self.EXIT_CODE_BAD_BUILD
run_count = 0
repeat = self._options.repeat
while run_count < repeat:
run_count += 1
tests = self._collect_tests()
runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
_log.info("Running %d tests%s", len(tests), runs)
try:
if needs_http:
self._start_http_servers()
unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))
finally:
if needs_http:
self._stop_http_servers()
if self._options.generate_results and not self._options.profile:
exit_code = self._generate_results()
if exit_code:
return exit_code
if self._options.generate_results and not self._options.profile:
test_results_server = self._options.test_results_server
if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
return self.EXIT_CODE_FAILED_UPLOADING
if self._options.show_results:
self._port.show_results_html_file(self._results_page_path())
return unexpected
def _output_json_path(self):
output_json_path = self._options.output_json_path
if output_json_path:
return output_json_path
return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
def _results_page_path(self):
return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'
def _generate_results(self):
options = self._options
output_json_path = self._output_json_path()
output = self._generate_results_dict(self._timestamp, options.description,
options.platform, options.builder_name, options.build_number)
if options.slave_config_json_path:
output = self._merge_slave_config_json(options.slave_config_json_path, output)
if not output:
return self.EXIT_CODE_BAD_SOURCE_JSON
output = self._merge_outputs_if_needed(output_json_path, output)
if not output:
return self.EXIT_CODE_BAD_MERGE
filesystem = self._host.filesystem
json_output = json.dumps(output)
filesystem.write_text_file(output_json_path, json_output)
template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
template = filesystem.read_text_file(template_path)
absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
filesystem.write_text_file(self._results_page_path(), results_page)
def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
revisions = {}
path = self._port.repository_path()
git = self._host.git(path=path)
revision = str(git.commit_position(path))
revisions['chromium'] = {'revision': revision, 'timestamp': git.timestamp_of_revision(path, revision)}
meta_info = {
'description': description,
'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
'platform': platform,
'revisions': revisions,
'builderName': builder_name,
'buildNumber': int(build_number) if build_number else None}
contents = {'tests': {}}
for key, value in meta_info.items():
if value:
contents[key] = value
for test, metrics in self._results:
for metric_name, iteration_values in metrics.iteritems():
if not isinstance(iteration_values, list): # We can't report results without individual measurements.
continue
tests = contents['tests']
path = test.test_name_without_file_extension().split('/')
for i in range(0, len(path)):
is_last_token = i + 1 == len(path)
url = self.view_source_url(
'PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
tests.setdefault(path[i], {'url': url})
current_test = tests[path[i]]
if is_last_token:
current_test.setdefault('metrics', {})
assert metric_name not in current_test['metrics']
current_test['metrics'][metric_name] = {'current': iteration_values}
else:
current_test.setdefault('tests', {})
tests = current_test['tests']
return contents
@staticmethod
def view_source_url(path_from_blink):
return 'https://chromium.googlesource.com/chromium/src/+/master/third_party/WebKit/%s' % path_from_blink
@staticmethod
def _datetime_in_ES5_compatible_iso_format(datetime):
return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
def _merge_slave_config_json(self, slave_config_json_path, contents):
if not self._host.filesystem.isfile(slave_config_json_path):
_log.error("Missing slave configuration JSON file: %s", slave_config_json_path)
return None
try:
slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
slave_config = json.load(slave_config_json)
for key in slave_config:
contents['builder' + key.capitalize()] = slave_config[key]
return contents
except Exception as error:
_log.error("Failed to merge slave configuration JSON file %s: %s", slave_config_json_path, error)
return None
def _merge_outputs_if_needed(self, output_json_path, output):
if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
return [output]
try:
existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
return existing_outputs + [output]
except Exception as error:
_log.error("Failed to merge output JSON file %s: %s", output_json_path, error)
return None
def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
url = "https://%s%s" % (test_results_server, host_path)
uploader = file_uploader(url, 120)
try:
response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
except Exception as error:
_log.error("Failed to upload JSON file to %s in 120s: %s", url, error)
return False
response_body = [line.strip('\n') for line in response]
if response_body != ['OK']:
try:
parsed_response = json.loads('\n'.join(response_body))
except:
_log.error("Uploaded JSON to %s but got a bad response:", url)
for line in response_body:
_log.error(line)
return False
if parsed_response.get('status') != 'OK':
_log.error("Uploaded JSON to %s but got an error:", url)
_log.error(json.dumps(parsed_response, indent=4))
return False
_log.info("JSON file uploaded to %s.", url)
return True
def _run_tests_set(self, tests):
failures = 0
self._results = []
for i, test in enumerate(tests):
_log.info('Running %s (%d of %d)', test.test_name(), i + 1, len(tests))
start_time = time.time()
metrics = test.run(self._options.time_out_ms)
if metrics:
self._results.append((test, metrics))
else:
failures += 1
_log.error('FAILED')
_log.info('Finished: %f s', time.time() - start_time)
_log.info('')
return failures
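# Invocation sketch (not part of the original module): PerfTestsRunner is
# normally driven by a wrapper script. The flags below are illustrative, and a
# real run needs a checkout/build that the chosen port can locate.
def _run_perf_tests_example():
    runner = PerfTestsRunner(args=['--platform', 'chromium', '--repeat', '1'])
    # run() returns the number of unexpected failures, or a negative
    # EXIT_CODE_* constant on setup/reporting errors.
    return runner.run()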
|
youtube/cobalt
|
third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
|
Python
|
bsd-3-clause
| 18,577
|
"""
Wrapper around the ``dateutil.rrule`` module.
Provides more consistent behavior with the rfc2445 specification,
notably differing from ``dateutil.rrule`` in the handling of the
`dtstart` parameter and the additional handling of a `dtend`
parameter. Also, the `byweekday` parameter in `dateutil.rrule` is
`byday` in this package to reflect the specification. See the `Rule`
and `Recurrence` class documentation for details on the differences.
"""
import re
import datetime
import calendar
import pytz
import dateutil.rrule
from django.conf import settings
from django.utils import dateformat
from django.utils.translation import ugettext as _, pgettext as _p
from django.utils.six import string_types
from recurrence import exceptions
YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY = range(7)
(JANUARY, FEBRUARY, MARCH, APRIL, MAY, JUNE, JULY, AUGUST,
SEPTEMBER, OCTOBER, NOVEMBER, DECEMBER) = range(1, 13)
localtz = pytz.timezone(settings.TIME_ZONE)
class Rule(object):
"""
A recurrence rule.
`Rule` is a representation of a rfc2445 `RECUR` type, used in
the `RRULE` and `EXRULE` properties. More information about the
`RECUR` type specification can be found in the rfc at
http://www.ietf.org/rfc/rfc2445.txt.
A `Rule` wraps the `dateutil.rrule.rrule` class while adhering
to the rfc2445 spec. Notably a `dtstart` parameter cannot be
specified with a `Rule` unlike `dateutil.rrule.rrule` as only one
`dtstart` can be used with a set of `RRULE` and `EXRULE` rfc2445
properties, therefore the `Recurrence` class (which is based on
`dateutil.rrule.rruleset`) accepts a `dtstart` parameter instead.
`Recurrence` also accepts a `dtend` parameter.
Documentation is largely sourced from the `dateutil.rrule.rrule`
documentation at http://labix.org/python-dateutil
:Variables:
`freq` : int
One of the enumerated constants `YEARLY`, `MONTHLY`,
`WEEKLY`, `DAILY`, `HOURLY`, `MINUTELY`, or `SECONDLY`,
specifying the base recurring frequency.
`interval` : int
The interval between each freq iteration. For example,
when using YEARLY, an interval of 2 means once every two
years, but with HOURLY, it means once every two hours. The
default interval is 1.
`wkst` : int
The week start day. Must be one of the `MO`, `TU`, `WE`,
`TH`, `FR`, `SA`, `SU` constants, or an integer,
specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week
start is taken from `calendar.firstweekday()`, and may be
modified by `calendar.setfirstweekday()`.
`count` : int
How many occurrences will be generated by this rule.
`until` : datetime.datetime
If given, this must be a `datetime.datetime` instance,
that will specify the limit of the recurrence. If a
recurrence instance happens to be the same as the
`datetime.datetime` instance given in the `until` keyword,
this will be the last occurrence.
`bysetpos` : int or sequence
If given, it must be either an integer, or a sequence of
integers, positive or negative. Each given integer will
specify an occurrence number, corresponding to the nth
occurrence of the rule inside the frequency period. For
example, a `bysetpos` of `-1` if combined with a `MONTHLY`
frequency, and a `byday` of `(MO, TU, WE, TH, FR)`, will
result in the last work day of every month.
`bymonth` : int or sequence
If given, it must be either an integer, or a sequence of
integers, meaning the months to apply the recurrence to.
`bymonthday` : int or sequence
If given, it must be either an integer, or a sequence of
integers, meaning the month days to apply the recurrence
to.
`byyearday` : int or sequence
If given, it must be either an integer, or a sequence of
integers, meaning the year days to apply the recurrence
to.
`byweekno` : int or sequence
If given, it must be either an integer, or a sequence of
integers, meaning the week numbers to apply the recurrence
to. Week numbers have the meaning described in ISO8601,
that is, the first week of the year is that containing at
least four days of the new year.
`byday` : int or sequence
If given, it must be either an integer `(0 == MO)`, a
sequence of integers, one of the weekday constants `(MO,
TU, ...)`, or a sequence of these constants. When given,
these variables will define the weekdays where the
recurrence will be applied. It's also possible to use an
argument n for the weekday instances, which will mean the
nth occurrence of this weekday in the period. For example,
with `MONTHLY`, or with `YEARLY` and `BYMONTH`, using
`FR(1)` in byweekday will specify the first friday of the
month where the recurrence happens.
`byhour` : int or sequence
If given, it must be either an integer, or a sequence of
integers, meaning the hours to apply the recurrence to.
`byminute` : int or sequence
If given, it must be either an integer, or a sequence of
integers, meaning the minutes to apply the recurrence to.
`bysecond` : int or sequence
If given, it must be either an integer, or a sequence of
integers, meaning the seconds to apply the recurrence to.
"""
byparams = (
'bysetpos', 'bymonth', 'bymonthday', 'byyearday',
'byweekno', 'byday', 'byhour', 'byminute', 'bysecond'
)
frequencies = (
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY'
)
weekdays = (
'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU'
)
firstweekday = calendar.firstweekday()
def __init__(
self, freq,
interval=1, wkst=None, count=None, until=None, **kwargs
):
"""
Create a new rule.
See `Rule` class documentation for available `**kwargs` and
parameter usage.
"""
self.freq = freq
self.interval = interval
self.wkst = wkst
self.count = count
self.until = until
for param in self.byparams:
if param in kwargs:
value = kwargs[param]
if hasattr(value, '__iter__'):
value = list(value)
if not value:
value = []
elif value is not None:
value = [value]
else:
value = []
setattr(self, param, value)
else:
setattr(self, param, [])
def __hash__(self):
byparam_values = []
for param in self.byparams:
byparam_values.append(param)
byparam_values.extend(getattr(self, param, []) or [])
return hash((
self.freq, self.interval, self.wkst, self.count, self.until,
tuple(byparam_values)))
def __eq__(self, other):
if not isinstance(other, Rule):
raise TypeError('object to compare must be Rule object')
return hash(self) == hash(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, short=False):
return rule_to_text(self, short)
def to_dateutil_rrule(self, dtstart=None, dtend=None, cache=False):
"""
Create a `dateutil.rrule.rrule` instance from this `Rule`.
:Parameters:
`dtstart` : datetime.datetime
The date/time the recurrence rule starts.
`dtend` : datetime.datetime
The rule should not yield occurrences past this
date. Replaces `until` if `until` is greater than
`dtend`. Note: `dtend` in this case does not count for
an occurrence itself.
`cache` : bool
If given, it must be a boolean value specifying to
enable or disable caching of results. If you will use
the same `dateutil.rrule.rrule` instance multiple
times, enabling caching will improve the performance
considerably.
:Returns:
A `dateutil.rrule.rrule` instance.
"""
kwargs = dict((p, getattr(self, p) or None) for p in self.byparams)
# dateutil.rrule calls this parameter 'byweekday', but we use the name
# 'byday' as originally specified by rfc2445.
kwargs['byweekday'] = kwargs.pop('byday')
until = self.until
if until:
until = normalize_offset_awareness(until, dtstart)
if dtend:
if until > dtend:
until = dtend
elif dtend:
until = dtend
return dateutil.rrule.rrule(
self.freq, dtstart, self.interval, self.wkst, self.count, until,
cache=cache, **kwargs)
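# Expansion sketch (not part of the original module): a Rule only becomes
# concrete dates once combined with a dtstart, here via to_dateutil_rrule().
# The dtstart value below is purely illustrative.
def _rule_example():
    rule = Rule(WEEKLY, interval=2, count=5, byday=[0, 2])  # every other Mon/Wed
    return list(rule.to_dateutil_rrule(dtstart=datetime.datetime(2015, 1, 5)))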
class Recurrence(object):
"""
A combination of `Rule` and `datetime.datetime` instances.
A `Recurrence` instance provides the combined behavior of the
rfc2445 `DTSTART`, `DTEND`, `RRULE`, `EXRULE`, `RDATE`, and
`EXDATE` properties in generating recurring date/times.
This is a wrapper around the `dateutil.rrule.rruleset` class while
adhering to the rfc2445 spec. Notably a `dtstart` parameter can be
given which cascades to all `dateutil.rrule.rrule` instances
generated by included `Rule` instances. A `dtend` parameter has
also been included to reflect the `DTEND` rfc2445 parameter.
:Variables:
`dtstart` : datetime.datetime
Optionally specify the first occurrence. This defaults to
`datetime.datetime.now()` when the occurrence set is
generated.
`dtend` : datetime.datetime
Optionally specify the last occurrence.
`rrules` : list
A list of `Rule` instances to include in the recurrence
set generation.
`exrules` : list
A list of `Rule` instances to include in the recurrence
set exclusion list. Dates which are part of the given
recurrence rules will not be generated, even if some
inclusive `Rule` or `datetime.datetime` instances matches
them.
`rdates` : list
A list of `datetime.datetime` instances to include in the
occurrence set generation.
`exdates` : list
A list of `datetime.datetime` instances to exclude in the
occurrence set generation. Dates included that way will
not be generated, even if some inclusive `Rule` or
`datetime.datetime` instances matches them.
"""
def __init__(
self, dtstart=None, dtend=None,
rrules=[], exrules=[], rdates=[], exdates=[]
):
"""
Create a new recurrence.
Parameters map directly to instance attributes, see
`Recurrence` class documentation for usage.
"""
self._cache = {}
self.dtstart = dtstart
self.dtend = dtend
self.rrules = list(rrules)
self.exrules = list(exrules)
self.rdates = list(rdates)
self.exdates = list(exdates)
def __iter__(self):
return self.occurrences()
def __unicode__(self):
return serialize(self)
def __hash__(self):
return hash((
self.dtstart, self.dtend,
tuple(self.rrules), tuple(self.exrules),
tuple(self.rdates), tuple(self.exdates)))
def __bool__(self):
if (self.dtstart or self.dtend or
tuple(self.rrules) or tuple(self.exrules) or
tuple(self.rdates) or tuple(self.exdates)):
return True
else:
return False
def __nonzero__(self):
# Required for Python 2 compatibility
return type(self).__bool__(self)
def __eq__(self, other):
if type(other) != type(self):
return False
if not isinstance(other, Recurrence):
raise TypeError('object to compare must be Recurrence object')
return hash(self) == hash(other)
def __ne__(self, other):
return not self.__eq__(other)
def occurrences(
self, dtstart=None, dtend=None, cache=False
):
"""
Get a generator yielding `datetime.datetime` instances in this
occurrence set.
:Parameters:
`dtstart` : datetime.datetime
Optionally specify the first occurrence of the
occurrence set. Defaults to `self.dtstart` if specified
or `datetime.datetime.now()` if not when the
occurrence set is generated.
`dtend` : datetime.datetime
Optionally specify the last occurrence of the
occurrence set. Defaults to `self.dtend` if specified.
`cache` : bool
Whether to cache the occurrence set generator.
:Returns:
A sequence of `datetime.datetime` instances.
"""
return self.to_dateutil_rruleset(dtstart, dtend, cache)
def count(self, dtstart=None, dtend=None, cache=False):
"""
Returns the number of occurrences in this occurrence set.
:Parameters:
`dtstart` : datetime.datetime
Optionally specify the first occurrence of the
occurrence set. Defaults to `self.dtstart` if specified
or `datetime.datetime.now()` if not when the
occurrence set is generated.
`dtend` : datetime.datetime
Optionally specify the last occurrence of the
occurrence set. Defaults to `self.dtend` if specified.
`cache` : bool
Whether to cache the occurrence set generator.
:Returns:
The number of occurrences in this occurrence set.
"""
return self.to_dateutil_rruleset(dtstart, dtend, cache).count()
def before(
self, dt, inc=False,
dtstart=None, dtend=None, cache=False
):
"""
Returns the last recurrence before the given
`datetime.datetime` instance.
:Parameters:
`dt` : datetime.datetime
The date to use as the threshold.
`inc` : bool
Defines what happens if `dt` is an occurrence. With
`inc == True`, if `dt` itself is an occurrence, it
will be returned.
`dtstart` : datetime.datetime
Optionally specify the first occurrence of the
occurrence set. Defaults to `self.dtstart` if specified
or `datetime.datetime.now()` if not when the
occurrence set is generated.
`dtend` : datetime.datetime
Optionally specify the last occurrence of the
occurrence set. Defaults to `self.dtend` if specified.
`cache` : bool
Whether to cache the occurrence set generator.
:Returns:
A `datetime.datetime` instance.
"""
return self.to_dateutil_rruleset(
dtstart, dtend, cache).before(dt, inc)
def after(
self, dt, inc=False,
dtstart=None, dtend=None, cache=False
):
"""
Returns the first recurrence after the given
`datetime.datetime` instance.
:Parameters:
`dt` : datetime.datetime
The date to use as the threshold.
`inc` : bool
Defines what happens if `dt` is an occurrence. With
`inc == True`, if `dt` itself is an occurrence, it
will be returned.
`dtstart` : datetime.datetime
Optionally specify the first occurrence of the
occurrence set. Defaults to `self.dtstart` if specified
or `datetime.datetime.now()` if not when the
occurrence set is generated.
`dtend` : datetime.datetime
Optionally specify the last occurrence of the
occurrence set. Defaults to `self.dtend` if specified.
`cache` : bool
Whether to cache the occurrence set generator.
:Returns:
A `datetime.datetime` instance.
"""
        return self.to_dateutil_rruleset(dtstart, dtend, cache).after(dt, inc)
def between(
self, after, before,
inc=False, dtstart=None, dtend=None, cache=False
):
"""
        Returns all occurrences between the given `after` and
        `before` `datetime.datetime` instances.
:Parameters:
`after` : datetime.datetime
Return dates after this date.
`before` : datetime.datetime
Return dates before this date.
`inc` : bool
Defines what happens if `after` and/or `before` are
themselves occurrences. With `inc == True`, they will
be included in the list, if they are found in the
occurrence set.
`dtstart` : datetime.datetime
Optionally specify the first occurrence of the
occurrence set. Defaults to `self.dtstart` if specified
or `datetime.datetime.now()` if not when the
occurrence set is generated.
`dtend` : datetime.datetime
Optionally specify the last occurrence of the
occurrence set. Defaults to `self.dtend` if specified.
`cache` : bool
Whether to cache the occurrence set generator.
:Returns:
A sequence of `datetime.datetime` instances.
"""
return self.to_dateutil_rruleset(
dtstart, dtend, cache).between(after, before, inc)
def to_dateutil_rruleset(self, dtstart=None, dtend=None, cache=False):
"""
Create a `dateutil.rrule.rruleset` instance from this
`Recurrence`.
:Parameters:
`dtstart` : datetime.datetime
The date/time the recurrence rule starts. This value
overrides the `dtstart` property specified by the
                `Recurrence` instance if it's set.
            `dtend` : datetime.datetime
                Optionally specify the last occurrence of the
                occurrence set. This value overrides the `dtend`
                property specified by the `Recurrence` instance if
                it's set.
`cache` : bool
If given, it must be a boolean value specifying to
enable or disable caching of results. If you will use
the same `dateutil.rrule.rrule` instance multiple
times, enabling caching will improve the performance
considerably.
:Returns:
A `dateutil.rrule.rruleset` instance.
"""
# all datetimes used in dateutil.rrule objects will need to be
# normalized to either offset-aware or offset-naive datetimes
# to avoid exceptions. dateutil will use the tzinfo from the
# given dtstart, which will cascade to other datetime objects.
dtstart = dtstart or self.dtstart
dtend = dtend or self.dtend
if dtend:
            dtend = normalize_offset_awareness(dtend, dtstart)
if cache:
# we need to cache an instance for each unique dtstart
# value because the occurrence values will differ.
cached = self._cache.get(dtstart)
if cached:
return cached
rruleset = dateutil.rrule.rruleset(cache=cache)
for rrule in self.rrules:
rruleset.rrule(rrule.to_dateutil_rrule(dtstart, dtend, cache))
for exrule in self.exrules:
rruleset.exrule(exrule.to_dateutil_rrule(dtstart, dtend, cache))
# RNVY changed : dtstart does not have to be included in the recurrence set
# if dtstart is not None:
# rruleset.rdate(dtstart)
for rdate in self.rdates:
rdate = normalize_offset_awareness(rdate, dtstart)
if dtend is not None and rdate < dtend:
rruleset.rdate(rdate)
elif not dtend:
rruleset.rdate(rdate)
# if dtend is not None:
# rruleset.rdate(dtend)
for exdate in self.exdates:
exdate = normalize_offset_awareness(exdate, dtstart)
if dtend is not None and exdate < dtend:
rruleset.exdate(exdate)
elif not dtend:
rruleset.exdate(exdate)
if cache:
self._cache[dtstart] = rruleset
return rruleset
class Weekday(object):
"""
Representation of a weekday.
A `Weekday` is essentially an integer from 0 to 6, with an
optional `index` which indicates its position in a month. For
    example, a `number` of 6 and an `index` of ``-1`` means the last
    Sunday of the month. Weekdays with a specific index can be
created by calling the existing `MO`, `TU`, `WE`, `TH`, `FR`,
`SA`, `SU` constants::
>>> SU(-1)
-1SU
`Weekday` objects have a smart equality test that can compare
integers, other `Weekday` objects, and string constants as defined
by rfc2445, such as '-1SU'.
"""
def __init__(self, number, index=None):
"""
Create a new weekday constant.
:Parameters:
`number` : int
A number in `range(7)`.
`index` : int
An integer specifying the weekday's position in the
month. A value of ``None`` or ``0`` means the index is
ambiguous and represents all weekdays of that number.
"""
int(number)
if number > 6:
raise ValueError('number must be in range(7)')
self.number = number
self.index = index
def __call__(self, index):
if index == self.index:
return self
else:
return Weekday(self.number, index)
def __hash__(self):
if self.index:
return hash((self.number, self.index))
else:
return hash(self.number)
def __eq__(self, other):
other = to_weekday(other)
return (self.number, self.index) == (other.number, other.index)
def __repr__(self):
if self.index:
return '%s%s' % (self.index, Rule.weekdays[self.number])
else:
return Rule.weekdays[self.number]
weekday = property(lambda self: self.number)
n = property(lambda self: self.index)
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU) = WEEKDAYS = list(map(lambda n: Weekday(n), range(7)))
def to_weekday(token):
"""
Attempt to convert an object to a `Weekday` constant.
:Parameters:
`token` : str, int, dateutil.rrule.weekday or `Weekday`
Can be values such as `MO`, `SU(-2)`, `"-2SU"`, or an
            integer like `1` for Tuesday. `dateutil.rrule.weekday`
            instances are converted to `Weekday` instances.
    :Returns:
        A `Weekday` instance.
"""
if isinstance(token, Weekday):
return token
if isinstance(token, dateutil.rrule.weekday):
return Weekday(token.weekday, token.n)
if isinstance(token, int):
if token > 6:
raise ValueError
return WEEKDAYS[token]
elif not token:
raise ValueError
elif isinstance(token, string_types) and token.isdigit():
if int(token) > 6:
raise ValueError
return WEEKDAYS[int(token)]
elif isinstance(token, string_types):
const = token[-2:].upper()
if const not in Rule.weekdays:
raise ValueError
nth = token[:-2]
if not nth:
return Weekday(list(Rule.weekdays).index(const))
else:
return Weekday(list(Rule.weekdays).index(const), int(nth))
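# Editor's note: a minimal usage sketch for `Weekday` and `to_weekday`; it is
# not part of the original module and is never called at import time.
def _weekday_examples():
    # All of these denote "last Sunday": an indexed constant, an rfc2445
    # string, and a `Weekday` built directly from number and index.
    assert to_weekday('-1SU') == SU(-1)
    assert to_weekday(SU(-1)) == Weekday(6, -1)
    # A bare integer or digit string maps to the ambiguous weekday constant.
    assert to_weekday(2) == WE
    assert to_weekday('2') == WE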
def validate(rule_or_recurrence):
if isinstance(rule_or_recurrence, Rule):
obj = Recurrence(rrules=[rule_or_recurrence])
else:
obj = rule_or_recurrence
try:
if not isinstance(obj, Rule) and not isinstance(obj, Recurrence):
raise exceptions.ValidationError('incompatible object')
except TypeError:
raise exceptions.ValidationError('incompatible object')
def validate_dt(dt):
if not isinstance(dt, datetime.datetime):
raise exceptions.ValidationError('invalid datetime: %r' % dt)
def validate_iterable(rule, param):
try:
[v for v in getattr(rule, param, []) if v]
except TypeError:
# TODO: I'm not sure it's possible to get here - all the
# places we call validate_iterable convert single ints to
# sequences, and other types raise TypeErrors earlier.
raise exceptions.ValidationError(
'%s parameter must be iterable' % param)
def validate_iterable_ints(rule, param, min_value=None, max_value=None):
for value in getattr(rule, param, []):
try:
value = int(value)
if min_value is not None:
if value < min_value:
raise ValueError
if max_value is not None:
if value > max_value:
raise ValueError
except ValueError:
raise exceptions.ValidationError(
'invalid %s parameter: %r' % (param, value))
def validate_rule(rule):
# validate freq
try:
Rule.frequencies[int(rule.freq)]
except IndexError:
raise exceptions.ValidationError(
'invalid freq parameter: %r' % rule.freq)
except ValueError:
raise exceptions.ValidationError(
'invalid freq parameter: %r' % rule.freq)
# validate interval
try:
interval = int(rule.interval)
if interval < 1:
raise ValueError
except ValueError:
raise exceptions.ValidationError(
'invalid interval parameter: %r' % rule.interval)
# validate wkst
if rule.wkst:
try:
to_weekday(rule.wkst)
except ValueError:
raise exceptions.ValidationError(
                    'invalid wkst parameter: %r' % rule.wkst)
# validate until
if rule.until:
try:
validate_dt(rule.until)
except ValueError:
# TODO: I'm not sure it's possible to get here
# (validate_dt doesn't raise ValueError)
raise exceptions.ValidationError(
'invalid until parameter: %r' % rule.until)
# validate count
if rule.count:
try:
int(rule.count)
except ValueError:
raise exceptions.ValidationError(
'invalid count parameter: %r' % rule.count)
# TODO: Should we check that you haven't specified both
# rule.count and rule.until? Note that we only serialize
# rule.until if there's no rule.count.
# validate byparams
for param in Rule.byparams:
validate_iterable(rule, param)
if param == 'byday':
for value in getattr(rule, 'byday', []):
try:
to_weekday(value)
except ValueError:
raise exceptions.ValidationError(
'invalid byday parameter: %r' % value)
elif param == 'bymonth':
validate_iterable_ints(rule, param, 1, 12)
elif param == 'bymonthday':
validate_iterable_ints(rule, param, 1, 31)
elif param == 'byhour':
validate_iterable_ints(rule, param, 0, 23)
elif param == 'byminute':
validate_iterable_ints(rule, param, 0, 59)
elif param == 'bysecond':
validate_iterable_ints(rule, param, 0, 59)
else:
validate_iterable_ints(rule, param)
if obj.dtstart:
validate_dt(obj.dtstart)
if obj.dtend:
validate_dt(obj.dtend)
if obj.rrules:
list(map(lambda rule: validate_rule(rule), obj.rrules))
if obj.exrules:
list(map(lambda rule: validate_rule(rule), obj.exrules))
if obj.rdates:
list(map(lambda dt: validate_dt(dt), obj.rdates))
if obj.exdates:
list(map(lambda dt: validate_dt(dt), obj.exdates))
def serialize(rule_or_recurrence):
"""
Serialize a `Rule` or `Recurrence` instance.
`Rule` instances are wrapped as an rrule in a `Recurrence`
instance before serialization, and will serialize as the `RRULE`
property.
All `datetime.datetime` objects will be converted and serialized
as UTC.
:Returns:
A rfc2445 formatted unicode string.
"""
def serialize_dt(dt):
if not dt.tzinfo:
dt = localtz.localize(dt)
dt = dt.astimezone(pytz.utc)
return u'%s%s%sT%s%s%sZ' % (
str(dt.year).rjust(4, '0'),
str(dt.month).rjust(2, '0'),
str(dt.day).rjust(2, '0'),
str(dt.hour).rjust(2, '0'),
str(dt.minute).rjust(2, '0'),
str(dt.second).rjust(2, '0'),
)
def serialize_rule(rule):
values = []
values.append((u'FREQ', [Rule.frequencies[rule.freq]]))
if rule.interval != 1:
values.append((u'INTERVAL', [str(int(rule.interval))]))
if rule.wkst:
            values.append((u'WKST', [Rule.weekdays[to_weekday(rule.wkst).number]]))
if rule.count is not None:
values.append((u'COUNT', [str(rule.count)]))
elif rule.until is not None:
values.append((u'UNTIL', [serialize_dt(rule.until)]))
if rule.byday:
days = []
for d in rule.byday:
d = to_weekday(d)
# TODO - this if/else copies what Weekday's __repr__
# does - perhaps we should refactor it into a __str__
# method on Weekday?
if d.index:
days.append(u'%s%s' % (d.index, Rule.weekdays[d.number]))
else:
days.append(Rule.weekdays[d.number])
values.append((u'BYDAY', days))
remaining_params = list(Rule.byparams)
remaining_params.remove('byday')
for param in remaining_params:
value_list = getattr(rule, param, None)
if value_list:
values.append((param.upper(), [str(n) for n in value_list]))
return u';'.join(u'%s=%s' % (i[0], u','.join(i[1])) for i in values)
if rule_or_recurrence is None:
return None
try:
validate(rule_or_recurrence)
except exceptions.ValidationError as error:
raise exceptions.SerializationError(error.args[0])
obj = rule_or_recurrence
if isinstance(obj, Rule):
obj = Recurrence(rrules=[obj])
items = []
if obj.dtstart:
if obj.dtstart.tzinfo:
dtstart = serialize_dt(obj.dtstart.astimezone(pytz.utc))
else:
dtstart = serialize_dt(
localtz.localize(obj.dtstart).astimezone(pytz.utc))
items.append((u'DTSTART', dtstart))
if obj.dtend:
if obj.dtend.tzinfo:
dtend = serialize_dt(obj.dtend.astimezone(pytz.utc))
else:
dtend = serialize_dt(
localtz.localize(obj.dtend).astimezone(pytz.utc))
items.append((u'DTEND', dtend))
for rrule in obj.rrules:
items.append((u'RRULE', serialize_rule(rrule)))
for exrule in obj.exrules:
items.append((u'EXRULE', serialize_rule(exrule)))
for rdate in obj.rdates:
if rdate.tzinfo:
rdate = rdate.astimezone(pytz.utc)
else:
rdate = localtz.localize(rdate).astimezone(pytz.utc)
items.append((u'RDATE', serialize_dt(rdate)))
for exdate in obj.exdates:
if exdate.tzinfo:
exdate = exdate.astimezone(pytz.utc)
else:
exdate = localtz.localize(exdate).astimezone(pytz.utc)
items.append((u'EXDATE', serialize_dt(exdate)))
return u'\n'.join(u'%s:%s' % i for i in items)
def deserialize(text):
"""
Deserialize a rfc2445 formatted string.
This is a basic parser that is a partial implementation of rfc2445
which pertains to specifying recurring date/times. Limitations
include:
- Only collects `DTSTART`, `DTEND`, `RRULE`, `EXRULE`, `RDATE`,
and `EXDATE` properties.
    - Does not capture parameter options (e.g. RDATE;VALUE=PERIOD).
`dateutil.rrule` does not support anything other than
`DATE-TIME` parameter types.
- `VTIMEZONE` and `TZID` can't be specified, so dates without
the 'Z' marker will be localized to
`settings.TIME_ZONE`. `datetime.datetime` objects in
      `Recurrence`/`Rule` objects will be serialized as UTC.
- The `DTSTART`, `DTEND`, `RDATE` and `EXDATE` properties also
only support the `DATE-TIME` type.
:Returns:
A `Recurrence` instance.
"""
def deserialize_dt(text):
try:
year, month, day = int(text[:4]), int(text[4:6]), int(text[6:8])
except ValueError:
raise exceptions.DeserializationError('malformed date-time: %r' % text)
if u'T' in text:
# time is also specified
try:
hour, minute, second = (
int(text[9:11]), int(text[11:13]), int(text[13:15]))
except ValueError:
raise exceptions.DeserializationError('malformed date-time: %r' % text)
else:
# only date is specified, use midnight
hour, minute, second = (0, 0, 0)
if u'Z' in text:
# time is in utc
tzinfo = pytz.utc
else:
# right now there is no support for VTIMEZONE/TZID since
# this is a partial implementation of rfc2445 so we'll
# just use the time zone specified in the Django settings.
tzinfo = localtz
dt = datetime.datetime(
year, month, day, hour, minute, second, tzinfo=tzinfo)
dt = dt.astimezone(localtz)
# set tz to settings.TIME_ZONE and return offset-naive datetime
return datetime.datetime(
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
dtstart, dtend, rrules, exrules, rdates, exdates = None, None, [], [], [], []
tokens = re.compile(
u'(DTSTART|DTEND|RRULE|EXRULE|RDATE|EXDATE)[^:]*:(.*)',
re.MULTILINE).findall(text)
if not tokens and text:
raise exceptions.DeserializationError('malformed data')
for label, param_text in tokens:
if not param_text:
raise exceptions.DeserializationError('empty property: %r' % label)
if u'=' not in param_text:
params = param_text
else:
params = {}
param_tokens = filter(lambda p: p, param_text.split(u';'))
for item in param_tokens:
try:
param_name, param_value = map(
lambda i: i.strip(), item.split(u'=', 1))
except ValueError:
raise exceptions.DeserializationError(
'missing parameter value: %r' % item)
params[param_name] = list(map(
lambda i: i.strip(), param_value.split(u',')))
if label in (u'RRULE', u'EXRULE'):
kwargs = {}
for key, value in params.items():
if key == u'FREQ':
try:
kwargs[str(key.lower())] = list(
Rule.frequencies).index(value[0])
except ValueError:
raise exceptions.DeserializationError(
'bad frequency value: %r' % value[0])
elif key == u'INTERVAL':
try:
kwargs[str(key.lower())] = int(value[0])
except ValueError:
raise exceptions.DeserializationError(
'bad interval value: %r' % value[0])
elif key == u'WKST':
try:
kwargs[str(key.lower())] = to_weekday(value[0])
except ValueError:
raise exceptions.DeserializationError(
'bad weekday value: %r' % value[0])
elif key == u'COUNT':
try:
kwargs[str(key.lower())] = int(value[0])
except ValueError:
raise exceptions.DeserializationError(
'bad count value: %r' % value[0])
elif key == u'UNTIL':
kwargs[str(key.lower())] = deserialize_dt(value[0])
elif key == u'BYDAY':
bydays = []
for v in value:
try:
bydays.append(to_weekday(v))
except ValueError:
raise exceptions.DeserializationError(
'bad weekday value: %r' % v)
kwargs[str(key.lower())] = bydays
elif key.lower() in Rule.byparams:
numbers = []
for v in value:
try:
numbers.append(int(v))
except ValueError:
raise exceptions.DeserializationError(
'bad value: %r' % value)
kwargs[str(key.lower())] = numbers
else:
raise exceptions.DeserializationError('bad parameter: %s' % key)
if 'freq' not in kwargs:
raise exceptions.DeserializationError(
'frequency parameter missing from rule')
if label == u'RRULE':
rrules.append(Rule(**kwargs))
else:
exrules.append(Rule(**kwargs))
elif label == u'DTSTART':
dtstart = deserialize_dt(params)
elif label == u'DTEND':
dtend = deserialize_dt(params)
elif label == u'RDATE':
rdates.append(deserialize_dt(params))
elif label == u'EXDATE':
exdates.append(deserialize_dt(params))
return Recurrence(dtstart, dtend, rrules, exrules, rdates, exdates)
def rule_to_text(rule, short=False):
"""
Render the given `Rule` as natural text.
:Parameters:
`short` : bool
Use abbreviated labels, i.e. 'Fri' instead of 'Friday'.
"""
frequencies = (
_('annually'), _('monthly'), _('weekly'), _('daily'),
_('hourly'), _('minutely'), _('secondly'),
)
timeintervals = (
_('years'), _('months'), _('weeks'), _('days'),
_('hours'), _('minutes'), _('seconds'),
)
if short:
positional_display = {
1: _('1st %(weekday)s'),
2: _('2nd %(weekday)s'),
3: _('3rd %(weekday)s'),
-1: _('last %(weekday)s'),
-2: _('2nd last %(weekday)s'),
-3: _('3rd last %(weekday)s'),
}
weekdays_display = (
_('Mon'), _('Tue'), _('Wed'),
_('Thu'), _('Fri'), _('Sat'), _('Sun'),
)
months_display = (
_('Jan'), _('Feb'), _('Mar'), _('Apr'),
_p('month name', 'May'), _('Jun'), _('Jul'), _('Aug'),
_('Sep'), _('Oct'), _('Nov'), _('Dec'),
)
else:
positional_display = {
1: _('first %(weekday)s'),
2: _('second %(weekday)s'),
3: _('third %(weekday)s'),
4: _('fourth %(weekday)s'),
-1: _('last %(weekday)s'),
-2: _('second last %(weekday)s'),
-3: _('third last %(weekday)s'),
}
weekdays_display = (
_('Monday'), _('Tuesday'), _('Wednesday'),
_('Thursday'), _('Friday'), _('Saturday'), _('Sunday'),
)
months_display = (
_('January'), _('February'), _('March'), _('April'),
_p('month name', 'May'), _('June'), _('July'), _('August'),
_('September'), _('October'), _('November'), _('December'),
)
def get_positional_weekdays(rule):
items = []
if rule.bysetpos and rule.byday:
for setpos in rule.bysetpos:
for byday in rule.byday:
byday = to_weekday(byday)
items.append(
positional_display.get(setpos) % {
'weekday': weekdays_display[byday.number]})
elif rule.byday:
for byday in rule.byday:
byday = to_weekday(byday)
items.append(
positional_display.get(byday.index, '%(weekday)s') % {
'weekday': weekdays_display[byday.number]})
return _(', ').join(items)
parts = []
if rule.interval > 1:
parts.append(
_('every %(number)s %(freq)s') % {
'number': rule.interval,
'freq': timeintervals[rule.freq]
})
else:
parts.append(frequencies[rule.freq])
if rule.freq == YEARLY:
if rule.bymonth:
# bymonths are 1-indexed (January is 1), months_display
# are 0-indexed (January is 0).
items = _(', ').join(
[months_display[month] for month in
[month_index - 1 for month_index in rule.bymonth]])
parts.append(_('each %(items)s') % {'items': items})
if rule.byday or rule.bysetpos:
parts.append(
_('on the %(items)s') % {
'items': get_positional_weekdays(rule)})
if rule.freq == MONTHLY:
if rule.bymonthday:
items = _(', ').join([
dateformat.format(
datetime.datetime(1, 1, day), 'jS')
for day in rule.bymonthday])
parts.append(_('on the %(items)s') % {'items': items})
elif rule.byday:
if rule.byday or rule.bysetpos:
parts.append(
_('on the %(items)s') % {
'items': get_positional_weekdays(rule)})
if rule.freq == WEEKLY:
if rule.byday:
items = _(', ').join([
weekdays_display[to_weekday(day).number]
for day in rule.byday])
parts.append(_('each %(items)s') % {'items': items})
    # daily frequencies have no additional formatting;
    # hour/minute/second formatting is not supported
if rule.count:
if rule.count == 1:
            parts.append(_('occurring once'))
        else:
            parts.append(_('occurring %(number)s times') % {
'number': rule.count})
elif rule.until:
parts.append(_('until %(date)s') % {
'date': dateformat.format(rule.until, 'Y-m-d')})
return _(', ').join(parts)
def normalize_offset_awareness(dt, from_dt=None):
"""
Given two `datetime.datetime` objects, return the second object as
timezone offset-aware or offset-naive depending on the existence
of the first object's tzinfo.
If the second object is to be made offset-aware, it is assumed to
be in the local timezone (with the timezone derived from the
    TIME_ZONE setting). If it is to be made offset-naive, it is first
converted to the local timezone before being made naive.
:Parameters:
`dt` : `datetime.datetime`
The datetime object to make offset-aware/offset-naive.
`from_dt` : `datetime.datetime`
            The datetime object whose tzinfo existence is tested. If
            this value is ``None`` or offset-naive, `dt` will be made
            offset-naive.
"""
if from_dt and from_dt.tzinfo and dt.tzinfo:
return dt
elif from_dt and from_dt.tzinfo and not dt.tzinfo:
dt = localtz.localize(dt)
elif dt.tzinfo:
dt = dt.astimezone(localtz)
dt = datetime.datetime(
dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second)
return dt
def from_dateutil_rrule(rrule):
"""
Convert a `dateutil.rrule.rrule` instance to a `Rule` instance.
:Returns:
        A `Rule` instance.
"""
kwargs = {}
kwargs['freq'] = rrule._freq
kwargs['interval'] = rrule._interval
if rrule._wkst != 0:
kwargs['wkst'] = rrule._wkst
kwargs['bysetpos'] = rrule._bysetpos
if rrule._count is not None:
kwargs['count'] = rrule._count
elif rrule._until is not None:
kwargs['until'] = rrule._until
days = []
if (rrule._byweekday is not None and (
WEEKLY != rrule._freq or len(rrule._byweekday) != 1 or
rrule._dtstart.weekday() != rrule._byweekday[0])):
# ignore byweekday if freq is WEEKLY and day correlates
# with dtstart because it was automatically set by
# dateutil
days.extend(Weekday(n) for n in rrule._byweekday)
if rrule._bynweekday is not None:
days.extend(Weekday(*n) for n in rrule._bynweekday)
if len(days) > 0:
kwargs['byday'] = days
if rrule._bymonthday is not None and len(rrule._bymonthday) > 0:
if not (rrule._freq <= MONTHLY and len(rrule._bymonthday) == 1 and
rrule._bymonthday[0] == rrule._dtstart.day):
# ignore bymonthday if it's generated by dateutil
kwargs['bymonthday'] = list(rrule._bymonthday)
if rrule._bynmonthday is not None and len(rrule._bynmonthday) > 0:
kwargs.setdefault('bymonthday', []).extend(rrule._bynmonthday)
if rrule._bymonth is not None and len(rrule._bymonth) > 0:
if (rrule._byweekday is not None or
len(rrule._bynweekday or ()) > 0 or not (
rrule._freq == YEARLY and len(rrule._bymonth) == 1 and
rrule._bymonth[0] == rrule._dtstart.month)):
# ignore bymonth if it's generated by dateutil
kwargs['bymonth'] = list(rrule._bymonth)
if rrule._byyearday is not None:
kwargs['byyearday'] = list(rrule._byyearday)
if rrule._byweekno is not None:
kwargs['byweekno'] = list(rrule._byweekno)
kwargs['byhour'] = list(rrule._byhour)
kwargs['byminute'] = list(rrule._byminute)
kwargs['bysecond'] = list(rrule._bysecond)
if (rrule._dtstart.hour in rrule._byhour and
rrule._dtstart.minute in rrule._byminute and
rrule._dtstart.second in rrule._bysecond):
# ignore byhour/byminute/bysecond automatically set by
# dateutil from dtstart
kwargs['byhour'].remove(rrule._dtstart.hour)
kwargs['byminute'].remove(rrule._dtstart.minute)
kwargs['bysecond'].remove(rrule._dtstart.second)
return Rule(**kwargs)
def from_dateutil_rruleset(rruleset):
"""
Convert a `dateutil.rrule.rruleset` instance to a `Recurrence`
instance.
:Returns:
A `Recurrence` instance.
"""
rrules = [from_dateutil_rrule(rrule) for rrule in rruleset._rrule]
exrules = [from_dateutil_rrule(exrule) for exrule in rruleset._exrule]
rdates = rruleset._rdate
exdates = rruleset._exdate
dts = [r._dtstart for r in rruleset._rrule] + rruleset._rdate
if len(dts) > 0:
dts.sort()
dtstart = dts[0]
else:
dtstart = None
    return Recurrence(
        dtstart=dtstart, rrules=rrules, exrules=exrules,
        rdates=rdates, exdates=exdates)
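# Editor's note: a minimal usage sketch, not part of the original module.  It
# assumes `Rule` accepts `freq` and the by* parameters as keyword arguments
# (as the deserializer above suggests) and sticks to offset-naive datetimes;
# the helper is illustrative only and is never called at import time.
def _recurrence_example():
    # Every Monday and Friday at the dtstart time, ten occurrences in total.
    rule = Rule(freq=WEEKLY, byday=[MO, FR], count=10)
    rec = Recurrence(
        dtstart=datetime.datetime(2020, 1, 6, 9, 0),
        rrules=[rule])
    first_three = list(rec.occurrences())[:3]
    # First occurrence strictly after an arbitrary date.
    next_after = rec.after(datetime.datetime(2020, 1, 10))
    return first_three, next_after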
|
linux2400/django-recurrence
|
recurrence/base.py
|
Python
|
bsd-3-clause
| 48,398
|
from .core import api
from .models import Message
api.create_api(Message, url_prefix='/api/v1')
|
NYCPython/irclogs
|
irclogs/api.py
|
Python
|
bsd-3-clause
| 97
|
for i in range(100):
print i+1;
|
lang010/acit
|
uva/10177/input.py
|
Python
|
bsd-3-clause
| 36
|
# -*- coding: utf-8 -*-
"""
Display the current transfer rates of a tor instance
Configuration parameters:
    cache_timeout: An integer specifying the cache life-time of the module's
output in seconds (default 2)
control_address: The address on which the Tor daemon listens for control
connections (default "127.0.0.1")
control_password: The password to use for the Tor control connection
(default None)
control_port: The port on which the Tor daemon listens for control
connections (default 9051)
format: A string describing the output format for the module
(default "↑ {up} ↓ {down}")
format_value: A string describing how to format the transfer rates
(default "[\?min_length=12 {rate:.1f} {unit}]")
rate_unit: The unit to use for the transfer rates
(default "B/s")
si_units: A boolean value selecting whether or not to use SI units
(default False)
Format placeholders:
{down} The incoming transfer rate
{up} The outgoing transfer rate
format_value placeholders:
{rate} The current transfer-rate's value
{unit} The current transfer-rate's unit
Requires:
stem: python module from pypi https://pypi.python.org/pypi/stem
Example:
```
tor_rate {
cache_timeout = 10
format = "IN: {down} | OUT: {up}"
control_port = 1337
control_password = "TertiaryAdjunctOfUnimatrix01"
si_units = True
}
order += "tor_rate"
```
@author Felix Morgner <felix.morgner@gmail.com>
@license 3-clause-BSD
SAMPLE OUTPUT
{'full_text': u'\u2191 652.3 B/s \u2193 938.1 B/s'}
"""
from stem import ProtocolError, SocketError
from stem.connection import AuthenticationFailure
from stem.control import Controller, EventType
ERROR_AUTHENTICATION = 'Error: Failed to authenticate with Tor daemon!'
ERROR_CONNECTION = 'Error: Failed to establish control connection!'
ERROR_PROTOCOL = 'Error: Failed to register event handler!'
class Py3status:
"""
"""
cache_timeout = 2
control_address = '127.0.0.1'
control_password = None
control_port = 9051
format = u'↑ {up} ↓ {down}'
format_value = '[\?min_length=12 {rate:.1f} {unit}]'
rate_unit = 'B/s'
si_units = False
def post_config_hook(self):
self._auth_failure = False
self._down = 0
self._handler_active = False
self._up = 0
def tor_rate(self, outputs, config):
text = ''
if not self._handler_active and not self._auth_failure:
try:
self._register_event_handler()
except ProtocolError:
text = ERROR_PROTOCOL
except SocketError:
text = ERROR_CONNECTION
except AuthenticationFailure:
text = ERROR_AUTHENTICATION
self._auth_failure = True
elif self._auth_failure:
text = ERROR_AUTHENTICATION
else:
text = self.py3.safe_format(self.format, self._get_rates())
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': text,
}
def _get_rates(self):
up, up_unit = self.py3.format_units(self._up,
unit=self.rate_unit,
si=self.si_units)
down, down_unit = self.py3.format_units(self._down,
unit=self.rate_unit,
si=self.si_units)
return {
'up': self.py3.safe_format(self.format_value, {
'rate': up,
'unit': up_unit,
}),
'down': self.py3.safe_format(self.format_value, {
'rate': down,
'unit': down_unit,
}),
}
def _handle_event(self, event):
self._down = event.read
self._up = event.written
def _register_event_handler(self):
self._control = Controller.from_port(address=self.control_address,
port=self.control_port)
if self.control_password:
self._control.authenticate(password=self.control_password)
self._control.add_event_listener(lambda e: self._handle_event(e),
EventType.BW)
self._handler_active = True
if __name__ == "__main__":
from py3status.module_test import module_test
config = {'control_password': 'SevenOfNine', }
module_test(Py3status, config)
|
docwalter/py3status
|
py3status/modules/tor_rate.py
|
Python
|
bsd-3-clause
| 4,540
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ShowurlsConfig(AppConfig):
name = 'showurls'
|
Niklas9/django-showurls
|
django_showurls/apps.py
|
Python
|
bsd-3-clause
| 132
|
# canonical.py - functions for converting systems to canonical forms
# RMM, 10 Nov 2012
from .exception import ControlNotImplemented
from .lti import issiso
from .statesp import StateSpace
from .statefbk import ctrb, obsv
from numpy import zeros, shape, poly, iscomplex, hstack, dot, transpose
from numpy.linalg import solve, matrix_rank, eig
__all__ = ['canonical_form', 'reachable_form', 'observable_form', 'modal_form',
'similarity_transform']
def canonical_form(xsys, form='reachable'):
"""Convert a system into canonical form
Parameters
----------
xsys : StateSpace object
System to be transformed, with state 'x'
form : String
Canonical form for transformation. Chosen from:
* 'reachable' - reachable canonical form
* 'observable' - observable canonical form
* 'modal' - modal canonical form
Returns
-------
zsys : StateSpace object
System in desired canonical form, with state 'z'
T : matrix
Coordinate transformation matrix, z = T * x
"""
    # Call the appropriate transformation function
if form == 'reachable':
return reachable_form(xsys)
elif form == 'observable':
return observable_form(xsys)
elif form == 'modal':
return modal_form(xsys)
else:
raise ControlNotImplemented(
"Canonical form '%s' not yet implemented" % form)
# Reachable canonical form
def reachable_form(xsys):
"""Convert a system into reachable canonical form
Parameters
----------
xsys : StateSpace object
System to be transformed, with state `x`
Returns
-------
zsys : StateSpace object
System in reachable canonical form, with state `z`
T : matrix
Coordinate transformation: z = T * x
"""
# Check to make sure we have a SISO system
if not issiso(xsys):
raise ControlNotImplemented(
"Canonical forms for MIMO systems not yet supported")
# Create a new system, starting with a copy of the old one
zsys = StateSpace(xsys)
# Generate the system matrices for the desired canonical form
zsys.B = zeros(shape(xsys.B))
zsys.B[0, 0] = 1.0
zsys.A = zeros(shape(xsys.A))
Apoly = poly(xsys.A) # characteristic polynomial
for i in range(0, xsys.states):
zsys.A[0, i] = -Apoly[i+1] / Apoly[0]
if (i+1 < xsys.states):
zsys.A[i+1, i] = 1.0
# Compute the reachability matrices for each set of states
Wrx = ctrb(xsys.A, xsys.B)
Wrz = ctrb(zsys.A, zsys.B)
if matrix_rank(Wrx) != xsys.states:
raise ValueError("System not controllable to working precision.")
# Transformation from one form to another
Tzx = solve(Wrx.T, Wrz.T).T # matrix right division, Tzx = Wrz * inv(Wrx)
# Check to make sure inversion was OK. Note that since we are inverting
# Wrx and we already checked its rank, this exception should never occur
if matrix_rank(Tzx) != xsys.states: # pragma: no cover
raise ValueError("Transformation matrix singular to working precision.")
# Finally, compute the output matrix
zsys.C = solve(Tzx.T, xsys.C.T).T # matrix right division, zsys.C = xsys.C * inv(Tzx)
return zsys, Tzx
def observable_form(xsys):
"""Convert a system into observable canonical form
Parameters
----------
xsys : StateSpace object
System to be transformed, with state `x`
Returns
-------
zsys : StateSpace object
System in observable canonical form, with state `z`
T : matrix
Coordinate transformation: z = T * x
"""
# Check to make sure we have a SISO system
if not issiso(xsys):
raise ControlNotImplemented(
"Canonical forms for MIMO systems not yet supported")
# Create a new system, starting with a copy of the old one
zsys = StateSpace(xsys)
# Generate the system matrices for the desired canonical form
zsys.C = zeros(shape(xsys.C))
zsys.C[0, 0] = 1
zsys.A = zeros(shape(xsys.A))
Apoly = poly(xsys.A) # characteristic polynomial
for i in range(0, xsys.states):
zsys.A[i, 0] = -Apoly[i+1] / Apoly[0]
if (i+1 < xsys.states):
zsys.A[i, i+1] = 1
# Compute the observability matrices for each set of states
Wrx = obsv(xsys.A, xsys.C)
Wrz = obsv(zsys.A, zsys.C)
# Transformation from one form to another
Tzx = solve(Wrz, Wrx) # matrix left division, Tzx = inv(Wrz) * Wrx
if matrix_rank(Tzx) != xsys.states:
raise ValueError("Transformation matrix singular to working precision.")
# Finally, compute the output matrix
zsys.B = Tzx * xsys.B
return zsys, Tzx
def modal_form(xsys):
"""Convert a system into modal canonical form
Parameters
----------
xsys : StateSpace object
System to be transformed, with state `x`
Returns
-------
zsys : StateSpace object
System in modal canonical form, with state `z`
T : matrix
Coordinate transformation: z = T * x
"""
# Check to make sure we have a SISO system
if not issiso(xsys):
raise ControlNotImplemented(
"Canonical forms for MIMO systems not yet supported")
# Create a new system, starting with a copy of the old one
zsys = StateSpace(xsys)
# Calculate eigenvalues and matrix of eigenvectors Tzx,
eigval, eigvec = eig(xsys.A)
    # Eigenvalues and corresponding eigenvectors are not sorted, so the
    # modal transformation is ambiguous. Sort eigenvalues and their
    # eigenvectors from largest to smallest eigenvalue.
idx = eigval.argsort()[::-1]
eigval = eigval[idx]
eigvec = eigvec[:,idx]
# If all eigenvalues are real, the matrix of eigenvectors is Tzx directly
if not iscomplex(eigval).any():
Tzx = eigvec
else:
# A is an arbitrary semisimple matrix
# Keep track of complex conjugates (need only one)
lst_conjugates = []
Tzx = None
for val, vec in zip(eigval, eigvec.T):
if iscomplex(val):
if val not in lst_conjugates:
lst_conjugates.append(val.conjugate())
if Tzx is not None:
Tzx = hstack((Tzx, hstack((vec.real.T, vec.imag.T))))
else:
Tzx = hstack((vec.real.T, vec.imag.T))
else:
# if conjugate has already been seen, skip this eigenvalue
lst_conjugates.remove(val)
else:
if Tzx is not None:
Tzx = hstack((Tzx, vec.real.T))
else:
Tzx = vec.real.T
# Generate the system matrices for the desired canonical form
zsys.A = solve(Tzx, xsys.A).dot(Tzx)
zsys.B = solve(Tzx, xsys.B)
zsys.C = xsys.C.dot(Tzx)
return zsys, Tzx
def similarity_transform(xsys, T, timescale=1):
"""Perform a similarity transformation, with option time rescaling.
Transform a linear state space system to a new state space representation
z = T x, where T is an invertible matrix.
Parameters
----------
T : 2D invertible array
The matrix `T` defines the new set of coordinates z = T x.
timescale : float
If present, also rescale the time unit to tau = timescale * t
Returns
-------
zsys : StateSpace object
System in transformed coordinates, with state 'z'
"""
# Create a new system, starting with a copy of the old one
zsys = StateSpace(xsys)
# Define a function to compute the right inverse (solve x M = y)
def rsolve(M, y):
return transpose(solve(transpose(M), transpose(y)))
# Update the system matrices
zsys.A = rsolve(T, dot(T, zsys.A)) / timescale
zsys.B = dot(T, zsys.B) / timescale
zsys.C = rsolve(T, zsys.C)
return zsys
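# Editor's note: an illustrative sketch, not part of the original module.  It
# assumes the installed `control` package re-exports `ss` and the
# `canonical_form` defined above (as python-control's top-level package does);
# the helper is never called at import time.
def _canonical_form_example():
    import control
    # A simple controllable SISO system in arbitrary coordinates.
    xsys = control.ss([[0., 1.], [-2., -3.]], [[0.], [1.]], [[1., 0.]], [[0.]])
    zsys, T = control.canonical_form(xsys, form='reachable')
    print(zsys.A)  # companion-form A matrix
    print(T)       # coordinate change z = T * x
    return zsys, T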
|
roryyorke/python-control
|
control/canonical.py
|
Python
|
bsd-3-clause
| 7,940
|
# System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:5])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgentNN(DynaMixin, QAgentNN):
def __init__(self, **kwargs):
super(Dyna_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'mhC'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
# phi_length = 5
# dim_state = (1, phi_length, 3+2)
# range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
# range_state = [[range_state_slice]*phi_length]
# | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 0, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgentNN(
env_model=env_model, num_sim=num_sim,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavy lifting
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
|
zaxliu/deepnap
|
experiments/kdd-exps/experiment_DynaQNN_130_Feb14_0026.py
|
Python
|
bsd-3-clause
| 5,180
|
#!/usr/bin/env python3
from setuptools import setup
import os
import re
main_py = open(os.path.join('lib', 'urlwatch', '__init__.py')).read()
m = dict(re.findall("\n__([a-z]+)__ = '([^']+)'", main_py))
docs = re.findall('"""(.*?)"""', main_py, re.DOTALL)
m['name'] = 'urlwatch'
m['author'], m['author_email'] = re.match(r'(.*) <(.*)>', m['author']).groups()
m['description'], m['long_description'] = docs[0].strip().split('\n\n', 1)
m['download_url'] = '{url}urlwatch-{version}.tar.gz'.format(**m)
m['install_requires'] = ['minidb', 'PyYAML', 'requests', 'keyring', 'pycodestyle']
m['scripts'] = ['urlwatch']
m['package_dir'] = {'': 'lib'}
m['packages'] = ['urlwatch']
m['data_files'] = [
('share/man/man1', ['share/man/man1/urlwatch.1']),
('share/urlwatch/examples', [
'share/urlwatch/examples/hooks.py.example',
'share/urlwatch/examples/urls.yaml.example',
]),
]
setup(**m)
|
lechuckcaptain/urlwatch
|
setup.py
|
Python
|
bsd-3-clause
| 909
|
from __future__ import unicode_literals
import hashlib
import json
import os
import posixpath
import re
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict # Python 2.6
except:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("'django_future_staticfiles' needs "
"'ordereddict' package installed for use "
"with Python 2.6")
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, get_cache,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves.urllib.parse import (
unquote, urldefrag, urlsplit, urlunsplit,
)
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
        # FileSystemStorage falls back to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class HashedFilesMixin(object):
default_template = """url("%s")"""
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super(HashedFilesMixin, self).__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Returns a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
opened = False
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
hashed_name = self.stored_name(clean_name)
final_url = super(HashedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name, template=None):
"""
Returns the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return template % unquote(relative_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read().decode(settings.FILE_CHARSET)
for patterns in self._patterns.values():
for pattern, template in patterns:
converter = self.url_converter(name, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_text(self.clean_name(saved_name))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_text(self.clean_name(saved_name))
# and then set the cache accordingly
hashed_files[self.hash_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally store the processed paths
self.hashed_files.update(hashed_files)
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def stored_name(self, name):
hash_key = self.hash_key(name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
# store the hashed name if there was a miss, e.g.
# when the files are still processed
self.hashed_files[hash_key] = cache_name
return cache_name
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
def __init__(self, *args, **kwargs):
super(ManifestFilesMixin, self).__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode('utf-8')
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version')
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
all_post_processed = super(ManifestFilesMixin,
self).post_process(*args, **kwargs)
for post_processed in all_post_processed:
yield post_processed
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode('utf-8')
self._save(self.manifest_name, ContentFile(contents))
class _MappingCache(object):
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(get_cache('staticfiles'))
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
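# Editor's note: a minimal configuration sketch, not part of this module.  In
# a Django project's settings.py one of the backends above is selected via
# STATICFILES_STORAGE; the dotted path below is assumed from this repository's
# layout and may differ in an actual install.
STATICFILES_STORAGE = (
    'django_future_staticfiles.storage.ManifestStaticFilesStorage')
# Running `python manage.py collectstatic` then writes hashed copies of every
# static file plus a `staticfiles.json` manifest mapping original names to
# their hashed counterparts.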
|
dsanders11/django-future-staticfiles
|
django_future_staticfiles/storage.py
|
Python
|
bsd-3-clause
| 14,915
|
import itertools
def calc_key(new_data_perm, old_data, heuristic):
return sum([heuristic(new, old) for (new, old) in zip(new_data_perm, old_data)])
def find_best_match(old_data, new_data, heuristic):
"""
Finds the best way to align arbitrary new data with old data. Tests all possible
permutations of the new data so that heuristic score is maximized.
"""
permutations = itertools.permutations(new_data)
diffs = [(permutation, calc_key(permutation, old_data, heuristic)) for permutation in permutations]
sorted_diffs = sorted(diffs, key=lambda tup: tup[1])
return sorted_diffs[0][0]
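# Editor's note: a small usage sketch, not part of the original module.  The
# heuristic is assumed to score each (new, old) pair; note that, as written
# above, the permutation with the *lowest* total score is returned (the list
# is sorted ascending and element 0 is taken), so a distance-style heuristic
# such as the one below fits that behaviour.
if __name__ == '__main__':
    old = [10, 20, 30]
    new = [31, 9, 22]
    # Absolute difference as a distance heuristic: smaller is a better match.
    best = find_best_match(old, new, lambda n, o: abs(n - o))
    print(best)  # -> (9, 22, 31): each new value aligned with its old slot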
|
cuauv/software
|
vision/modules/will_common.py
|
Python
|
bsd-3-clause
| 621
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
def _enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
# instead of "/usr/bin/env /dir with spaces/bin/jython"
        # otherwise the whole env invocation would be quoted as one path
if executable.startswith('/usr/bin/env '):
env, _executable = executable.split(' ', 1)
if ' ' in _executable and not _executable.startswith('"'):
executable = '%s "%s"' % (env, _executable)
else:
if not executable.startswith('"'):
executable = '"%s"' % executable
return executable
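# Editor's note illustrating the behaviour above (not part of distlib):
#   _enquote_executable('/usr/bin/env /dir with spaces/bin/jython')
#       -> '/usr/bin/env "/dir with spaces/bin/jython"'
#   _enquote_executable('/path with spaces/python')
#       -> '"/path with spaces/python"'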
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java' and
os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
self._is_nt = os.name == 'nt' or (
os.name == 'java' and os._name == 'nt')
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
# The Jython workaround is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = os.path.join(os.path.basename(get_executable()))
elif in_venv(): # pragma: no cover
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else: # pragma: no cover
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows
executable = os.path.normcase(executable)
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
executable = _enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp): # pragma: no cover
post_interp += b' -X:Frames'
shebang = b'#!' + executable + post_interp + b'\n'
# The Python parser reads a script as UTF-8 until it encounters a
# #coding:xxx cookie. The shebang has to be the first line of the file,
# so the cookie cannot appear before it; hence the shebang itself must be
# decodable from UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script uses a custom encoding (declared via a #coding:xxx
# cookie), the shebang has to be decodable from that encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
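# Editor's note: the returned value is a byte string such as
# b'#!/usr/bin/python\n' (the path shown is purely illustrative), followed by
# any post_interp arguments; a ValueError is raised if it cannot be decoded
# as UTF-8 or, where given, the script's own encoding.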
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and self._is_nt
linesep = os.linesep.encode('utf-8')
if not use_launcher:
script_bytes = shebang + linesep + script_bytes
else: # pragma: no cover
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + linesep + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher: # pragma: no cover
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError: # pragma: no cover
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line: # pragma: no cover
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line: # pragma: no cover
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
result = finder(distlib_package).find(name).bytes
return result
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
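# Editor's sketch (paths and entry name are illustrative, not from the source):
#   maker = ScriptMaker('source_scripts', 'build/bin')
#   maker.make('foo = mypkg.cli:main')   # wrapper script for a callable
#   maker.make('legacy_tool.py')         # copy and adjust an existing script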
def make_multiple(self, specifications, options=None):
"""
Take a list of specifications and make scripts from them.
:param specifications: A list of specifications.
:return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
|
tequa/ammisoft
|
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/pip/_vendor/distlib/scripts.py
|
Python
|
bsd-3-clause
| 15,256
|
from tornado.web import RequestHandler
from swampdragon.default_settings import SwampDragonSettings
from django.conf import settings as django_settings
def get_host():
host = django_settings.DRAGON_URL
if host.endswith('/'):
return host[:-1]
return host
class SettingsHandler(RequestHandler):
def set_default_headers(self):
self.set_header("Content-Type", "application/javascript")
def get(self, *args, **kwargs):
data = '''window.swampdragon_settings = {settings};
window.swampdragon_host = "{host}";
'''.format(**{
'settings': SwampDragonSettings().to_dict(),
'host': get_host()
})
self.write(data)
|
sahlinet/swampdragon
|
swampdragon/settings_provider.py
|
Python
|
bsd-3-clause
| 690
|
##########################################################################
#
# Copyright (c) 2017, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECoreDelightPreviewTest import *
from DelightRenderTest import DelightRenderTest
from InteractiveDelightRenderTest import InteractiveDelightRenderTest
from ModuleTest import ModuleTest
if __name__ == "__main__":
import unittest
unittest.main()
|
appleseedhq/gaffer
|
python/GafferDelightTest/__init__.py
|
Python
|
bsd-3-clause
| 2,045
|
#!/usr/bin/python
######
# b64calc: test buffer and overflow calculation
###
#
# Copyright (C) 2012 Jimmy Scott #jimmy#inet-solutions#be#. Belgium.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. The names of the authors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
######
import base64
def b64calc(s):
# slen = length of input string
slen = len(s)
# blen = length of buffer to allocate to fit the string
blen = ((((slen) + 2) / 3) * 4) + 1
# mlen = max length of string that can fit the buffer
mlen = ((blen / 4) * 3)
# encode string
enc = base64.b64encode(s)
# elen = actual length of base64 encoded string
elen = len(enc)
print "slen = %i // blen = %i // elen = %i // mlen = %i" % \
(slen, blen, elen, mlen)
if (slen > mlen):
print "ERROR: STRING IS LONGER THAN MAX STRING: %i > %i" % \
(slen, mlen)
if (elen >= blen):
print "ERROR: ENC STRING DOES NOT FIT IN BUFFER: %i >= %i" % \
(elen, blen)
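# Worked example (editor's note): for slen = 5, blen = ((5 + 2) / 3) * 4 + 1 = 9
# and mlen = (9 / 4) * 3 = 6; the 5-byte input encodes to elen = 8 base64
# characters, which fits the 9-byte buffer with room for a terminating NUL.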
for x in xrange(1,64):
b64calc("\xFF" * x)
|
jimmy-scott/prcat
|
test/b64calc.py
|
Python
|
bsd-3-clause
| 2,305
|
# coding: utf-8
import string
import unittest
import random
import os
import platform
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyperclip import _executable_exists, HAS_DISPLAY
from pyperclip.clipboards import (init_osx_clipboard,
init_gtk_clipboard, init_qt_clipboard,
init_xclip_clipboard, init_xsel_clipboard,
init_klipper_clipboard, init_no_clipboard)
from pyperclip.windows import init_windows_clipboard
class _TestClipboard(unittest.TestCase):
clipboard = None
supports_unicode = True
@property
def copy(self):
return self.clipboard[0]
@property
def paste(self):
return self.clipboard[1]
def setUp(self):
if not self.clipboard:
self.skipTest("Clipboard not supported.")
def test_copy_simple(self):
self.copy("pyper\r\nclip")
def test_copy_paste_simple(self):
msg = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(1000))
self.copy(msg)
self.assertEqual(self.paste(), msg)
def test_copy_paste_whitespace(self):
msg = ''.join(random.choice(string.whitespace) for _ in range(1000))
self.copy(msg)
self.assertEqual(self.paste(), msg)
def test_copy_blank(self):
self.copy('TEST')
self.copy('')
self.assertEqual(self.paste(), '')
def test_copy_unicode(self):
if not self.supports_unicode:
raise unittest.SkipTest()
self.copy(u"ಠ_ಠ")
def test_copy_paste_unicode(self):
if not self.supports_unicode:
raise unittest.SkipTest()
msg = u"ಠ_ಠ"
self.copy(msg)
self.assertEqual(self.paste(), msg)
class TestCygwin(_TestClipboard):
if 'cygwin' in platform.system().lower():
clipboard = init_windows_clipboard(True)
class TestWindows(_TestClipboard):
if os.name == 'nt' or platform.system() == 'Windows':
clipboard = init_windows_clipboard()
class TestOSX(_TestClipboard):
if os.name == 'mac' or platform.system() == 'Darwin':
clipboard = init_osx_clipboard()
class TestGtk(_TestClipboard):
if HAS_DISPLAY:
try:
import gtk
except ImportError:
pass
else:
clipboard = init_gtk_clipboard()
class TestQt(_TestClipboard):
if HAS_DISPLAY:
try:
import PyQt4
except ImportError:
pass
else:
clipboard = init_qt_clipboard()
class TestXClip(_TestClipboard):
if _executable_exists("xclip"):
clipboard = init_xclip_clipboard()
class TestXSel(_TestClipboard):
if _executable_exists("xsel"):
clipboard = init_xsel_clipboard()
class TestKlipper(_TestClipboard):
if _executable_exists("klipper") and _executable_exists("qdbus"):
clipboard = init_klipper_clipboard()
class TestNoClipboard(unittest.TestCase):
copy, paste = init_no_clipboard()
def test_copy(self):
with self.assertRaises(RuntimeError):
self.copy("foo")
def test_paste(self):
with self.assertRaises(RuntimeError):
self.paste()
if __name__ == '__main__':
unittest.main()
|
ZEDGR/pyperclip
|
tests/test_copy_paste.py
|
Python
|
bsd-3-clause
| 3,313
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class BlankPage(page_module.Page):
def __init__(self, url, page_set):
super(BlankPage, self).__init__(url, page_set=page_set)
class BlankPageSet(story.StorySet):
"""A single blank page."""
def __init__(self):
super(BlankPageSet, self).__init__()
self.AddUserStory(BlankPage('file://blank_page/blank_page.html', self))
|
SaschaMester/delicium
|
tools/perf/page_sets/blank_page.py
|
Python
|
bsd-3-clause
| 578
|
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='vuyrHnNU3DKLn6z9G%G{')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
|
angryjoe/propertytrack
|
config/settings/local.py
|
Python
|
bsd-3-clause
| 1,920
|
from django.db import models
import fernet_fields as fields
class EncryptedText(models.Model):
value = fields.EncryptedTextField()
class EncryptedChar(models.Model):
value = fields.EncryptedCharField(max_length=25)
class EncryptedEmail(models.Model):
value = fields.EncryptedEmailField()
class EncryptedInt(models.Model):
value = fields.EncryptedIntegerField()
class EncryptedDate(models.Model):
value = fields.EncryptedDateField()
class EncryptedDateTime(models.Model):
value = fields.EncryptedDateTimeField()
class EncryptedNullable(models.Model):
value = fields.EncryptedIntegerField(null=True)
|
orcasgit/django-fernet-fields
|
fernet_fields/test/models.py
|
Python
|
bsd-3-clause
| 641
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-filebrowser-no-grappelli',
version='3.5.7',
description='Media-Management no Grappelli',
long_description = read('README.rst'),
url = 'https://github.com/smacker/django-filebrowser-no-grappelli',
download_url='',
author='Patrick Kranzlmueller, Axel Swoboda (vonautomatisch)',
author_email='office@vonautomatisch.at',
maintainer='Maxim Sukharev',
maintainer_email='max@smacker.ru',
license='BSD',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
zip_safe = False,
)
|
haight6716/filebrowser
|
setup.py
|
Python
|
bsd-3-clause
| 1,362
|
from abc import ABCMeta, abstractmethod
import logging
import json
import pandas as pd
from six import with_metaclass
# doctest-only imports
from topik.preprocessing import preprocess
from topik.readers import read_input
from topik.tests import test_data_path
from topik.intermediaries.persistence import Persistor
registered_models = {}
def register_model(cls):
global registered_models
if cls.__name__ not in registered_models:
registered_models[cls.__name__] = cls
return cls
class TopicModelBase(with_metaclass(ABCMeta)):
corpus = None
@abstractmethod
def get_top_words(self, topn):
"""Method should collect top n words per topic, translate indices/ids to words.
Return a list of lists of tuples:
- outer list: topics
- inner lists: length topn collection of (weight, word) tuples
"""
pass
@abstractmethod
def save(self, filename, saved_data):
self.persistor.store_model(self.get_model_name_with_parameters(),
{"class": self.__class__.__name__,
"saved_data": saved_data})
self.corpus.save(filename)
@abstractmethod
def get_model_name_with_parameters(self):
raise NotImplementedError
def termite_data(self, filename=None, topn_words=15):
"""Generate the csv file input for the termite plot.
Parameters
----------
filename: string
Desired name for the generated csv file
topn_words: int
Number of top words to collect per topic (default: 15)
>>> raw_data = read_input('{}/test_data_json_stream.json'.format(test_data_path), "abstract")
>>> processed_data = preprocess(raw_data) # preprocess returns a DigestedDocumentCollection
>>> model = registered_models["LDA"](processed_data, ntopics=3)
>>> model.termite_data('termite.csv', 15)
"""
count = 1
for topic in self.get_top_words(topn_words):
if count == 1:
df_temp = pd.DataFrame(topic, columns=['weight', 'word'])
df_temp['topic'] = pd.Series(count, index=df_temp.index)
df = df_temp
else:
df_temp = pd.DataFrame(topic, columns=['weight', 'word'])
df_temp['topic'] = pd.Series(count, index=df_temp.index)
df = df.append(df_temp, ignore_index=True)
count += 1
if filename:
logging.info("saving termite plot input csv file to %s " % filename)
df.to_csv(filename, index=False, encoding='utf-8')
return
return df
@property
def persistor(self):
return self.corpus.persistor
def load_model(filename, model_name):
"""Loads a JSON file containing instructions on how to load model data.
Returns a TopicModelBase-derived object."""
p = Persistor(filename)
if model_name in p.list_available_models():
data_dict = p.get_model_details(model_name)
model = registered_models[data_dict['class']](**data_dict["saved_data"])
else:
raise NameError("Model name {} has not yet been created.".format(model_name))
return model
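# Usage sketch (editor's illustration; the file and model names are
# hypothetical and depend on what was previously saved via model.save()):
#   model = load_model('my_corpus_output.json', 'LDA_3_topics')
#   top_words = model.get_top_words(topn=15)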
|
lewismc/topik
|
topik/models/model_base.py
|
Python
|
bsd-3-clause
| 3,158
|
import datetime
import elasticsearch_dsl.filter as es_filter
from elasticsearch_dsl import aggs, query, SF
from rest_framework.filters import BaseFilterBackend
from mkt.games.constants import GAME_CATEGORIES
class DailyGamesFilter(BaseFilterBackend):
"""
Randomly chooses 4 games, one from each featured game category, based on
the current date so that the games are shuffled daily.
The query:
- Selects only games that match the featured game category tags.
- Scores randomly using random_score using date as seed.
- Buckets by tag, using Top Hits with size=1 to select only one game
from each category.
- elastic.co/guide/en/elasticsearch/guide/current/top-hits.html
"""
def filter_queryset(self, request, queryset, view):
daily_seed = int(datetime.datetime.now().strftime('%Y%m%d'))
# For each game category, create a query that matches the tag, orders
# randomly based on the day.
generate_game_category_query = (lambda cat: query.Q(
'function_score',
# Consistently random based on the day.
functions=[SF('random_score', seed=daily_seed)],
filter=es_filter.Bool(must=[es_filter.Term(tags=cat)]),
))
# Map over the game categories to create a function score query for each
# one and combine them in a Bool 'should' clause.
game_query = query.Bool(should=map(generate_game_category_query,
GAME_CATEGORIES))
# Run a size=1 TopHits aggregation to only select one game from each
# tag. Results will have to be pulled out of S.execute().aggregations
# rather than S.execute().hits.
top_hits = aggs.TopHits(size=1)
a = aggs.A('terms', field='tags', aggs={'first_game': top_hits})
queryset = queryset.query(game_query)
queryset.aggs.bucket('top_hits', a) # Not chainable.
return queryset
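# Editor's note (illustrative): because the selection happens inside the
# 'top_hits' terms aggregation, callers read the chosen games from
# S.execute().aggregations (one bucket per tag, each holding a single
# 'first_game' hit) rather than from S.execute().hits, as noted above.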
|
Hitechverma/zamboni
|
mkt/games/filters.py
|
Python
|
bsd-3-clause
| 1,969
|
from swampdragon_auth.socketconnection import HttpDataConnection
from .online import user_manager
class Connection(HttpDataConnection):
def __init__(self, session):
self._user = None
super(Connection, self).__init__(session)
def on_heartbeat(self):
if self.user:
user_manager.add_user(self.user.pk)
super(Connection, self).on_heartbeat()
def on_open(self, request):
super(Connection, self).on_open(request)
def on_close(self):
super(Connection, self).on_close()
user_manager.remove_user(self.user.pk)
|
jonashagstedt/swampdragon-notifications
|
swampdragon_notifications/notification_connection.py
|
Python
|
bsd-3-clause
| 590
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import verificationresult
from .fhirdate import FHIRDate
class VerificationResultTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("VerificationResult", js["resourceType"])
return verificationresult.VerificationResult(js)
def testVerificationResult1(self):
inst = self.instantiate_from("verificationresult-example.json")
self.assertIsNotNone(inst, "Must have instantiated a VerificationResult instance")
self.implVerificationResult1(inst)
js = inst.as_json()
self.assertEqual("VerificationResult", js["resourceType"])
inst2 = verificationresult.VerificationResult(js)
self.implVerificationResult1(inst2)
def implVerificationResult1(self, inst):
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.status, "attested")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Generated Narrative with Details</b></p><p><b>id</b>: example</p><p><b>status</b>: attested</p></div>")
self.assertEqual(inst.text.status, "generated")
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/verificationresult_tests.py
|
Python
|
bsd-3-clause
| 1,737
|
from .id3 import Id3Estimator
from . import id3
from .export import export_graphviz
from .export import export_text
__all__ = ['Id3Estimator', 'export_graphviz', 'export_text']
|
svaante/decision-tree-id3
|
id3/__init__.py
|
Python
|
bsd-3-clause
| 178
|
from setuptools import setup, find_packages
version = '1.0.7'
LONG_DESCRIPTION = """
How to use django-bootstrap-pagination
--------------------------------------
``django-bootstrap-pagination`` allows for easy Digg-style pagination without modifying
your views.
There are really 6 steps to setting it up with your projects (not including
installation, which is covered in INSTALL.txt in this same directory).
1. List this application in the ``INSTALLED_APPS`` portion of your settings
file. Your settings file might look something like::
INSTALLED_APPS = (
# ...
'bootstrap_pagination',
)
2. Install the pagination middleware. Your settings file might look something
like::
MIDDLEWARE_CLASSES = (
# ...
'bootstrap_pagination.middleware.PaginationMiddleware',
)
3. If it's not already added in your setup, add the request context processor.
Note that context processors are set by default implicitly, so to set them
explicitly, you need to copy and paste this code into your settings file
under the value TEMPLATE_CONTEXT_PROCESSORS::
("django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request")
4. Add this line at the top of your template to load the pagination tags:
{% load pagination_tags %}
5. Decide on a variable that you would like to paginate, and use the
autopaginate tag on that variable before iterating over it. This could
take one of two forms (using the canonical ``object_list`` as an example
variable):
{% autopaginate object_list %}
This assumes that you would like to have the default 20 results per page.
If you would like to specify your own amount of results per page, you can
specify that like so:
{% autopaginate object_list 10 %}
Note that this replaces ``object_list`` with the list for the current page, so
you can iterate over the ``object_list`` like you normally would.
6. Now you want to display the current page and the available pages, so
somewhere after having used autopaginate, use the paginate inclusion tag:
{% paginate %}
This does not take any arguments, but does assume that you have already
called autopaginate, so make sure to do so first.
That's it! You have now paginated ``object_list`` and given users of the site
a way to navigate between the different pages--all without touching your views.
Optional Settings
------------------
In django-bootstrap-pagination, there are no required settings. There are, however, a
small set of optional settings useful for changing the default behavior of the
pagination tags. Here's an overview:
``PAGINATION_DEFAULT_PAGINATION``
The default amount of items to show on a page if no number is specified.
``PAGINATION_DEFAULT_WINDOW``
The number of items to the left and to the right of the current page to
display (accounting for ellipses).
``PAGINATION_DEFAULT_ORPHANS``
The number of orphans allowed. According to the Django documentation,
orphans are defined as::
The minimum number of items allowed on the last page, defaults to zero.
``PAGINATION_INVALID_PAGE_RAISES_404``
Determines whether an invalid page raises an ``Http404`` or just sets the
``invalid_page`` context variable. ``True`` does the former and ``False``
does the latter.
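For example, a project could override the defaults from its settings module
(the values below are purely illustrative)::
PAGINATION_DEFAULT_PAGINATION = 25
PAGINATION_DEFAULT_WINDOW = 4
PAGINATION_DEFAULT_ORPHANS = 2
PAGINATION_INVALID_PAGE_RAISES_404 = True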
"""
setup(
name='django-bootstrap-pagination',
version=version,
description="django-bootstrap-pagination",
long_description=LONG_DESCRIPTION,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Framework :: Django",
"Environment :: Web Environment",
],
keywords='bootstrap,pagination,django',
author='Eric Florenzano',
author_email='floguy@gmail.com',
url='https://github.com/tgdn/django-bootstrap-pagination',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
)
|
tgdn/django-bootstrap-pagination
|
setup.py
|
Python
|
bsd-3-clause
| 4,189
|
from django.conf.urls.defaults import patterns, url
from django.views.decorators.cache import cache_page
from django.views.i18n import javascript_catalog
urlpatterns = patterns('pto.views',
url(r'^$', 'home', name='pto.home'),
url(r'^calculate_pto\.json$', 'calculate_pto', name='pto.calculate_pto'),
# Javascript translations.
url('^jsi18n.js$', cache_page(60 * 60 * 24 * 365)(javascript_catalog),
{'domain': 'javascript', 'packages': ['pto']}, name='jsi18n'),
)
|
kumar303/pto-planner
|
apps/pto/urls.py
|
Python
|
bsd-3-clause
| 491
|
import datetime
import re
import time
from collections import namedtuple
from django.conf import settings
from django.core.management.base import BaseCommand
from trello import ResourceUnavailable, TrelloClient
from core.models import Event
# Create new command
class Command(BaseCommand):
help = 'Syncs event in trello board. Need a token.'
missing_args_message = (
'You need to add a token! Get one here: '
'https://trello.com/1/authorize?key=01ab0348ca020573e7f728ae7400928a&scope=read%2Cwrite&'
'name=My+Application&expiration=1hour&response_type=token'
)
def add_arguments(self, parser):
parser.add_argument('trello_token', type=str)
def handle(self, *args, **options):
token = options['trello_token']
events = event_list()
sync(events, token)
# Get data
EventTuple = namedtuple('EventTuple', 'name id city date')
def event_list():
event = Event.objects.all()
result = []
for e in event:
name = e.name
_id = str(e.pk)
city = e.city
date = datetime.date(e.date.year, e.date.month, e.date.day or 1)
result.append(EventTuple(name, _id, city, date))
return result
# Sync to trello
ADMIN_BASE_URL = 'https://djangogirls.org/admin/core/event/'
def sync(events, token):
trello = TrelloClient(api_key=settings.TRELLO_API_KEY, token=token)
board = trello.get_board('55f7167c46760fcb5d68b385')
far_away, less_2_months, less_1_month, less_1_week, today, past = board.all_lists()
all_cards = {card_id(c): c for c in board.all_cards()}
date_today = datetime.date.today()
for e in events:
card = all_cards.get(e.id)
if not card:
card = create_card(e, far_away)
create_checklist(card)
# fetch card to get due date
try:
card.fetch()
except ResourceUnavailable:
print("Oopsie: too many requests! Let's wait 10 seconds!")
time.sleep(10)
card.fetch()
if e.date != card.due_date.date():
print('Changing due date of {} to {}'.format(e.city, e.date))
card.set_due(e.date)
distance = (e.date - date_today).days
if distance < 0:
right_list = past
elif distance == 0:
right_list = today
elif distance < 7:
right_list = less_1_week
elif distance < 30:
right_list = less_1_month
elif distance < 60:
right_list = less_2_months
else:
right_list = far_away
ensure_card_in_list(card, right_list)
def card_id(card):
m = re.search(ADMIN_BASE_URL + r'(\d+)',
card.desc)
return m.group(1)
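# Editor's note: card descriptions are expected to embed the event's admin
# URL, e.g. 'https://djangogirls.org/admin/core/event/123' (the id shown is
# illustrative), from which card_id() extracts the numeric primary key.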
def create_card(event, list):
print('Creating card {} ({})'.format(event.city, event.date.isoformat()))
return list.add_card(name=event.city,
desc=ADMIN_BASE_URL + event.id,
due=event.date.isoformat())
def create_checklist(card):
card.add_checklist("Things to do:", [
"2 month check", "1 month check", "Thank you email and request for stats", "Stats obtained"])
def ensure_checklist_in_card(card):
if not card.checklists:
print("Adding checklist to {} card.".format(card.name))
create_checklist(card)
def ensure_card_in_list(card, list):
if card.list_id != list.id:
print('Moving {} to {}'.format(
card.name, list.name))
card.change_list(list.id)
|
DjangoGirls/djangogirls
|
core/management/commands/sync_events_dashboard.py
|
Python
|
bsd-3-clause
| 3,541
|
import os
import datetime
import time
from ...utils import parse_utc
from .. import run_python_module
from .base import BaseTestApp
class TestNbGraderSubmit(BaseTestApp):
def _release_and_fetch(self, assignment, exchange, cache):
self._copy_file("files/test.ipynb", "release/ps1/p1.ipynb")
run_python_module([
"nbgrader", "release", assignment,
"--course", "abc101",
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
])
run_python_module([
"nbgrader", "fetch", assignment,
"--course", "abc101",
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
])
def _submit(self, assignment, exchange, cache, flags=None, retcode=0):
cmd = [
"nbgrader", "submit", assignment,
"--course", "abc101",
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
]
if flags is not None:
cmd.extend(flags)
run_python_module(cmd, retcode=retcode)
def test_help(self):
"""Does the help display without error?"""
run_python_module(["nbgrader", "submit", "--help-all"])
def test_no_course_id(self, exchange, cache):
"""Does releasing without a course id thrown an error?"""
self._release_and_fetch("ps1", exchange, cache)
cmd = [
"nbgrader", "submit", "ps1",
"--TransferApp.cache_directory={}".format(cache),
"--TransferApp.exchange_directory={}".format(exchange)
]
run_python_module(cmd, retcode=1)
def test_submit(self, exchange, cache):
self._release_and_fetch("ps1", exchange, cache)
now = datetime.datetime.now()
time.sleep(1)
self._submit("ps1", exchange, cache)
filename, = os.listdir(os.path.join(exchange, "abc101", "inbound"))
username, assignment, timestamp1 = filename.split("+")
assert username == os.environ['USER']
assert assignment == "ps1"
assert parse_utc(timestamp1) > now
assert os.path.isfile(os.path.join(exchange, "abc101", "inbound", filename, "p1.ipynb"))
assert os.path.isfile(os.path.join(exchange, "abc101", "inbound", filename, "timestamp.txt"))
with open(os.path.join(exchange, "abc101", "inbound", filename, "timestamp.txt"), "r") as fh:
assert fh.read() == timestamp1
filename, = os.listdir(os.path.join(cache, "abc101"))
username, assignment, timestamp1 = filename.split("+")
assert username == os.environ['USER']
assert assignment == "ps1"
assert parse_utc(timestamp1) > now
assert os.path.isfile(os.path.join(cache, "abc101", filename, "p1.ipynb"))
assert os.path.isfile(os.path.join(cache, "abc101", filename, "timestamp.txt"))
with open(os.path.join(cache, "abc101", filename, "timestamp.txt"), "r") as fh:
assert fh.read() == timestamp1
time.sleep(1)
self._submit("ps1", exchange, cache)
assert len(os.listdir(os.path.join(exchange, "abc101", "inbound"))) == 2
filename = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound")))[1]
username, assignment, timestamp2 = filename.split("+")
assert username == os.environ['USER']
assert assignment == "ps1"
assert parse_utc(timestamp2) > parse_utc(timestamp1)
assert os.path.isfile(os.path.join(exchange, "abc101", "inbound", filename, "p1.ipynb"))
assert os.path.isfile(os.path.join(exchange, "abc101", "inbound", filename, "timestamp.txt"))
with open(os.path.join(exchange, "abc101", "inbound", filename, "timestamp.txt"), "r") as fh:
assert fh.read() == timestamp2
assert len(os.listdir(os.path.join(cache, "abc101"))) == 2
filename = sorted(os.listdir(os.path.join(cache, "abc101")))[1]
username, assignment, timestamp2 = filename.split("+")
assert username == os.environ['USER']
assert assignment == "ps1"
assert parse_utc(timestamp2) > parse_utc(timestamp1)
assert os.path.isfile(os.path.join(cache, "abc101", filename, "p1.ipynb"))
assert os.path.isfile(os.path.join(cache, "abc101", filename, "timestamp.txt"))
with open(os.path.join(cache, "abc101", filename, "timestamp.txt"), "r") as fh:
assert fh.read() == timestamp2
|
dementrock/nbgrader
|
nbgrader/tests/apps/test_nbgrader_submit.py
|
Python
|
bsd-3-clause
| 4,574
|
"""
Wrappers over Docker resource types to aid in setup/teardown of and interaction
with Docker resources.
"""
import functools
from docker import models
from seaworthy.helpers import DockerHelper
from seaworthy.stream.logs import stream_logs, wait_for_logs_matching
from seaworthy.stream.matchers import RegexMatcher, UnorderedMatcher
# This is a hack to control our generated documentation. The value of the
# attribute is ignored, only its presence or absence can be detected by the
# apigen machinery.
__apigen_inherited_members__ = None
def deep_merge(*dicts):
"""
Recursively merge all input dicts into a single dict.
"""
result = {}
for d in dicts:
if not isinstance(d, dict):
raise Exception('Can only deep_merge dicts, got {}'.format(d))
for k, v in d.items():
# Whenever the value is a dict, we deep_merge it. This ensures that
# (a) we only ever merge dicts with dicts and (b) we always get a
# deep(ish) copy of the dicts and are thus safe from accidental
# mutations to shared state.
if isinstance(v, dict):
v = deep_merge(result.get(k, {}), v)
result[k] = v
return result
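# Editor's example: nested dicts merge recursively, and later values win for
# non-dict entries, e.g.
#   deep_merge({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'b': 4})
#       -> {'a': {'x': 1, 'y': 3}, 'b': 4}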
class _DefinitionBase:
__model_type__ = None
def __init__(self, name, create_kwargs=None, helper=None):
self.name = name
self._create_args = ()
self._create_kwargs = {} if create_kwargs is None else create_kwargs
self._helper = None
self.set_helper(helper)
self._inner = None
def create(self, **kwargs):
"""
Create an instance of this resource definition.
Only one instance may exist at any given time.
"""
if self.created:
raise RuntimeError(
'{} already created.'.format(self.__model_type__.__name__))
kwargs = self.merge_kwargs(self._create_kwargs, kwargs)
self._inner = self.helper.create(
self.name, *self._create_args, **kwargs)
def remove(self, **kwargs):
"""
Remove an instance of this resource definition.
"""
self.helper.remove(self.inner(), **kwargs)
self._inner = None
def setup(self, helper=None, **create_kwargs):
"""
Setup this resource so that is ready to be used in a test. If the
resource has already been created, this call does nothing.
For most resources, this just involves creating the resource in Docker.
:param helper:
The resource helper to use, if one was not provided when this
resource definition was created.
:param **create_kwargs: Keyword arguments passed to :meth:`.create`.
:returns:
This definition instance. Useful for creating and setting up a
resource in a single step::
volume = VolumeDefinition('volly').setup(helper=docker_helper)
"""
if self.created:
return
self.set_helper(helper)
self.create(**create_kwargs)
return self
def teardown(self):
"""
Teardown this resource so that it no longer exists in Docker. If the
resource has already been removed, this call does nothing.
For most resources, this just involves removing the resource in Docker.
"""
if not self.created:
return
self.remove()
def __enter__(self):
return self.setup()
def __exit__(self, exc_type, exc_val, exc_tb):
self.teardown()
@property
def helper(self):
if self._helper is None:
raise RuntimeError('No helper set.')
return self._helper
def set_helper(self, helper):
"""
.. todo::
Document this.
"""
# We don't want to "unset" in this method.
if helper is None:
return
# Get the right kind of helper if given a DockerHelper
if isinstance(helper, DockerHelper):
helper = helper._helper_for_model(self.__model_type__)
# We already have this one.
if helper is self._helper:
return
if self._helper is None:
self._helper = helper
else:
raise RuntimeError('Cannot replace existing helper.')
def as_fixture(self, name=None):
"""
A decorator to inject this container into a function as a test fixture.
"""
if name is None:
name = self.name
def deco(f):
@functools.wraps(f)
def wrapper(*args, **kw):
with self:
kw[name] = self
return f(*args, **kw)
return wrapper
return deco
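# Editor's usage sketch (names are illustrative): with a helper already set
# on the definition, the decorated test receives the set-up resource and
# teardown happens automatically afterwards:
#   @redis_definition.as_fixture(name='redis')
#   def test_ping(redis):
#       assert redis.created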
def inner(self):
"""
:returns: the underlying Docker model object
"""
if not self.created:
raise RuntimeError(
'{} not created yet.'.format(self.__model_type__.__name__))
return self._inner
@property
def created(self):
return self._inner is not None
def base_kwargs(self):
"""
Override this method to provide dynamically generated base kwargs for
the resource.
"""
return {}
def merge_kwargs(self, default_kwargs, kwargs):
"""
Override this method to merge kwargs differently.
"""
return deep_merge(self.base_kwargs(), default_kwargs, kwargs)
class ContainerDefinition(_DefinitionBase):
"""
This is the base class for container definitions. Instances (and instances
of subclasses) are intended to be used both as test fixtures and as
convenient objects for operating on containers being tested.
.. todo::
Document this properly.
A container object may be used as a context manager to ensure proper setup
and teardown of the container around the code that uses it::
with ContainerDefinition('my_container', IMAGE, helper=ch) as c:
assert c.status() == 'running'
(Note that this only works if the container has a helper set and does not
have a container created.)
"""
__model_type__ = models.containers.Container
WAIT_TIMEOUT = 10.0
def __init__(self, name, image, wait_patterns=None, wait_timeout=None,
create_kwargs=None, helper=None):
"""
:param name:
The name for the container. The actual name of the container is
namespaced by ContainerHelper. This name will be used as a network
alias for the container.
:param image: image tag to use
:param list wait_patterns:
Regex patterns to use when checking that the container has started
successfully.
:param wait_timeout:
Number of seconds to wait for the ``wait_patterns``. Defaults to
``self.WAIT_TIMEOUT``.
:param dict create_kwargs:
Other kwargs to use when creating the container.
:param seaworthy.helper.ContainerHelper helper:
A ContainerHelper instance used to create containers.
"""
super().__init__(name, create_kwargs=create_kwargs, helper=helper)
self._create_args = (image,)
if wait_patterns:
self.wait_matchers = [RegexMatcher(p) for p in wait_patterns]
else:
self.wait_matchers = None
if wait_timeout is not None:
self.wait_timeout = wait_timeout
else:
self.wait_timeout = self.WAIT_TIMEOUT
self._http_clients = []
def setup(self, helper=None, **run_kwargs):
"""
Creates the container, starts it, and waits for it to completely start.
:param helper:
The resource helper to use, if one was not provided when this
container definition was created.
:param **run_kwargs: Keyword arguments passed to :meth:`.run`.
:returns:
This container definition instance. Useful for creating and setting
up a container in a single step::
con = ContainerDefinition('conny', 'nginx').setup(helper=dh)
"""
if self.created:
return
self.set_helper(helper)
self.run(**run_kwargs)
self.wait_for_start()
return self
def teardown(self):
"""
Stop and remove the container if it exists.
"""
while self._http_clients:
self._http_clients.pop().close()
if self.created:
self.halt()
def status(self):
"""
Get the container's current status from Docker.
If the container does not exist (before creation and after removal),
the status is ``None``.
"""
if not self.created:
return None
self.inner().reload()
return self.inner().status
def start(self):
"""
Start the container. The container must have been created.
"""
self.inner().start()
self.inner().reload()
def stop(self, timeout=5):
"""
Stop the container. The container must have been created.
:param timeout:
Timeout in seconds to wait for the container to stop before sending
a ``SIGKILL``. Default: 5 (half the Docker default)
"""
self.inner().stop(timeout=timeout)
self.inner().reload()
def run(self, fetch_image=True, **kwargs):
"""
Create the container and start it. Similar to ``docker run``.
:param fetch_image:
Whether to try pull the image if it's not found. The behaviour here
is similar to ``docker run`` and this parameter defaults to
``True``.
:param **kwargs: Keyword arguments passed to :meth:`.create`.
"""
self.create(fetch_image=fetch_image, **kwargs)
self.start()
def wait_for_start(self):
"""
Wait for the container to start.
By default this will wait for the log lines matching the patterns
passed in the ``wait_patterns`` parameter of the constructor using an
UnorderedMatcher. For more advanced checks for container startup, this
method should be overridden.
"""
if self.wait_matchers:
matcher = UnorderedMatcher(*self.wait_matchers)
self.wait_for_logs_matching(matcher, timeout=self.wait_timeout)
def halt(self, stop_timeout=5):
"""
Stop the container and remove it. The opposite of :meth:`run`.
"""
self.stop(timeout=stop_timeout)
self.remove()
def clean(self):
"""
This method should "clean" the container so that it is in the same
state as it was when it was started. It is up to the implementer of
this method to decide how the container should be cleaned. See
:func:`~seaworthy.pytest.fixtures.clean_container_fixtures` for how
this can be used with pytest fixtures.
"""
raise NotImplementedError()
@property
def ports(self):
"""
The ports (exposed and published) of the container.
"""
return self.inner().attrs['NetworkSettings']['Ports']
def _host_port(self, port_spec, index):
if port_spec not in self.ports:
raise ValueError("Port '{}' is not exposed".format(port_spec))
mappings = self.ports[port_spec]
if mappings is None:
raise ValueError(
"Port '{}' is not published to the host".format(port_spec))
mapping = mappings[index]
return mapping['HostIp'], mapping['HostPort']
def get_host_port(self, container_port, proto='tcp', index=0):
"""
:param container_port: The container port.
:param proto: The protocol ('tcp' or 'udp').
:param index: The index of the mapping entry to return.
:returns: A tuple of the interface IP and port on the host.
"""
port_spec = '{}/{}'.format(container_port, proto)
return self._host_port(port_spec, index)
def get_first_host_port(self):
"""
Get the first mapping of the first (lowest) container port that has a
mapping. Useful when a container publishes only one port.
Note that unlike the Docker API, which sorts ports lexicographically
(e.g. ``90/tcp`` > ``8000/tcp``), we sort ports numerically so that the
lowest port is always chosen.
"""
mapped_ports = {p: m for p, m in self.ports.items() if m is not None}
if not mapped_ports:
raise RuntimeError('Container has no published ports')
def sort_key(port_string):
port, proto = port_string.split('/', 1)
return int(port), proto
first_port_spec = sorted(mapped_ports.keys(), key=sort_key)[0]
return self._host_port(first_port_spec, 0)
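# Editor's note: with published ports such as '8000/tcp' and '90/tcp',
# numeric sorting selects '90/tcp' first, whereas the lexicographic order
# used by the Docker API would put '8000/tcp' first.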
def get_logs(self, stdout=True, stderr=True, timestamps=False, tail='all',
since=None):
"""
Get container logs.
This method does not support streaming, use :meth:`stream_logs` for
that.
"""
return self.inner().logs(
stdout=stdout, stderr=stderr, timestamps=timestamps, tail=tail,
since=since)
def stream_logs(self, stdout=True, stderr=True, tail='all', timeout=10.0):
"""
Stream container output.
"""
return stream_logs(
self.inner(), stdout=stdout, stderr=stderr, tail=tail,
timeout=timeout)
def wait_for_logs_matching(self, matcher, timeout=10, encoding='utf-8',
**logs_kwargs):
"""
Wait for logs matching the given matcher.
"""
wait_for_logs_matching(
self.inner(), matcher, timeout=timeout, encoding=encoding,
**logs_kwargs)
def http_client(self, port=None):
"""
Construct an HTTP client for this container.
"""
# Local import to avoid potential circularity.
from seaworthy.client import ContainerHttpClient
client = ContainerHttpClient.for_container(self, container_port=port)
self._http_clients.append(client)
return client
class NetworkDefinition(_DefinitionBase):
"""
This is the base class for network definitions.
.. todo::
Document this properly.
"""
__model_type__ = models.networks.Network
class VolumeDefinition(_DefinitionBase):
"""
This is the base class for volume definitions.
The following is an example of how ``VolumeDefinition`` can be used to
attach volumes to a container::
from seaworthy.definitions import ContainerDefinition
class DjangoContainer(ContainerDefinition):
IMAGE = "seaworthy-demo:django"
WAIT_PATTERNS = (r"Booting worker",)
def __init__(self, name, socket_volume, static_volume, db_url):
super().__init__(name, self.IMAGE, self.WAIT_PATTERNS)
self.socket_volume = socket_volume
self.static_volume = static_volume
self.db_url = db_url
def base_kwargs(self):
return {
"volumes": {
self.socket_volume.inner(): "/var/run/gunicorn",
self.static_volume.inner(): "/app/static:ro",
},
"environment": {"DATABASE_URL": self.db_url}
}
# Create definition instances
socket_volume = VolumeDefinition("socket")
static_volume = VolumeDefinition("static")
django_container = DjangoContainer(
"django", socket_volume, static_volume,
postgresql_container.database_url())
# Create pytest fixtures
socket_volume_fixture = socket_volume.pytest_fixture("socket_volume")
static_volume_fixture = static_volume.pytest_fixture("static_volume")
django_fixture = django_container.pytest_fixture(
"django_container",
dependencies=[
"socket_volume", "static_volume", "postgresql_container"])
This example is explained in the `introductory blog post`_ and
`demo repository`_.
.. todo::
Document this properly.
.. _`introductory blog post`:
https://medium.com/mobileforgood/patterns-for-continuous-integration-with-docker-on-travis-ci-ba7e3a5ca2aa
.. _`demo repository`:
https://github.com/JayH5/seaworthy-demo
"""
__model_type__ = models.volumes.Volume
|
praekeltfoundation/seaworthy
|
seaworthy/definitions.py
|
Python
|
bsd-3-clause
| 16,642
|
r"""
Compute homogenized elastic coefficients for a given heterogeneous linear
elastic microstructure, see [1] for details or [2] and [3] for a quick
explanation. The mixed formulation, where displacements and pressures are
the unknowns, is used in this example.
[1] D. Cioranescu, J.S.J. Paulin: Homogenization in open sets with holes.
Journal of Mathematical Analysis and Applications 71(2), 1979, pages 590-607.
https://doi.org/10.1016/0022-247X(79)90211-7
[2] J. Pinho-da-Cruz, J.A. Oliveira, F. Teixeira-Dias:
Asymptotic homogenisation in linear elasticity.
Part I: Mathematical formulation and finite element modelling.
Computational Materials Science 45(4), 2009, pages 1073-1080.
http://dx.doi.org/10.1016/j.commatsci.2009.02.025
[3] J. Pinho-da-Cruz, J.A. Oliveira, F. Teixeira-Dias:
Asymptotic homogenisation in linear elasticity.
Part II: Finite element procedures and multiscale applications.
Computational Materials Science 45(4), 2009, pages 1081-1096.
http://dx.doi.org/10.1016/j.commatsci.2009.01.027
"""
from __future__ import absolute_import
import numpy as nm
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed,\
bulk_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions, get_box_volume
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u,\
compute_stress_strain_u, compute_mac_stress_part, add_stress_p
def recovery_le(pb, corrs, macro):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u(corrs['corrs_le'], macro['strain'], 'u', dim)
mic_p = - compute_micro_u(corrs['corrs_le'], macro['strain'], 'p', dim)
out['u_mic'] = Struct(name='output_data',
mode='vertex', data=mic_u,
var_name='u', dofs=None)
out['p_mic'] = Struct(name='output_data',
mode='cell', data=mic_p[:, nm.newaxis,
:, nm.newaxis],
var_name='p', dofs=None)
stress_Y, strain_Y = \
compute_stress_strain_u(pb, 'i', 'Y', 'mat.D', 'u', mic_u)
stress_Y += \
compute_mac_stress_part(pb, 'i', 'Y', 'mat.D', 'u', macro['strain'])
add_stress_p(stress_Y, pb, 'i', 'Y', 'p', mic_p)
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct(name='output_data',
mode='cell', data=strain,
dofs=None)
out['cauchy_stress'] = Struct(name='output_data',
mode='cell', data=stress_Y,
dofs=None)
return out
dim = 3
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
regions = {
'Y': 'all',
'Ym': 'cells of group 1',
'Yc': 'cells of group 2',
}
regions.update(define_box_regions(dim, region_lbn, region_rtf))
materials = {
'mat': ({'D': {'Ym': stiffness_from_youngpoisson_mixed(dim, 7.0e9, 0.4),
'Yc': stiffness_from_youngpoisson_mixed(dim, 70.0e9, 0.2)},
'gamma': {'Ym': 1.0/bulk_from_youngpoisson(7.0e9, 0.4),
'Yc': 1.0/bulk_from_youngpoisson(70.0e9, 0.2)}},),
}
fields = {
'corrector_u': ('real', dim, 'Y', 1),
'corrector_p': ('real', 1, 'Y', 0),
}
variables = {
'u': ('unknown field', 'corrector_u'),
'v': ('test field', 'corrector_u', 'u'),
'p': ('unknown field', 'corrector_p'),
'q': ('test field', 'corrector_p', 'p'),
'Pi': ('parameter field', 'corrector_u', 'u'),
'Pi1u': ('parameter field', 'corrector_u', '(set-to-None)'),
'Pi2u': ('parameter field', 'corrector_u', '(set-to-None)'),
'Pi1p': ('parameter field', 'corrector_p', '(set-to-None)'),
'Pi2p': ('parameter field', 'corrector_p', '(set-to-None)'),
}
functions = {
'match_x_plane': (per.match_x_plane,),
'match_y_plane': (per.match_y_plane,),
'match_z_plane': (per.match_z_plane,),
}
ebcs = {
'fixed_u': ('Corners', {'u.all': 0.0}),
}
if dim == 3:
epbcs = {
'periodic_x': (['Left', 'Right'], {'u.all': 'u.all'},
'match_x_plane'),
'periodic_y': (['Near', 'Far'], {'u.all': 'u.all'},
'match_y_plane'),
'periodic_z': (['Top', 'Bottom'], {'u.all': 'u.all'},
'match_z_plane'),
}
else:
epbcs = {
'periodic_x': (['Left', 'Right'], {'u.all': 'u.all'},
'match_x_plane'),
'periodic_y': (['Bottom', 'Top'], {'u.all': 'u.all'},
'match_y_plane'),
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim]]
integrals = {
'i': 2,
}
options = {
'coefs': 'coefs',
'requirements': 'requirements',
'ls': 'ls', # linear solver to use
'volume': {'value': get_box_volume(dim, region_lbn, region_rtf), },
'output_dir': 'output',
'coefs_filename': 'coefs_le_up',
'recovery_hook': 'recovery_le',
}
equation_corrs = {
'balance_of_forces':
""" dw_lin_elastic.i.Y(mat.D, v, u)
- dw_stokes.i.Y(v, p) =
- dw_lin_elastic.i.Y(mat.D, v, Pi)""",
'pressure constraint':
"""- dw_stokes.i.Y(u, q)
- dw_volume_dot.i.Y(mat.gamma, q, p) =
+ dw_stokes.i.Y(Pi, q)""",
}
coefs = {
'elastic_u': {
'requires': ['pis', 'corrs_rs'],
'expression': 'dw_lin_elastic.i.Y(mat.D, Pi1u, Pi2u)',
'set_variables': [('Pi1u', ('pis', 'corrs_rs'), 'u'),
('Pi2u', ('pis', 'corrs_rs'), 'u')],
'class': cb.CoefSymSym,
},
'elastic_p': {
'requires': ['corrs_rs'],
'expression': 'dw_volume_dot.i.Y(mat.gamma, Pi1p, Pi2p)',
'set_variables': [('Pi1p', 'corrs_rs', 'p'),
('Pi2p', 'corrs_rs', 'p')],
'class': cb.CoefSymSym,
},
'D': {
'requires': ['c.elastic_u', 'c.elastic_p'],
'class': cb.CoefSum,
},
'filenames': {},
}
requirements = {
'pis': {
'variables': ['u'],
'class': cb.ShapeDimDim,
},
'corrs_rs': {
'requires': ['pis'],
'ebcs': ['fixed_u'],
'epbcs': all_periodic,
'equations': equation_corrs,
'set_variables': [('Pi', 'pis', 'u')],
'class': cb.CorrDimDim,
'save_name': 'corrs_le',
'dump_variables': ['u', 'p'],
'is_linear': True,
},
}
solvers = {
'ls': ('ls.scipy_iterative', {
'method': 'cg',
}),
'newton': ('nls.newton', {
'i_max': 1,
'eps_a': 1e2,
})
}
|
lokik/sfepy
|
examples/homogenization/linear_homogenization_up.py
|
Python
|
bsd-3-clause
| 6,704
|
# flake8: noqa
from .util.frametools import (stata_merge, group_id, winsorize, df_to_list)
from .util.gentools import (
force_df, force_list, force_iterable, generate_chunks, base2int, int2base)
from .util.io import (
save_cli, read, write, load_or_build, loadbuild_cli, try_pickle,
load_or_build_direct, confirmer, force_valid_response, DataInteractModel)
from .util.to_latex import outreg, table_statrow, table_mainrow, write_notes
from .util.plot import (binscatter, legend_below, shrink_axes_for_legend)
from .util.reference import (
state_name_to_abbr, state_name_to_fips,
state_fips_to_name, state_fips_to_abbr,
state_abbr_to_name, state_abbr_to_fips,
state_fips_list, state_names_list, state_abbr_list)
|
dmsul/econtools
|
econtools/__init__.py
|
Python
|
bsd-3-clause
| 738
|