| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
from dataclasses import dataclass
from pants.backend.awslambda.python.target_types import (
PythonAwsLambdaHandlerField,
PythonAwsLambdaRuntime,
ResolvedPythonAwsHandler,
ResolvePythonAwsHandlerRequest,
)
from pants.backend.python.subsystems.lambdex import Lambdex
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.python.util_rules.pex import (
Pex,
PexPlatforms,
PexRequest,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.core.goals.package import (
BuiltPackage,
BuiltPackageArtifact,
OutputPathField,
PackageFieldSet,
)
from pants.core.target_types import FileSourceField
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
TransitiveTargets,
TransitiveTargetsRequest,
targets_with_sources_types,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.util.docutil import doc_url
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PythonAwsLambdaFieldSet(PackageFieldSet):
required_fields = (PythonAwsLambdaHandlerField, PythonAwsLambdaRuntime)
handler: PythonAwsLambdaHandlerField
runtime: PythonAwsLambdaRuntime
output_path: OutputPathField
@rule(desc="Create Python AWS Lambda", level=LogLevel.DEBUG)
async def package_python_awslambda(
field_set: PythonAwsLambdaFieldSet, lambdex: Lambdex, union_membership: UnionMembership
) -> BuiltPackage:
output_filename = field_set.output_path.value_or_default(
# Lambdas typically use the .zip suffix, so we use that instead of .pex.
file_ending="zip",
)
# We hardcode the platform value to the appropriate one for each AWS Lambda runtime.
# (Running the "hello world" lambda in the example code will report the platform, and can be
# used to verify correctness of these platform strings.)
py_major, py_minor = field_set.runtime.to_interpreter_version()
platform = f"linux_x86_64-cp-{py_major}{py_minor}-cp{py_major}{py_minor}"
# set pymalloc ABI flag - this was removed in python 3.8 https://bugs.python.org/issue36707
if py_major <= 3 and py_minor < 8:
platform += "m"
if (py_major, py_minor) == (2, 7):
platform += "u"
additional_pex_args = (
# Ensure we can resolve manylinux wheels in addition to any AMI-specific wheels.
"--manylinux=manylinux2014",
# When we're executing Pex on Linux, allow a local interpreter to be resolved if
# available and matching the AMI platform.
"--resolve-local-platforms",
)
pex_request = PexFromTargetsRequest(
addresses=[field_set.address],
internal_only=False,
output_filename=output_filename,
platforms=PexPlatforms([platform]),
additional_args=additional_pex_args,
additional_lockfile_args=additional_pex_args,
)
lambdex_request = PexRequest(
output_filename="lambdex.pex",
internal_only=True,
requirements=lambdex.pex_requirements(),
interpreter_constraints=lambdex.interpreter_constraints,
main=lambdex.main,
)
lambdex_pex, pex_result, handler, transitive_targets = await MultiGet(
Get(VenvPex, PexRequest, lambdex_request),
Get(Pex, PexFromTargetsRequest, pex_request),
Get(ResolvedPythonAwsHandler, ResolvePythonAwsHandlerRequest(field_set.handler)),
Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),
)
# Warn if users depend on `files` targets, which won't be included in the PEX and is a common
# gotcha.
file_tgts = targets_with_sources_types(
[FileSourceField], transitive_targets.dependencies, union_membership
)
if file_tgts:
files_addresses = sorted(tgt.address.spec for tgt in file_tgts)
logger.warning(
f"The `python_awslambda` target {field_set.address} transitively depends on the below "
"`files` targets, but Pants will not include them in the built Lambda. Filesystem APIs "
"like `open()` are not able to load files within the binary itself; instead, they "
"read from the current working directory."
f"\n\nInstead, use `resources` targets. See {doc_url('resources')}."
f"\n\nFiles targets dependencies: {files_addresses}"
)
# NB: Lambdex modifies its input pex in-place, so the input file is also the output file.
result = await Get(
ProcessResult,
VenvPexProcess(
lambdex_pex,
argv=("build", "-e", handler.val, output_filename),
input_digest=pex_result.digest,
output_files=(output_filename,),
description=f"Setting up handler in {output_filename}",
),
)
artifact = BuiltPackageArtifact(
output_filename,
extra_log_lines=(
f" Runtime: {field_set.runtime.value}",
# The AWS-facing handler function is always lambdex_handler.handler, which is the
# wrapper injected by lambdex that manages invocation of the actual handler.
" Handler: lambdex_handler.handler",
),
)
return BuiltPackage(digest=result.output_digest, artifacts=(artifact,))
def rules():
return [
*collect_rules(),
UnionRule(PackageFieldSet, PythonAwsLambdaFieldSet),
*pex_from_targets.rules(),
]
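# --- Illustrative sketch (not part of the original file) ---
# A minimal, standalone reproduction of the platform-string logic used in
# package_python_awslambda above, so the runtime-to-platform mapping can be
# checked in isolation. The helper name `lambda_platform` is made up here.
def lambda_platform(py_major, py_minor):
    platform = f"linux_x86_64-cp-{py_major}{py_minor}-cp{py_major}{py_minor}"
    # The pymalloc ABI flag was removed in Python 3.8 (bpo-36707).
    if py_major <= 3 and py_minor < 8:
        platform += "m"
    if (py_major, py_minor) == (2, 7):
        platform += "u"
    return platform

# e.g. lambda_platform(3, 6) == "linux_x86_64-cp-36-cp36m"
#      lambda_platform(3, 8) == "linux_x86_64-cp-38-cp38"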
| (code above) | patricklaw/pants | src/python/pants/backend/awslambda/python/rules.py | Python | apache-2.0 | 5,713 |
# Task 4. Variant 11.
""" Write a program that prints the name under which Johannes Brofeldt is known.
Additionally, the program must print the person's field of interest, place of birth, years of birth and
death (if the person has died), and compute the age at the present moment (or at the moment of death).
Variables must be used to store all the required data.
After printing the information, the program must wait for the user to press Enter before exiting.
"""
# Kurchatov N. V.
# 28.03.2016
print("Johannes Brofeldt is known as Juhani Aho.")
print("Juhani Aho was a writer, journalist and translator.")
print("Place of birth: Lapinlahti, Finland.")
born = 1861
death = 1921
age = death - born
print("Year of birth " + str(born))
print("Year of death " + str(death))
print("Age " + str(age))
input("Press Enter to exit")
| (code above) | Mariaanisimova/pythonintask | INBa/2015/Kurchatov_N_V/z4_11.py | Python | apache-2.0 | 1,352 |
movies=["adv","qwe","zxc"]
print(movies[1])
print(movies[0])
print(movies[2])
print(movies)
print(len(movies))
movies.append("ppp")
print(movies)
movies.pop()
print(movies)
movies.extend(["www","cccc"])
print(movies)
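# --- Added annotation (not part of the original file): expected output of the
# prints above, in order ---
# qwe
# adv
# zxc
# ['adv', 'qwe', 'zxc']
# 3
# ['adv', 'qwe', 'zxc', 'ppp']
# ['adv', 'qwe', 'zxc']
# ['adv', 'qwe', 'zxc', 'www', 'cccc']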
| (code above) | wxmylife/Python-Study-Tour | HeadPython/chapter1/Demo/Demo1.py | Python | apache-2.0 | 221 |
# Copyright (c) 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila.api import common
class ViewBuilder(common.ViewBuilder):
_collection_name = "quota_class_set"
_detail_version_modifiers = [
"add_share_group_quotas",
]
def detail_list(self, request, quota_class_set, quota_class=None):
"""Detailed view of quota class set."""
keys = (
'shares',
'gigabytes',
'snapshots',
'snapshot_gigabytes',
'share_networks',
)
view = {key: quota_class_set.get(key) for key in keys}
if quota_class:
view['id'] = quota_class
self.update_versioned_resource_dict(request, view, quota_class_set)
return {self._collection_name: view}
@common.ViewBuilder.versioned_method("2.40")
def add_share_group_quotas(self, context, view, quota_class_set):
share_groups = quota_class_set.get('share_groups')
share_group_snapshots = quota_class_set.get('share_group_snapshots')
if share_groups is not None:
view['share_groups'] = share_groups
if share_group_snapshots is not None:
view['share_group_snapshots'] = share_group_snapshots
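# --- Illustrative sketch (not part of the original file) ---
# A dependency-free approximation of ViewBuilder.detail_list() above, assuming
# a plain dict in place of the real quota_class_set object; the names
# _QUOTA_KEYS and build_quota_class_view are made up for illustration.
_QUOTA_KEYS = ('shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes',
               'share_networks')

def build_quota_class_view(quota_class_set, quota_class=None):
    view = {key: quota_class_set.get(key) for key in _QUOTA_KEYS}
    if quota_class:
        view['id'] = quota_class
    return {'quota_class_set': view}

# build_quota_class_view({'shares': 50, 'gigabytes': 1000}, 'default')
# -> {'quota_class_set': {'shares': 50, 'gigabytes': 1000, 'snapshots': None,
#                         'snapshot_gigabytes': None, 'share_networks': None,
#                         'id': 'default'}}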
| (code above) | bswartz/manila | manila/api/views/quota_class_sets.py | Python | apache-2.0 | 1,801 |
#!/usr/bin/python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GSoCProfile updating MapReduce.
"""
import logging
from google.appengine.ext import db
from google.appengine.ext.mapreduce import operation
from soc.models.user import User
from soc.modules.gsoc.models.profile import GSoCProfile
def process(profile_key):
def convert_profile_txn():
profile = db.get(profile_key)
if not profile:
logging.error("Missing profile for key '%s'." % profile_key)
return False
profile._fix_name(commit=False)
profile.is_student = bool(profile.student_info)
profile.org_admin_for = list(set(profile.org_admin_for))
profile.mentor_for = list(set(profile.org_admin_for + profile.mentor_for))
profile.is_org_admin = bool(profile.org_admin_for)
profile.is_mentor = bool(profile.mentor_for)
profile.put()
return (profile.is_student, profile.is_org_admin, profile.is_mentor)
result = db.run_in_transaction(convert_profile_txn)
if not result:
yield operation.counters.Increment("missing_profile")
return
is_student, is_admin, is_mentor = result
if is_student:
yield operation.counters.Increment("student_profiles_converted")
if is_admin:
yield operation.counters.Increment("admin_profiles_converted")
elif is_mentor:
yield operation.counters.Increment("mentor_profiles_converted")
if is_mentor:
yield operation.counters.Increment("only_mentor_profiles_converted")
yield operation.counters.Increment("profiles_converted")
| (code above) | adviti/melange | app/soc/mapreduce/convert_profile.py | Python | apache-2.0 | 2,054 |
import logging
from airflow.hooks import PrestoHook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class PrestoToMySqlTransfer(BaseOperator):
"""
Moves data from Presto to MySQL, note that for now the data is loaded
into memory before being pushed to MySQL, so this operator should
be used for smallish amounts of data.
:param sql: SQL query to execute against the Presto database
:type sql: str
:param mysql_table: target MySQL table, use dot notation to target a
specific database
:type mysql_table: str
:param mysql_conn_id: source mysql connection
:type mysql_conn_id: str
:param presto_conn_id: source presto connection
:type presto_conn_id: str
:param mysql_preoperator: sql statement to run against mysql prior to
import, typically used to truncate or delete in place of the data
coming in, allowing the task to be idempotent (running the task
twice won't double load data)
:type mysql_preoperator: str
"""
template_fields = ('sql', 'mysql_table', 'mysql_preoperator')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
mysql_table,
presto_conn_id='presto_default',
mysql_conn_id='mysql_default',
mysql_preoperator=None,
*args, **kwargs):
super(PrestoToMySqlTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.presto_conn_id = presto_conn_id
def execute(self, context):
presto = PrestoHook(presto_conn_id=self.presto_conn_id)
logging.info("Extracting data from Presto")
logging.info(self.sql)
results = presto.get_records(self.sql)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
if self.mysql_preoperator:
logging.info("Running MySQL preoperator")
logging.info(self.mysql_preoperator)
mysql.run(self.mysql_preoperator)
logging.info("Inserting rows into MySQL")
mysql.insert_rows(table=self.mysql_table, rows=results)
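# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical task definition showing how the operator above is typically
# wired into a DAG; the connection ids, table name and SQL are invented.
#
# load_counts = PrestoToMySqlTransfer(
#     task_id='presto_to_mysql_example',
#     sql="SELECT ds, count(*) AS cnt FROM events GROUP BY ds",
#     mysql_table='stats.daily_event_counts',
#     mysql_preoperator='DELETE FROM stats.daily_event_counts',
#     presto_conn_id='presto_default',
#     mysql_conn_id='mysql_default',
#     dag=dag)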
| (code above) | Chedi/airflow | airflow/operators/presto_to_mysql.py | Python | apache-2.0 | 2,281 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import tldextract
import re
import requests
# curl -v "http://statmt.org:8030/query_domain?domain=caletas.cr&full"
def get_tld(uri):
try:
netloc = uri.split(
'//', 1)[1].split('/', 1)[0].split(':', 1)[0].split('@')[-1]
except IndexError:
return ""
# netloc = urlparse(uri)
try:
tld = tldextract.extract(netloc)
except UnicodeError:
return None
except IndexError:
return None
return tld
def get_location(session, url, crawl, server):
""" Returns success and location """
payload = {'url': url, 'crawl': crawl,
'max_results': 1, 'verbose': 1, 'exact': 1}
r = session.get(server, params=payload)
assert 'locations' in r.json(), "line:%s\nquery: %s\nrespons:%s\n" % \
(line,
json.dumps(payload),
json.dumps(r.json()))
data = r.json()['locations']
if url not in data:
assert len(data) == 0
return False, None
return True, data[url][0]
def report_error(url, crawl, errors, total):
percentage = 100. * errors / total
sys.stderr.write("Errors: %d/%d = %.2f%%\t%s\t%s\n" %
(errors, total, percentage, crawl, url))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('candidates', type=argparse.FileType('r'),
help='file containing candidates')
parser.add_argument('outfile', type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('-server', help='metadata server location',
default='http://localhost:8080/query_prefix')
parser.add_argument('-slang', help='source language (e.g. en)',
default='en')
parser.add_argument('-tlang', help='target language (e.g. it)',
default='it')
args = parser.parse_args(sys.argv[1:])
total_lines, total_errors = 0, 0
with requests.Session() as session:
for line in args.candidates:
total_lines += 1
line = line.decode("utf-8")
_, src_url, src_crawl, tgt_url, tgt_crawl = line.strip().split()
src_success, src_loc = get_location(session, src_url,
src_crawl, args.server)
if not src_success:
total_errors += 1
report_error(src_url, src_crawl, total_errors, total_lines)
tgt_success, tgt_loc = get_location(session, tgt_url,
tgt_crawl, args.server)
if not tgt_success:
total_errors += 1
report_error(tgt_url, tgt_crawl, total_errors, total_lines)
if src_success and tgt_success:
args.outfile.write("%s\t%s\t%s\n" %
(src_url, src_crawl, json.dumps(src_loc)))
args.outfile.write("%s\t%s\t%s\n" %
(tgt_url, tgt_crawl, json.dumps(tgt_loc)))
sys.stderr.write("Done: ")
report_error(tgt_url, tgt_crawl, total_errors, total_lines)
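# --- Illustrative sketch (not part of the original file) ---
# Standalone demonstration of the netloc extraction performed at the top of
# get_tld() above; the helper name `extract_netloc` is made up.
def extract_netloc(uri):
    try:
        return uri.split('//', 1)[1].split('/', 1)[0].split(':', 1)[0].split('@')[-1]
    except IndexError:
        return ""

# extract_netloc("http://user@www.example.com:8080/path?q=1") -> "www.example.com"
# extract_netloc("not-a-url") -> ""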
| (code above) | ModernMT/DataCollection | baseline/locate_candidates.py | Python | apache-2.0 | 3,214 |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Opserver
#
# Operational State Server for VNC
#
from gevent import monkey
monkey.patch_all()
import sys
import gevent
import json
import base64
import socket
import time
import copy
import traceback
import signal
import logging
logging.getLogger('kafka').addHandler(logging.StreamHandler())
logging.getLogger('kafka').setLevel(logging.WARNING)
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from pysandesh.sandesh_base import *
from pysandesh.connection_info import ConnectionState
from pysandesh.sandesh_logger import SandeshLogger
from pysandesh.gen_py.sandesh_alarm.ttypes import SandeshAlarmAckResponseCode
import sandesh.viz.constants as viz_constants
from sandesh.alarmgen_ctrl.sandesh_alarm_base.ttypes import AlarmTrace, \
UVEAlarms, UVEAlarmInfo, UVEAlarmConfig, AlarmOperand2, AlarmCondition, \
AlarmMatch, AlarmConditionMatch, AlarmAndList, AlarmRules
from sandesh.analytics.ttypes import *
from sandesh.nodeinfo.ttypes import NodeStatusUVE, NodeStatus
from sandesh.nodeinfo.cpuinfo.ttypes import *
from sandesh.nodeinfo.process_info.ttypes import *
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, CategoryNames,\
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT, COLLECTOR_DISCOVERY_SERVICE_NAME,\
ALARM_GENERATOR_SERVICE_NAME
from alarmgen_cfg import CfgParser
from uveserver import UVEServer
from partition_handler import PartitionHandler, UveStreamProc
from alarmgen_config_handler import AlarmGenConfigHandler
from sandesh.alarmgen_ctrl.ttypes import PartitionOwnershipReq, \
PartitionOwnershipResp, PartitionStatusReq, UVECollInfo, UVEGenInfo, \
PartitionStatusResp, UVETableAlarmReq, UVETableAlarmResp, \
AlarmgenTrace, UVEKeyInfo, UVETypeCount, UVETypeInfo, AlarmgenStatusTrace, \
AlarmgenStatus, AlarmgenStats, AlarmgenPartitionTrace, \
AlarmgenPartition, AlarmgenPartionInfo, AlarmgenUpdate, \
UVETableInfoReq, UVETableInfoResp, UVEObjectInfo, UVEStructInfo, \
UVETablePerfReq, UVETablePerfResp, UVETableInfo, UVETableCount, \
UVEAlarmStateMachineInfo, UVEAlarmState, UVEAlarmOperState,\
AlarmStateChangeTrace, UVEQTrace
from sandesh.discovery.ttypes import CollectorTrace
from cpuinfo import CpuInfoData
from opserver_util import ServicePoller
from stevedore import hook, extension
from pysandesh.util import UTCTimestampUsec
from libpartition.libpartition import PartitionClient
import discoveryclient.client as client
from kafka import KafkaClient, SimpleProducer
import redis
from collections import namedtuple
OutputRow = namedtuple("OutputRow",["key","typ","val"])
class AGTabStats(object):
""" This class is used to store per-UVE-table information
about the time taken and number of instances when
a UVE was retrieved, published or evaluated for alarms
"""
def __init__(self):
self.reset()
def record_get(self, get_time):
self.get_time += get_time
self.get_n += 1
def record_pub(self, get_time):
self.pub_time += get_time
self.pub_n += 1
def record_call(self, get_time):
self.call_time += get_time
self.call_n += 1
def get_result(self):
if self.get_n:
return self.get_time / self.get_n
else:
return 0
def pub_result(self):
if self.pub_n:
return self.pub_time / self.pub_n
else:
return 0
def call_result(self):
if self.call_n:
return self.call_time / self.call_n
else:
return 0
def reset(self):
self.call_time = 0
self.call_n = 0
self.get_time = 0
self.get_n = 0
self.pub_time = 0
self.pub_n = 0
class AGKeyInfo(object):
""" This class is used to maintain UVE contents
"""
def __init__(self, part):
self._part = part
# key of struct name, value of content dict
self.current_dict = {}
self.update({})
def update_single(self, typ, val):
# A single UVE struct has changed
# If the UVE has gone away, the val is passed in as None
self.set_removed = set()
self.set_added = set()
self.set_changed = set()
self.set_unchanged = self.current_dict.keys()
if typ in self.current_dict:
# the "added" set must stay empty in this case
if val is None:
self.set_unchanged.remove(typ)
self.set_removed.add(typ)
del self.current_dict[typ]
else:
# both "added" and "removed" will be empty
if val != self.current_dict[typ]:
self.set_unchanged.remove(typ)
self.set_changed.add(typ)
self.current_dict[typ] = val
else:
if val != None:
self.set_added.add(typ)
self.current_dict[typ] = val
def update(self, new_dict):
# A UVE has changed, and we have the entire new
# content of the UVE available in new_dict
set_current = set(new_dict.keys())
set_past = set(self.current_dict.keys())
set_intersect = set_current.intersection(set_past)
self.set_added = set_current - set_intersect
self.set_removed = set_past - set_intersect
self.set_changed = set()
self.set_unchanged = set()
for o in set_intersect:
if new_dict[o] != self.current_dict[o]:
self.set_changed.add(o)
else:
self.set_unchanged.add(o)
self.current_dict = new_dict
def values(self):
return self.current_dict
def added(self):
return self.set_added
def removed(self):
return self.set_removed
def changed(self):
return self.set_changed
def unchanged(self):
return self.set_unchanged
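# --- Illustrative sketch (not part of the original file) ---
# Standalone demonstration of the set arithmetic used by AGKeyInfo.update()
# above: given the previous and new contents of a UVE, classify each struct
# name as added, removed, changed or unchanged. The helper name is made up.
def classify_structs(old, new):
    common = set(old) & set(new)
    added = set(new) - common
    removed = set(old) - common
    changed = set(k for k in common if old[k] != new[k])
    unchanged = common - changed
    return added, removed, changed, unchanged

# classify_structs({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
# -> ({'c'}, {'a'}, {'b'}, set())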
class AlarmProcessor(object):
def __init__(self, logger):
self.uve_alarms = {}
self._logger = logger
self.ActiveTimer = {}
self.IdleTimer = {}
self.FreqExceededCheck = {}
self.FreqCheck_Times = {}
self.FreqCheck_Seconds = {}
def process_alarms(self, alarm_fqname, alarm, uv, local_uve):
if not alarm.is_enabled():
return
alarm_name = alarm_fqname.rsplit(':', 1)[1]
sev = alarm.severity()
if not uv in self.ActiveTimer:
self.ActiveTimer[uv] = {}
self.ActiveTimer[uv][alarm_name] = alarm.ActiveTimer()
if not uv in self.IdleTimer:
self.IdleTimer[uv] = {}
self.IdleTimer[uv][alarm_name] = alarm.IdleTimer()
if not uv in self.FreqExceededCheck:
self.FreqExceededCheck[uv] = {}
self.FreqExceededCheck[uv][alarm_name] = alarm.FreqExceededCheck()
if not uv in self.FreqCheck_Times:
self.FreqCheck_Times[uv] = {}
self.FreqCheck_Times[uv][alarm_name] = alarm.FreqCheck_Times()
if not uv in self.FreqCheck_Seconds:
self.FreqCheck_Seconds[uv] = {}
self.FreqCheck_Seconds[uv][alarm_name] = alarm.FreqCheck_Seconds()
try:
# __call__ method overrides the generic alarm processing code.
if hasattr(alarm, '__call__'):
or_list = alarm.__call__(uv, local_uve)
else:
or_list = self._evaluate_uve_for_alarms(
alarm.config(), uv, local_uve)
self._logger.debug("Alarm[%s] %s: %s" %
(uv, alarm_name, str(or_list)))
if or_list:
self.uve_alarms[alarm_name] = UVEAlarmInfo(type=alarm_name,
severity=sev, timestamp=0, token="",
alarm_rules=AlarmRules(or_list),
description=alarm.description(), ack=False)
except Exception as ex:
template = "Exception {0} in Alarm Processing. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
self.uve_alarms[alarm_name] = UVEAlarmInfo(type=alarm_name,
severity=sev, timestamp=0, token="",
alarm_rules=AlarmRules(None),
description=alarm.description(), ack=False)
# end process_alarms
def _get_uve_attribute(self, tuve, puve, attr_list):
if tuve is None or not attr_list:
return {'value': tuve, 'parent_attr': puve,
'status': False if len(attr_list) else True}
if isinstance(tuve, dict):
return self._get_uve_attribute(tuve.get(attr_list[0]),
tuve, attr_list[1:])
elif isinstance(tuve, list):
return [self._get_uve_attribute(elem, tuve, attr_list) \
for elem in tuve]
elif isinstance(tuve, str):
try:
json_elem = json.loads(tuve)
except ValueError:
return {'value': None, 'parent_attr': tuve, 'status': False}
else:
return self._get_uve_attribute(json_elem, tuve, attr_list)
# end _get_uve_attribute
def _get_operand_value(self, uve, operand):
attr_list = operand.split('.')
return self._get_uve_attribute(uve, uve, attr_list)
# end _get_operand_value
def _get_json_value(self, val):
try:
tval = json.loads(val)
except (ValueError, TypeError):
return json.dumps(val)
else:
return val
# end _get_json_value
def _get_json_variables(self, uve, exp, operand1_val,
operand2_val, is_operand2_json_val):
json_vars = {}
for var in exp.variables:
# If var and operand1/operand2 are at the same hirerarchy in
# the uve struture, then get the value of var from parent_attr of
# the corresponding operand_val
# TODO: Handle the case len(var.rsplit) != len(operand.rsplit)
if var.rsplit('.', 1)[0] == exp.operand1.rsplit('.', 1)[0]:
var_val = \
operand1_val['parent_attr'].get(var.rsplit('.', 1)[1])
elif not is_operand2_json_val and \
var.rsplit('.', 1)[0] == exp.operand2.uve_attribute.rsplit(
'.', 1)[0]:
var_val = \
operand2_val['parent_attr'].get(var.rsplit('.', 1)[1])
else:
var_val = self._get_operand_value(uve, var)['value']
json_vars[var] = self._get_json_value(var_val)
return json_vars
# end _get_json_variables
def _compare_operand_vals(self, val1, val2, operation):
try:
val1 = json.loads(val1)
except (TypeError, ValueError):
pass
try:
val2 = json.loads(val2)
except (TypeError, ValueError):
pass
if operation == '==':
return val1 == val2
elif operation == '!=':
return val1 != val2
elif operation == '<=':
return val1 <= val2
elif operation == '>=':
return val1 >= val2
elif operation == 'in':
if not isinstance(val2, list):
return False
return val1 in val2
elif operation == 'not in':
if not isinstance(val2, list):
return True
return val1 not in val2
elif operation == 'size==':
if not isinstance(val1, list):
return False
return len(val1) == val2
elif operation == 'size!=':
if not isinstance(val1, list):
return True
return len(val1) != val2
# end _compare_operand_vals
def _get_alarm_match(self, uve, exp, operand1_val, operand2_val,
is_operand2_json_val):
json_vars = self._get_json_variables(uve, exp, operand1_val,
operand2_val, is_operand2_json_val)
json_operand1_val = self._get_json_value(operand1_val['value'])
if not is_operand2_json_val:
json_operand2_val = self._get_json_value(operand2_val['value'])
else:
json_operand2_val = None
return AlarmMatch(json_operand1_value=json_operand1_val,
json_operand2_value=json_operand2_val, json_variables=json_vars)
# end _get_alarm_match
def _get_alarm_condition_match(self, uve, exp, operand1_val, operand2_val,
is_operand2_json_val, match_list=None):
if not match_list:
match_list = [self._get_alarm_match(uve, exp, operand1_val,
operand2_val, is_operand2_json_val)]
return AlarmConditionMatch(
condition=AlarmCondition(operation=exp.operation,
operand1=exp.operand1, operand2=AlarmOperand2(
uve_attribute=exp.operand2.uve_attribute,
json_value=exp.operand2.json_value),
variables=exp.variables),
match=match_list)
# end _get_alarm_condition_match
def _get_uve_parent_fqname(self, table, uve_name, uve):
try:
# virtual-machine UVE key doesn't have project name in the prefix.
# Hence extract the project name from the interface_list.
if table == viz_constants.VM_TABLE:
return uve['UveVirtualMachineAgent']['interface_list'][0].\
rsplit(':', 1)[0]
else:
return uve_name.rsplit(':', 1)[0]
except (KeyError, IndexError):
return None
# end _get_uve_parent_fqname
def _evaluate_uve_for_alarms(self, alarm_cfg, uve_key, uve):
table, uve_name = uve_key.split(':', 1)
# For alarms configured under project, the parent fq_name of the uve
# should match with that of the alarm config
if alarm_cfg.parent_type == 'project':
uve_parent_fqname = self._get_uve_parent_fqname(table,
uve_name, uve)
if uve_parent_fqname != alarm_cfg.get_parent_fq_name_str():
return None
or_list = []
for cfg_and_list in alarm_cfg.alarm_rules.or_list:
and_list = []
and_list_fail = False
for exp in cfg_and_list.and_list:
operand1_val = self._get_operand_value(uve, exp.operand1)
if isinstance(operand1_val, dict) and \
operand1_val['status'] is False:
and_list_fail = True
break
if exp.operand2.json_value is not None:
operand2_val = json.loads(exp.operand2.json_value)
is_operand2_json_val = True
else:
operand2_val = self._get_operand_value(uve,
exp.operand2.uve_attribute)
if isinstance(operand2_val, dict) and \
operand2_val['status'] is False:
and_list_fail = True
break
is_operand2_json_val = False
if isinstance(operand1_val, list) or \
(is_operand2_json_val is False and \
isinstance(operand2_val, list)):
match_list = []
# both operand1_val and operand2_val are list
if isinstance(operand1_val, list) and \
(is_operand2_json_val is False and \
isinstance(operand2_val, list)):
if len(operand1_val) != len(operand2_val):
and_list_fail = True
break
for i in range(0, len(operand1_val)):
if self._compare_operand_vals(
operand1_val[i]['value'],
operand2_val[i]['value'],
exp.operation):
match_list.append(
self._get_alarm_match(
uve, exp, operand1_val[i],
operand2_val[i],
is_operand2_json_val))
# operand1_val is list and operand2_val is not list
elif isinstance(operand1_val, list):
val2 = operand2_val
if not is_operand2_json_val:
val2 = operand2_val['value']
for val1 in operand1_val:
if self._compare_operand_vals(val1['value'],
val2, exp.operation):
match_list.append(self._get_alarm_match(
uve, exp, val1, operand2_val,
is_operand2_json_val))
# operand1_val is not list and operand2_val is list
elif is_operand2_json_val is False and \
isinstance(operand2_val, list):
for val2 in operand2_val:
if self._compare_operand_vals(
operand1_val['value'], val2['value'],
exp.operation):
match_list.append(self._get_alarm_match(
uve, exp, operand1_val, val2,
is_operand2_json_val))
if match_list:
and_list.append(self._get_alarm_condition_match(
uve, exp, operand1_val, operand2_val,
is_operand2_json_val, match_list))
else:
and_list_fail = True
break
# Neither operand1_val nor operand2_val is a list
else:
val1 = operand1_val['value']
val2 = operand2_val
if not is_operand2_json_val:
val2 = operand2_val['value']
if self._compare_operand_vals(val1, val2, exp.operation):
and_list.append(self._get_alarm_condition_match(
uve, exp, operand1_val, operand2_val,
is_operand2_json_val))
else:
and_list_fail = True
break
if not and_list_fail:
or_list.append(AlarmAndList(and_list))
if or_list:
return or_list
return None
# end _evaluate_uve_for_alarms
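# --- Illustrative sketch (not part of the original file) ---
# The alarm rules evaluated by _evaluate_uve_for_alarms() above are in
# disjunctive normal form: an or_list of and_lists, and the alarm is raised
# only if every condition in at least one and_list matches. A simplified,
# hypothetical evaluator over pre-computed booleans:
def rules_match(or_list):
    return any(all(and_list) for and_list in or_list)

# rules_match([[True, False], [True, True]]) -> True
# rules_match([[True, False], [False]]) -> False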
class AlarmStateMachine:
tab_alarms_timer = {}
last_timers_run = None
def __init__(self, tab, uv, nm, sandesh, activeTimer, idleTimer,
freqCheck_Times, freqCheck_Seconds, freqExceededCheck):
self._sandesh = sandesh
self._logger = sandesh._logger
self.tab = tab
self.uv = uv
self.nm = nm
self.uac = UVEAlarmConfig(ActiveTimer = activeTimer, IdleTimer = \
idleTimer, FreqCheck_Times = freqCheck_Times, FreqCheck_Seconds
= freqCheck_Seconds, FreqExceededCheck = freqExceededCheck)
self.uas = UVEAlarmOperState(state = UVEAlarmState.Idle,
head_timestamp = 0, alarm_timestamp = [])
self.uai = None
self.activeTimeout = None
self.deleteTimeout = None
self.idleTimeout = None
def get_uai(self, forced=False):
"""
This function returns all the alarms which are in Active or
Soak_Idle state, all other alarms are not yet asserted or cleared
"""
if forced:
return self.uai
if self.uas.state == UVEAlarmState.Active or \
self.uas.state == UVEAlarmState.Soak_Idle:
return self.uai
return None
def get_uac(self):
return self.uac
def get_uas(self):
return self.uas
def set_uai(self, uai):
self.uai = uai
def is_new_alarm_same(self, new_uai):
uai2 = copy.deepcopy(self.uai)
uai2.timestamp = 0
uai2.token = ""
uai2.ack = False
if (uai2 == new_uai) and \
self.uas.state == UVEAlarmState.Active:
return True
return False
def _remove_timer_from_list(self, index):
AlarmStateMachine.tab_alarms_timer[index].discard((self.tab,\
self.uv, self.nm))
if len(AlarmStateMachine.tab_alarms_timer[index]) == 0:
del AlarmStateMachine.tab_alarms_timer[index]
def set_alarms(self):
"""
This function runs the state machine code for setting an alarm
If a timer becomes Active, caller should send out updated AlarmUVE
"""
old_state = self.uas.state
curr_time = int(time.time())
if self.uas.state == UVEAlarmState.Soak_Idle:
self.uas.state = UVEAlarmState.Active
self._remove_timer_from_list(self.idleTimeout)
elif self.uas.state == UVEAlarmState.Idle:
if self.deleteTimeout and (self.deleteTimeout in \
AlarmStateMachine.tab_alarms_timer):
self._remove_timer_from_list(self.deleteTimeout)
if self.uac.FreqExceededCheck:
# log the timestamp
ts = int(self.uai.timestamp/1000000.0)
if len(self.uas.alarm_timestamp) <= self.uas.head_timestamp:
self.uas.alarm_timestamp.append(ts)
else:
self.uas.alarm_timestamp[self.uas.head_timestamp] = ts
self.uas.head_timestamp = (self.uas.head_timestamp + 1) % \
(self.uac.FreqCheck_Times + 1)
if not self.uac.ActiveTimer or self.is_alarm_frequency_exceeded():
self.uas.state = UVEAlarmState.Active
else:
# put it on the timer
self.uas.state = UVEAlarmState.Soak_Active
self.activeTimeout = curr_time + self.uac.ActiveTimer
timeout_value = self.activeTimeout
if not timeout_value in AlarmStateMachine.tab_alarms_timer:
AlarmStateMachine.tab_alarms_timer[timeout_value] = set()
AlarmStateMachine.tab_alarms_timer[timeout_value].add\
((self.tab, self.uv, self.nm))
self.send_state_change_trace(old_state, self.uas.state)
#end set_alarms
def send_state_change_trace(self, os, ns):
# No need to send if old and new states are same
if os == ns:
return
state_trace = AlarmStateChangeTrace()
state_trace.table = str(self.tab)
state_trace.uv = str(self.uv)
state_trace.alarm_type = str(self.nm)
state_trace.old_state = os
state_trace.new_state = ns
state_trace.trace_msg(name="AlarmStateChangeTrace", \
sandesh=self._sandesh)
def clear_alarms(self):
"""
This function runs the state machine code for clearing an alarm
If a timer becomes Idle with no soaking enabled,
caller should delete corresponding alarm and send out updated AlarmUVE
"""
cur_time = int(time.time())
old_state = self.uas.state
delete_alarm = False
if self.uas.state == UVEAlarmState.Soak_Active:
# stop the active timer and start idle timer
self.uas.state = UVEAlarmState.Idle
self._remove_timer_from_list(self.activeTimeout)
if self.uac.FreqCheck_Seconds:
self.deleteTimeout = cur_time + self.uac.FreqCheck_Seconds
to_value = self.deleteTimeout
if not to_value in AlarmStateMachine.tab_alarms_timer:
AlarmStateMachine.tab_alarms_timer[to_value] = set()
AlarmStateMachine.tab_alarms_timer[to_value].add\
((self.tab, self.uv, self.nm))
else:
delete_alarm = True
elif self.uas.state == UVEAlarmState.Active:
if not self.uac.IdleTimer:
# Move to Idle state, caller should delete it
self.uas.state = UVEAlarmState.Idle
if self.uac.FreqCheck_Seconds:
self.deleteTimeout = cur_time + self.uac.FreqCheck_Seconds
to_value = self.deleteTimeout
if not to_value in AlarmStateMachine.tab_alarms_timer:
AlarmStateMachine.tab_alarms_timer[to_value] = set()
AlarmStateMachine.tab_alarms_timer[to_value].add\
((self.tab, self.uv, self.nm))
else:
delete_alarm = True
else:
self.uas.state = UVEAlarmState.Soak_Idle
self.idleTimeout = cur_time + self.uac.IdleTimer
to_value = self.idleTimeout
if not to_value in AlarmStateMachine.tab_alarms_timer:
AlarmStateMachine.tab_alarms_timer[to_value] = set()
AlarmStateMachine.tab_alarms_timer[to_value].add\
((self.tab, self.uv, self.nm))
self.send_state_change_trace(old_state, self.uas.state)
return delete_alarm
def is_alarm_frequency_exceeded(self):
if not self.uac.FreqExceededCheck or \
not self.uac.FreqCheck_Times or \
not self.uac.FreqCheck_Seconds:
return False
if len(self.uas.alarm_timestamp) < self.uac.FreqCheck_Times + 1:
return False
freqCheck_times = self.uac.FreqCheck_Times
head = self.uas.head_timestamp
start = (head + freqCheck_times + 1) % freqCheck_times
end = (head + freqCheck_times) % freqCheck_times
if (self.uas.alarm_timestamp[end] - self.uas.alarm_timestamp[start]) \
<= self.uac.FreqCheck_Seconds:
self._logger.info("alarm frequency is exceeded, raising alarm")
return True
return False
def run_active_timer(self, curr_time):
update_alarm = False
timeout_value = None
if curr_time >= self.activeTimeout:
self.send_state_change_trace(self.uas.state,
UVEAlarmState.Active)
self.uas.state = UVEAlarmState.Active
timeout_value = -1
update_alarm = True
return timeout_value, update_alarm
def run_idle_timer(self, curr_time):
"""
This is the handler function for checking timer in Soak_Idle state.
State Machine should be deleted by the caller if this timer fires
"""
idleTimerExpired = 0
update_alarm = False
timeout_value = None
delete_alarm = False
if self.idleTimeout:
idleTimerExpired = curr_time - self.idleTimeout
if idleTimerExpired >= 0:
self.send_state_change_trace(self.uas.state, UVEAlarmState.Idle)
self.uas.state = UVEAlarmState.Idle
if self.uac.FreqCheck_Seconds:
self.deleteTimeout = curr_time + self.uac.FreqCheck_Seconds
timeout_value = self.deleteTimeout
update_alarm = True
else:
delete_alarm = True
return timeout_value, update_alarm, delete_alarm
def run_delete_timer(self, curr_time):
"""
This is the handler function for checking timer in Idle state.
State Machine should be deleted by the caller if this timer fires
"""
delete_alarm = False
idleTimerExpired = 0
if self.deleteTimeout > 0:
idleTimerExpired = curr_time - self.deleteTimeout
if idleTimerExpired >= 0:
delete_alarm = True
return delete_alarm
def run_uve_soaking_timer(self, curr_time):
"""
This function goes through the list of alarms which were raised
or set to delete but not soaked yet.
If an alarm is soaked for corresponding soak_time then it is asserted
or deleted
"""
update_alarm = False
delete_alarm = False
timeout_value = None
if self.uas.state == UVEAlarmState.Soak_Active:
timeout_value, update_alarm = self.run_active_timer(curr_time)
elif self.uas.state == UVEAlarmState.Soak_Idle:
timeout_value, update_alarm, delete_alarm = self.run_idle_timer(curr_time)
elif self.uas.state == UVEAlarmState.Idle:
delete_alarm = self.run_delete_timer(curr_time)
return delete_alarm, update_alarm, timeout_value
#end run_uve_soaking_timer
def delete_timers(self):
if self.uas.state == UVEAlarmState.Idle:
if self.deleteTimeout and self.deleteTimeout > 0:
self._remove_timer_from_list(self.deleteTimeout)
elif self.uas.state == UVEAlarmState.Soak_Active:
if self.activeTimeout and self.activeTimeout > 0:
self._remove_timer_from_list(self.activeTimeout)
elif self.uas.state == UVEAlarmState.Soak_Idle:
if self.idleTimeout and self.idleTimeout > 0:
self._remove_timer_from_list(self.idleTimeout)
@staticmethod
def run_timers(curr_time, tab_alarms):
inputs = namedtuple('inputs', ['tab', 'uv', 'nm',
'delete_alarm', 'timeout_val', 'old_to'])
delete_alarms = []
update_alarms = []
if AlarmStateMachine.last_timers_run is None:
AlarmStateMachine.last_timers_run = curr_time
for next_timer in range(AlarmStateMachine.last_timers_run, curr_time + 1):
if next_timer in AlarmStateMachine.tab_alarms_timer:
update_timers = []
for (tab, uv, nm) in AlarmStateMachine.tab_alarms_timer[next_timer]:
asm = tab_alarms[tab][uv][nm]
delete_alarm, update_alarm, timeout_val = \
asm.run_uve_soaking_timer(next_timer)
if delete_alarm:
delete_alarms.append((asm.tab, asm.uv, asm.nm))
if update_alarm:
update_alarms.append((asm.tab, asm.uv, asm.nm))
update_timers.append(inputs(tab=asm.tab, uv=asm.uv, nm=asm.nm,
delete_alarm=delete_alarm,
timeout_val=timeout_val, old_to=next_timer))
for timer in update_timers:
if timer.timeout_val is not None or timer.delete_alarm:
AlarmStateMachine.update_tab_alarms_timer(timer.tab,
timer.uv, timer.nm, timer.old_to,
timer.timeout_val, tab_alarms)
AlarmStateMachine.last_timers_run = curr_time + 1
return delete_alarms, update_alarms
@staticmethod
def update_tab_alarms_timer(tab, uv, nm, curr_index, timeout_val,
tab_alarms):
del_timers = []
if curr_index is not None and curr_index > 0:
timers = AlarmStateMachine.tab_alarms_timer[curr_index]
if (tab, uv, nm) in timers:
asm = tab_alarms[tab][uv][nm]
timers.discard((tab, uv, nm))
if len(timers) == 0:
del_timers.append(curr_index)
for timeout in del_timers:
del AlarmStateMachine.tab_alarms_timer[timeout]
if timeout_val >= 0:
if not timeout_val in AlarmStateMachine.tab_alarms_timer:
AlarmStateMachine.tab_alarms_timer[timeout_val] = set()
if (tab, uv, nm) in AlarmStateMachine.tab_alarms_timer\
[timeout_val]:
self._logger.error("Timer error for (%s,%s,%s)" % \
(tab, uv, nm))
raise SystemExit
AlarmStateMachine.tab_alarms_timer[timeout_val].add\
((asm.tab, asm.uv, asm.nm))
class Controller(object):
@staticmethod
def token(sandesh, timestamp):
token = {'host_ip': sandesh.host_ip(),
'http_port': sandesh._http_server.get_port(),
'timestamp': timestamp}
return base64.b64encode(json.dumps(token))
def fail_cb(self, manager, entrypoint, exception):
self._sandesh._logger.info("Load failed for %s with exception %s" % \
(str(entrypoint),str(exception)))
def __init__(self, conf, test_logger=None):
self._conf = conf
module = Module.ALARM_GENERATOR
self._moduleid = ModuleNames[module]
node_type = Module2NodeType[module]
self._node_type_name = NodeTypeNames[node_type]
self.table = "ObjectCollectorInfo"
self._hostname = socket.gethostname()
self._instance_id = self._conf.worker_id()
self.disc = None
self._libpart_name = self._conf.host_ip() + ":" + self._instance_id
self._libpart = None
self._partset = set()
if self._conf.discovery()['server']:
self._max_out_rows = 20
self.disc = client.DiscoveryClient(
self._conf.discovery()['server'],
self._conf.discovery()['port'],
ModuleNames[Module.ALARM_GENERATOR],
'%s-%s' % (self._hostname, self._instance_id))
is_collector = True
if test_logger is not None:
is_collector = False
self._sandesh = Sandesh()
# Reset the sandesh send rate limit value
if self._conf.sandesh_send_rate_limit() is not None:
SandeshSystem.set_sandesh_send_rate_limit( \
self._conf.sandesh_send_rate_limit())
self._sandesh.init_generator(self._moduleid, self._hostname,
self._node_type_name, self._instance_id,
self._conf.collectors(),
self._node_type_name,
self._conf.http_port(),
['opserver.sandesh', 'sandesh'],
host_ip=self._conf.host_ip(),
discovery_client=self.disc,
connect_to_collector = is_collector,
alarm_ack_callback=self.alarm_ack_callback)
if test_logger is not None:
self._logger = test_logger
else:
self._sandesh.set_logging_params(
enable_local_log=self._conf.log_local(),
category=self._conf.log_category(),
level=self._conf.log_level(),
file=self._conf.log_file(),
enable_syslog=self._conf.use_syslog(),
syslog_facility=self._conf.syslog_facility())
self._logger = self._sandesh._logger
# Trace buffer list
self.trace_buf = [
{'name':'DiscoveryMsg', 'size':1000},
{'name':'AlarmStateChangeTrace', 'size':1000},
{'name':'UVEQTrace', 'size':10000}
]
# Create trace buffers
for buf in self.trace_buf:
self._sandesh.trace_buffer_create(name=buf['name'], size=buf['size'])
tables = set()
mgrlist = extension.ExtensionManager('contrail.analytics.alarms')
for elem in mgrlist:
tables.add(elem.name)
self._logger.error('Found extensions for %s' % str(tables))
self.mgrs = {}
self.tab_alarms = {}
self.ptab_info = {}
self.tab_perf = {}
self.tab_perf_prev = {}
for table in tables:
self.mgrs[table] = hook.HookManager(
namespace='contrail.analytics.alarms',
name=table,
invoke_on_load=True,
invoke_args=(),
on_load_failure_callback=self.fail_cb
)
for extn in self.mgrs[table][table]:
self._logger.info('Loaded extensions for %s: %s,%s doc %s' % \
(table, extn.name, extn.entry_point_target, extn.obj.__doc__))
self.tab_alarms[table] = {}
self.tab_perf[table] = AGTabStats()
ConnectionState.init(self._sandesh, self._hostname, self._moduleid,
self._instance_id,
staticmethod(ConnectionState.get_process_state_cb),
NodeStatusUVE, NodeStatus, self.table)
self._us = UVEServer(None, self._logger, self._conf.redis_password())
if not self.disc:
self._max_out_rows = 2
# If there is no discovery service, use fixed redis_uve list
redis_uve_list = []
try:
for redis_uve in self._conf.redis_uve_list():
redis_ip_port = redis_uve.split(':')
redis_elem = (redis_ip_port[0], int(redis_ip_port[1]),0)
redis_uve_list.append(redis_elem)
except Exception as e:
print('Failed to parse redis_uve_list: %s' % e)
else:
self._us.update_redis_uve_list(redis_uve_list)
# If there is no discovery service, use fixed alarmgen list
self._libpart = self.start_libpart(self._conf.alarmgen_list())
self._workers = {}
self._uvestats = {}
self._uveq = {}
self._uveqf = {}
self._alarm_config_change_map = {}
# Create config handler to read/update alarm config
rabbitmq_params = self._conf.rabbitmq_params()
self._config_handler = AlarmGenConfigHandler(self._moduleid,
self._instance_id, self.config_log, self.disc,
self._conf.keystone_params(), rabbitmq_params, self.mgrs,
self.alarm_config_change_callback)
if rabbitmq_params['servers'] and self.disc:
self._config_handler.start()
else:
self._logger.error('Rabbitmq server and/or Discovery server '
'not configured')
PartitionOwnershipReq.handle_request = self.handle_PartitionOwnershipReq
PartitionStatusReq.handle_request = self.handle_PartitionStatusReq
UVETableAlarmReq.handle_request = self.handle_UVETableAlarmReq
UVETableInfoReq.handle_request = self.handle_UVETableInfoReq
UVETablePerfReq.handle_request = self.handle_UVETablePerfReq
def config_log(self, msg, level):
self._sandesh.logger().log(
SandeshLogger.get_py_logger_level(level), msg)
# end config_log
def libpart_cb(self, part_list):
agpi = AlarmgenPartionInfo()
agpi.instance = self._instance_id
agpi.partitions = part_list
agp = AlarmgenPartition()
agp.name = self._hostname
agp.inst_parts = [agpi]
agp_trace = AlarmgenPartitionTrace(data=agp, sandesh=self._sandesh)
agp_trace.send(sandesh=self._sandesh)
newset = set(part_list)
oldset = self._partset
self._partset = newset
self._logger.error('Partition List : new %s old %s' % \
(str(newset),str(oldset)))
for addpart in (newset-oldset):
self._logger.error('Partition Add : %s' % addpart)
self.partition_change(addpart, True)
for delpart in (oldset-newset):
self._logger.error('Partition Del : %s' % delpart)
if not self.partition_change(delpart, False):
self._logger.error('Partition Del : %s failed!' % delpart)
raise SystemExit
self._logger.error('Partition List done : new %s old %s' % \
(str(newset),str(oldset)))
def start_libpart(self, ag_list):
if not self._conf.zk_list():
self._logger.error('Could not import libpartition: No zookeeper')
return None
if not ag_list:
self._logger.error('Could not import libpartition: No alarmgen list')
return None
try:
self._logger.error('Starting PC')
agpi = AlarmgenPartionInfo()
agpi.instance = self._instance_id
agpi.partitions = []
agp = AlarmgenPartition()
agp.name = self._hostname
agp.inst_parts = [agpi]
agp_trace = AlarmgenPartitionTrace(data=agp, sandesh=self._sandesh)
agp_trace.send(sandesh=self._sandesh)
pc = PartitionClient(self._conf.kafka_prefix() + "-alarmgen",
self._libpart_name, ag_list,
self._conf.partitions(), self.libpart_cb,
','.join(self._conf.zk_list()), self._logger)
self._logger.error('Started PC %s' % self._conf.kafka_prefix() + "-alarmgen")
return pc
except Exception as e:
self._logger.error('Could not import libpartition: %s' % str(e))
return None
def handle_uve_notifq(self, part, uves):
"""
uves :
This is a dict of UVEs that have changed, as per the following scheme:
<UVE-Key> : None # Any of the types may have changed
# Used during stop_partition and GenDelete
<UVE-Key> : { <Struct>: {} } # The given struct may have changed
<UVE-Key> : { <Struct>: None } # The given struct may have gone
Our treatment of the 2nd and 3rd case above is the same
"""
uveq_trace = UVEQTrace()
uveq_trace.uves = uves.keys()
uveq_trace.part = part
if part not in self._uveq:
self._uveq[part] = {}
self._logger.error('Created uveQ for part %s' % str(part))
uveq_trace.oper = "create"
else:
uveq_trace.oper = "update"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
for uv,types in uves.iteritems():
if types is None:
self._uveq[part][uv] = None
else:
if uv in self._uveq[part]:
if self._uveq[part][uv] is not None:
for kk in types.keys():
self._uveq[part][uv][kk] = {}
else:
self._uveq[part][uv] = {}
for kk in types.keys():
self._uveq[part][uv][kk] = {}
def handle_resource_check(self, part, current_inst, msgs):
"""
This function compares the set of synced redis instances
against the set now being reported by UVEServer
It returns :
- The updated set of redis instances
- A set of collectors to be removed
- A dict with the collector to be added, with the contents
"""
us_redis_inst = self._us.redis_instances()
disc_instances = copy.deepcopy(us_redis_inst)
r_added = disc_instances - current_inst
r_deleted = current_inst - disc_instances
coll_delete = set()
for r_inst in r_deleted:
ipaddr = r_inst[0]
port = r_inst[1]
coll_delete.add(ipaddr + ":" + str(port))
chg_res = {}
for r_inst in r_added:
coll, res = self._us.get_part(part, r_inst)
chg_res[coll] = res
return disc_instances, coll_delete, chg_res
def reconnect_agg_uve(self, lredis):
self._logger.error("Connected to Redis for Agg")
lredis.set(self._moduleid+':'+self._instance_id, True)
for pp in self._workers.keys():
self._workers[pp].reset_acq_time()
self._workers[pp].kill(\
RuntimeError('UVE Proc failed'),
block=False)
self.clear_agg_uve(lredis,
self._instance_id,
pp,
self._workers[pp].acq_time())
self.stop_uve_partition(pp)
for part in self._uveq.keys():
self._logger.info("Clearing part %d uveQ : %s" % \
(part,str(self._uveq[part].keys())))
uveq_trace = UVEQTrace()
uveq_trace.uves = self._uveq[part].keys()
uveq_trace.part = part
uveq_trace.oper = "clear"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
del self._uveq[part]
def clear_agg_uve(self, redish, inst, part, acq_time):
self._logger.error("Agg %s reset part %d, acq %d" % \
(inst, part, acq_time))
ppe2 = redish.pipeline()
ppe2.hdel("AGPARTS:%s" % inst, part)
ppe2.smembers("AGPARTKEYS:%s:%d" % (inst, part))
pperes2 = ppe2.execute()
ppe3 = redish.pipeline()
# Remove all contents for this AG-Partition
for elem in pperes2[-1]:
ppe3.delete("AGPARTVALUES:%s:%d:%s" % (inst, part, elem))
ppe3.delete("AGPARTKEYS:%s:%d" % (inst, part))
ppe3.hset("AGPARTS:%s" % inst, part, acq_time)
pperes3 = ppe3.execute()
def send_agg_uve(self, redish, inst, part, acq_time, rows):
"""
This function writes aggregated UVEs to redis
Each row has a UVE key, one of its struct type names and that struct's value
If type is None, it means that the entire UVE is being removed
If value is None, it means that the given struct of the UVE is being removed
The key and typename information is also published on a redis channel
"""
if not redish:
self._logger.error("No redis handle")
raise SystemExit
old_acq_time = redish.hget("AGPARTS:%s" % inst, part)
if old_acq_time is None:
self._logger.error("Agg %s part %d new" % (inst, part))
redish.hset("AGPARTS:%s" % inst, part, acq_time)
else:
# Is there stale information for this partition?
if int(old_acq_time) != acq_time:
self._logger.error("Agg %s stale info part %d, acqs %d,%d" % \
(inst, part, int(old_acq_time), acq_time))
self.clear_agg_uve(redish, inst, part, acq_time)
pub_list = []
ppe = redish.pipeline()
check_keys = set()
for row in rows:
vjson = json.dumps(row.val)
typ = row.typ
key = row.key
pub_list.append({"key":key,"type":typ})
if typ is None:
self._logger.debug("Agg remove part %d, key %s" % (part,key))
# The entire contents of the UVE should be removed
ppe.srem("AGPARTKEYS:%s:%d" % (inst, part), key)
ppe.delete("AGPARTVALUES:%s:%d:%s" % (inst, part, key))
else:
if row.val is None:
self._logger.debug("Agg remove part %d, key %s, type %s" % (part,key,typ))
# Remove the given struct from the UVE
ppe.hdel("AGPARTVALUES:%s:%d:%s" % (inst, part, key), typ)
check_keys.add(key)
else:
self._logger.debug("Agg update part %d, key %s, type %s" % (part,key,typ))
ppe.sadd("AGPARTKEYS:%s:%d" % (inst, part), key)
ppe.hset("AGPARTVALUES:%s:%d:%s" % (inst, part, key),
typ, vjson)
ppe.execute()
# Find the keys that have no content (all structs have been deleted)
ppe4 = redish.pipeline()
check_keys_list = list(check_keys)
for kk in check_keys_list:
ppe4.exists("AGPARTVALUES:%s:%d:%s" % (inst, part, kk))
pperes4 = ppe4.execute()
retry = False
# From the index, removes keys for which there are now no contents
ppe5 = redish.pipeline()
idx = 0
for res in pperes4:
if not res:
self._logger.error("Agg unexpected key %s from inst:part %s:%d" % \
(check_keys_list[idx], inst, part))
ppe5.srem("AGPARTKEYS:%s:%d" % (inst, part), check_keys_list[idx])
# TODO: alarmgen should have already figured out if all structs of
# the UVE are gone, and should have sent a UVE delete
# We should not need to figure this out again
retry = True
idx += 1
ppe5.execute()
redish.publish('AGPARTPUB:%s:%d' % (inst, part), json.dumps(pub_list))
if retry:
self._logger.error("Agg unexpected rows %s" % str(rows))
raise SystemExit
def send_alarm_update(self, tab, uk):
ustruct = None
alm_copy = []
for nm, asm in self.tab_alarms[tab][uk].iteritems():
uai = asm.get_uai()
if uai:
alm_copy.append(copy.deepcopy(uai))
if len(alm_copy) == 0:
ustruct = UVEAlarms(name = str(uk).split(':',1)[1], deleted = True)
self._logger.info('deleting alarm:')
else:
ustruct = UVEAlarms(name = str(uk).split(':',1)[1],
alarms = alm_copy)
alarm_msg = AlarmTrace(data=ustruct, table=tab, \
sandesh=self._sandesh)
alarm_msg.send(sandesh=self._sandesh)
self._logger.info('raising alarm %s' % (alarm_msg.log()))
def run_alarm_timers(self, curr_time):
delete_alarms, update_alarms = AlarmStateMachine.run_timers\
(curr_time, self.tab_alarms)
for alarm in delete_alarms:
del self.tab_alarms[alarm[0]][alarm[1]][alarm[2]]
self.send_alarm_update(alarm[0], alarm[1])
for alarm in update_alarms:
self.send_alarm_update(alarm[0], alarm[1])
def run_uve_processing(self):
"""
This function runs in its own gevent, and provides state compression
for UVEs.
Kafka worker (PartitionHandler) threads detect which UVEs have changed
and accumulate them onto a set. When this gevent runs, it processes
all UVEs of the set. Even if this gevent cannot run for a while, the
set should not grow in an unbounded manner (like a queue can)
"""
lredis = None
oldworkerset = None
while True:
workerset = {}
for part in self._workers.keys():
if self._workers[part]._up:
workerset[part] = self._workers[part].acq_time()
if workerset != oldworkerset:
if self.disc:
data = {
'ip-address': self._conf.host_ip(),
'instance-id': self._instance_id,
'redis-port': str(self._conf.redis_server_port()),
'partitions': json.dumps(workerset)
}
self._logger.error("Disc Publish to %s : %s"
% (str(self._conf.discovery()), str(data)))
self.disc.publish(ALARM_GENERATOR_SERVICE_NAME, data)
oldworkerset = copy.deepcopy(workerset)
for part in self._uveqf.keys():
self._logger.error("Stop UVE processing for %d:%d" % \
(part, self._uveqf[part]))
self.stop_uve_partition(part)
del self._uveqf[part]
if part in self._uveq:
uveq_trace = UVEQTrace()
uveq_trace.uves = self._uveq[part].keys()
uveq_trace.part = part
uveq_trace.oper = "stop"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
self._logger.info("Stopping part %d uveQ : %s" % \
(part,str(self._uveq[part].keys())))
del self._uveq[part]
prev = time.time()
try:
if lredis is None:
lredis = redis.StrictRedis(
host="127.0.0.1",
port=self._conf.redis_server_port(),
password=self._conf.redis_password(),
db=7)
self.reconnect_agg_uve(lredis)
ConnectionState.update(conn_type = ConnectionType.REDIS_UVE,
name = 'AggregateRedis', status = ConnectionStatus.UP)
else:
if not lredis.exists(self._moduleid+':'+self._instance_id):
self._logger.error('Identified redis restart')
self.reconnect_agg_uve(lredis)
gevs = {}
pendingset = {}
kafka_topic_down = False
for part in self._uveq.keys():
if not len(self._uveq[part]):
continue
self._logger.info("UVE Process for %d" % part)
kafka_topic_down |= self._workers[part].failed()
# Allow the partition handlers to queue new UVEs without
# interfering with the work of processing the current UVEs
# Process no more than 200 keys at a time
pendingset[part] = {}
icount = 0
while (len(self._uveq[part]) > 0) and icount < 200:
kp,vp = self._uveq[part].popitem()
pendingset[part][kp] = vp
icount += 1
self._logger.info("UVE Process for %d : %d, %d remain" % \
(part, len(pendingset[part]), len(self._uveq[part])))
gevs[part] = gevent.spawn(self.handle_uve_notif,part,\
pendingset[part])
if kafka_topic_down:
ConnectionState.update(conn_type = ConnectionType.KAFKA_PUB,
name = 'KafkaTopic', status = ConnectionStatus.DOWN)
else:
ConnectionState.update(conn_type = ConnectionType.KAFKA_PUB,
name = 'KafkaTopic', status = ConnectionStatus.UP)
if len(gevs):
gevent.joinall(gevs.values())
for part in gevs.keys():
# If UVE processing failed, requeue the working set
try:
outp = gevs[part].get()
except Exception as ex:
template = "Exception {0} in notif worker. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
outp = None
if outp is None:
self._logger.error("UVE Process failed for %d" % part)
self.handle_uve_notifq(part, pendingset[part])
elif not part in self._workers:
self._logger.error(
"Part %d is gone, cannot process UVEs" % part)
else:
acq_time = self._workers[part].acq_time()
if len(outp):
rows = []
for ku,vu in outp.iteritems():
if vu is None:
# This message has no type!
# It's used to indicate a delete of the entire UVE
rows.append(OutputRow(key=ku, typ=None, val=None))
if len(rows) >= self._max_out_rows:
self.send_agg_uve(lredis,
self._instance_id,
part,
acq_time,
rows)
rows[:] = []
continue
for kt,vt in vu.iteritems():
rows.append(OutputRow(key=ku, typ=kt, val=vt))
if len(rows) >= self._max_out_rows:
self.send_agg_uve(lredis,
self._instance_id,
part,
acq_time,
rows)
rows[:] = []
# Flush all remaining rows
if len(rows):
self.send_agg_uve(lredis,
self._instance_id,
part,
acq_time,
rows)
rows[:] = []
# If there are alarm config changes, then start a gevent per
# partition to process the alarm config changes
if self._alarm_config_change_map:
alarm_config_change_map = copy.deepcopy(
self._alarm_config_change_map)
self._alarm_config_change_map = {}
alarm_workers = {}
for partition in self._workers.keys():
alarm_workers[partition] = gevent.spawn(
self.alarm_config_change_worker, partition,
alarm_config_change_map)
if alarm_workers:
gevent.joinall(alarm_workers.values())
except Exception as ex:
template = "Exception {0} in uve proc. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
lredis = None
ConnectionState.update(conn_type = ConnectionType.REDIS_UVE,
name = 'AggregateRedis', status = ConnectionStatus.DOWN)
gevent.sleep(1)
curr = time.time()
try:
self.run_alarm_timers(int(curr))
except Exception as ex:
template = "Exception {0} in timer proc. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
raise SystemExit
if (curr - prev) < 0.5:
gevent.sleep(0.5 - (curr - prev))
else:
self._logger.info("UVE Process saturated")
gevent.sleep(0)
def examine_uve_for_alarms(self, uve_key, uve):
table = uve_key.split(':', 1)[0]
alarm_cfg = self._config_handler.alarm_config_db()
prevt = UTCTimestampUsec()
aproc = AlarmProcessor(self._logger)
# Process all alarms configured for this uve-type
for alarm_fqname, alarm_obj in \
alarm_cfg.get(table, {}).iteritems():
aproc.process_alarms(alarm_fqname, alarm_obj, uve_key, uve)
# Process all alarms configured for this uve-key
for alarm_fqname, alarm_obj in \
alarm_cfg.get(uve_key, {}).iteritems():
aproc.process_alarms(alarm_fqname, alarm_obj, uve_key, uve)
new_uve_alarms = aproc.uve_alarms
self.tab_perf[table].record_call(UTCTimestampUsec() - prevt)
del_types = []
if not self.tab_alarms.has_key(table):
self.tab_alarms[table] = {}
if self.tab_alarms[table].has_key(uve_key):
for nm, asm in self.tab_alarms[table][uve_key].iteritems():
# This type was present earlier, but is now gone
if not new_uve_alarms.has_key(nm):
del_types.append(nm)
else:
# This type has no new information
if asm.is_new_alarm_same(new_uve_alarms[nm]):
del new_uve_alarms[nm]
if len(del_types) != 0 or len(new_uve_alarms) != 0:
self._logger.debug("Alarm[%s] Deleted %s" % \
(table, str(del_types)))
self._logger.debug("Alarm[%s] Updated %s" % \
(table, str(new_uve_alarms)))
# These alarm types are new or updated
for nm, uai2 in new_uve_alarms.iteritems():
uai = copy.deepcopy(uai2)
uai.timestamp = UTCTimestampUsec()
uai.token = Controller.token(self._sandesh, uai.timestamp)
if not self.tab_alarms[table].has_key(uve_key):
self.tab_alarms[table][uve_key] = {}
if not nm in self.tab_alarms[table][uve_key]:
self.tab_alarms[table][uve_key][nm] = AlarmStateMachine(
tab=table, uv=uve_key, nm=nm, sandesh=self._sandesh,
activeTimer=aproc.ActiveTimer[uve_key][nm],
idleTimer=aproc.IdleTimer[uve_key][nm],
freqCheck_Times=aproc.FreqCheck_Times[uve_key][nm],
freqCheck_Seconds= \
aproc.FreqCheck_Seconds[uve_key][nm],
freqExceededCheck= \
aproc.FreqExceededCheck[uve_key][nm])
asm = self.tab_alarms[table][uve_key][nm]
asm.set_uai(uai)
# go through alarm set statemachine code
asm.set_alarms()
# These alarm types are now gone
for dnm in del_types:
if dnm in self.tab_alarms[table][uve_key]:
delete_alarm = \
self.tab_alarms[table][uve_key][dnm].clear_alarms()
if delete_alarm:
del self.tab_alarms[table][uve_key][dnm]
self.send_alarm_update(table, uve_key)
# end examine_uve_for_alarms
def alarm_config_change_worker(self, partition, alarm_config_change_map):
self._logger.debug('Alarm config change worker for partition %d'
% (partition))
try:
for uve_key, alarm_map in alarm_config_change_map.iteritems():
self._logger.debug('Handle alarm config change for '
'[partition:uve_key:{alarms}] -> [%d:%s:%s]' %
(partition, uve_key, str(alarm_map)))
uve_type_name = uve_key.split(':', 1)
try:
if len(uve_type_name) == 1:
uves = self.ptab_info[partition][uve_type_name[0]]
else:
uve = self.ptab_info[partition][uve_type_name[0]]\
[uve_type_name[1]]
uves = {uve_type_name[1]: uve}
except KeyError:
continue
else:
for name, data in uves.iteritems():
self._logger.debug('process alarm for uve %s' % (name))
self.examine_uve_for_alarms(uve_type_name[0]+':'+name,
data.values())
gevent.sleep(0)
except Exception as e:
self._logger.error('Error in alarm config change worker for '
'partition %d - %s' % (partition, str(e)))
self._logger.error('traceback %s' % (traceback.format_exc()))
# end alarm_config_change_worker
def alarm_config_change_callback(self, alarm_config_change_map):
for table, alarm_map in alarm_config_change_map.iteritems():
try:
tamap = self._alarm_config_change_map[table]
except KeyError:
self._alarm_config_change_map[table] = alarm_map
else:
tamap.update(alarm_map)
# end alarm_config_change_callback
def stop_uve_partition(self, part):
if not part in self.ptab_info:
return
for tk in self.ptab_info[part].keys():
for rkey in self.ptab_info[part][tk].keys():
uk = tk + ":" + rkey
if tk in self.tab_alarms:
if uk in self.tab_alarms[tk]:
self.delete_tab_alarms_timer(tk, uk)
del self.tab_alarms[tk][uk]
ustruct = UVEAlarms(name = rkey, deleted = True)
alarm_msg = AlarmTrace(data=ustruct, \
table=tk, sandesh=self._sandesh)
self._logger.error('send del alarm for stop: %s' % \
(alarm_msg.log()))
alarm_msg.send(sandesh=self._sandesh)
del self.ptab_info[part][tk][rkey]
self._logger.error("UVE %s deleted in stop" % (uk))
del self.ptab_info[part][tk]
del self.ptab_info[part]
def delete_tab_alarms_timer(self, tab, uv):
"""
        This function deletes all the timers for the given tab, uv combination
"""
for ak,av in self.tab_alarms[tab][uv].iteritems():
av.delete_timers()
def handle_uve_notif(self, part, uves):
"""
Call this function when a UVE has changed. This can also
        happen when taking ownership of a partition, or when a
generator is deleted.
Args:
part : Partition Number
            uves : dict, where the key is the UVE Name.
The value is either a dict of UVE structs, or "None",
which means that all UVE structs should be processed.
Returns:
status of operation (True for success)
"""
self._logger.debug("Changed part %d UVEs : %s" % (part, str(uves)))
success = True
output = {}
uveq_trace = UVEQTrace()
uveq_trace.uves = uves.keys()
uveq_trace.part = part
uveq_trace.oper = "process"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
erruves = []
for uv,types in uves.iteritems():
tab = uv.split(':',1)[0]
if tab not in self.tab_perf:
self.tab_perf[tab] = AGTabStats()
if part in self._uvestats:
# Record stats on UVE Keys being processed
if not tab in self._uvestats[part]:
self._uvestats[part][tab] = {}
if uv in self._uvestats[part][tab]:
self._uvestats[part][tab][uv] += 1
else:
self._uvestats[part][tab][uv] = 1
uve_name = uv.split(':',1)[1]
prevt = UTCTimestampUsec()
filters = {}
if types:
filters["cfilt"] = {}
for typ in types.keys():
filters["cfilt"][typ] = set()
failures, uve_data = self._us.get_uve(uv, True, filters)
if failures:
erruves.append(uv)
success = False
self.tab_perf[tab].record_get(UTCTimestampUsec() - prevt)
# Handling Agg UVEs
if not part in self.ptab_info:
self._logger.error("Creating UVE table for part %s" % str(part))
self.ptab_info[part] = {}
if not tab in self.ptab_info[part]:
self.ptab_info[part][tab] = {}
if uve_name not in self.ptab_info[part][tab]:
self.ptab_info[part][tab][uve_name] = AGKeyInfo(part)
prevt = UTCTimestampUsec()
output[uv] = {}
touched = False
if not types:
self.ptab_info[part][tab][uve_name].update(uve_data)
if len(self.ptab_info[part][tab][uve_name].removed()):
touched = True
rset = self.ptab_info[part][tab][uve_name].removed()
self._logger.info("UVE %s removed structs %s" % (uv, rset))
uveq_trace = UVEQTrace()
uveq_trace.uves = [uv + "-" + str(rset)]
uveq_trace.part = part
uveq_trace.oper = "remove"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
for rems in self.ptab_info[part][tab][uve_name].removed():
output[uv][rems] = None
if len(self.ptab_info[part][tab][uve_name].changed()):
touched = True
self._logger.debug("UVE %s changed structs %s" % (uv, \
self.ptab_info[part][tab][uve_name].changed()))
for chgs in self.ptab_info[part][tab][uve_name].changed():
output[uv][chgs] = \
self.ptab_info[part][tab][uve_name].values()[chgs]
if len(self.ptab_info[part][tab][uve_name].added()):
touched = True
self._logger.debug("UVE %s added structs %s" % (uv, \
self.ptab_info[part][tab][uve_name].added()))
for adds in self.ptab_info[part][tab][uve_name].added():
output[uv][adds] = \
self.ptab_info[part][tab][uve_name].values()[adds]
else:
for typ in types:
val = None
if typ in uve_data:
val = uve_data[typ]
self.ptab_info[part][tab][uve_name].update_single(typ, val)
if len(self.ptab_info[part][tab][uve_name].removed()):
touched = True
rset = self.ptab_info[part][tab][uve_name].removed()
self._logger.info("UVE %s removed structs %s" % (uv, rset))
uveq_trace = UVEQTrace()
uveq_trace.uves = [uv + "-" + str(rset)]
uveq_trace.part = part
uveq_trace.oper = "remove"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
for rems in self.ptab_info[part][tab][uve_name].removed():
output[uv][rems] = None
if len(self.ptab_info[part][tab][uve_name].changed()):
touched = True
self._logger.debug("UVE %s changed structs %s" % (uve_name, \
self.ptab_info[part][tab][uve_name].changed()))
for chgs in self.ptab_info[part][tab][uve_name].changed():
output[uv][chgs] = \
self.ptab_info[part][tab][uve_name].values()[chgs]
if len(self.ptab_info[part][tab][uve_name].added()):
touched = True
self._logger.debug("UVE %s added structs %s" % (uve_name, \
self.ptab_info[part][tab][uve_name].added()))
for adds in self.ptab_info[part][tab][uve_name].added():
output[uv][adds] = \
self.ptab_info[part][tab][uve_name].values()[adds]
if not touched:
del output[uv]
local_uve = self.ptab_info[part][tab][uve_name].values()
self.tab_perf[tab].record_pub(UTCTimestampUsec() - prevt)
if len(local_uve.keys()) == 0:
self._logger.info("UVE %s deleted in proc" % (uv))
del self.ptab_info[part][tab][uve_name]
output[uv] = None
if tab in self.tab_alarms:
if uv in self.tab_alarms[tab]:
del_types = []
for nm, asm in self.tab_alarms[tab][uv].iteritems():
delete_alarm = \
self.tab_alarms[tab][uv][nm].clear_alarms()
if delete_alarm:
del_types.append(nm)
for nm in del_types:
del self.tab_alarms[tab][uv][nm]
self.send_alarm_update(tab, uv)
# Both alarm and non-alarm contents are gone.
# We do not need to do alarm evaluation
continue
# Withdraw the alarm if the UVE has no non-alarm structs
if len(local_uve.keys()) == 1 and "UVEAlarms" in local_uve:
if tab in self.tab_alarms:
if uv in self.tab_alarms[tab]:
self._logger.info("UVE %s has no non-alarm" % (uv))
del_types = []
for nm, asm in self.tab_alarms[tab][uv].iteritems():
delete_alarm = \
self.tab_alarms[tab][uv][nm].clear_alarms()
if delete_alarm:
del_types.append(nm)
for nm in del_types:
del self.tab_alarms[tab][uv][nm]
self.send_alarm_update(tab, uv)
continue
            # Examine the UVE to check if alarms need to be raised or deleted
self.examine_uve_for_alarms(uv, local_uve)
if success:
uveq_trace = UVEQTrace()
uveq_trace.uves = output.keys()
uveq_trace.part = part
uveq_trace.oper = "proc-output"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
return output
else:
uveq_trace = UVEQTrace()
uveq_trace.uves = erruves
uveq_trace.part = part
uveq_trace.oper = "proc-error"
uveq_trace.trace_msg(name="UVEQTrace",\
sandesh=self._sandesh)
return None
def handle_UVETableInfoReq(self, req):
if req.partition == -1:
parts = self.ptab_info.keys()
else:
parts = [req.partition]
self._logger.info("Got UVETableInfoReq : %s" % str(parts))
np = 1
for part in parts:
if part not in self.ptab_info:
continue
tables = []
for tab in self.ptab_info[part].keys():
uvel = []
for uk,uv in self.ptab_info[part][tab].iteritems():
types = []
for tk,tv in uv.values().iteritems():
types.append(UVEStructInfo(type = tk,
content = json.dumps(tv)))
uvel.append(UVEObjectInfo(
name = uk, structs = types))
tables.append(UVETableInfo(table = tab, uves = uvel))
resp = UVETableInfoResp(partition = part)
resp.tables = tables
if np == len(parts):
mr = False
else:
mr = True
resp.response(req.context(), mr)
np = np + 1
def handle_UVETableAlarmReq(self, req):
status = False
if req.table == "all":
parts = self.tab_alarms.keys()
else:
parts = [req.table]
self._logger.info("Got UVETableAlarmReq : %s" % str(parts))
np = 1
for pt in parts:
resp = UVETableAlarmResp(table = pt)
uves = []
for uk,uv in self.tab_alarms[pt].iteritems():
for ak,av in uv.iteritems():
alm_copy = []
uai = av.get_uai(forced=True)
if uai:
alm_copy.append(copy.deepcopy(uai))
uves.append(UVEAlarmStateMachineInfo(
uai = UVEAlarms(name = uk, alarms = alm_copy),
uac = av.get_uac(), uas = av.get_uas()))
resp.uves = uves
if np == len(parts):
mr = False
else:
mr = True
resp.response(req.context(), mr)
np = np + 1
def handle_UVETablePerfReq(self, req):
status = False
if req.table == "all":
parts = self.tab_perf_prev.keys()
else:
parts = [req.table]
self._logger.info("Got UVETablePerfReq : %s" % str(parts))
np = 1
for pt in parts:
resp = UVETablePerfResp(table = pt)
resp.call_time = self.tab_perf_prev[pt].call_result()
resp.get_time = self.tab_perf_prev[pt].get_result()
resp.pub_time = self.tab_perf_prev[pt].pub_result()
resp.updates = self.tab_perf_prev[pt].get_n
if np == len(parts):
mr = False
else:
mr = True
resp.response(req.context(), mr)
np = np + 1
def partition_change(self, partno, enl):
"""
Call this function when getting or giving up
ownership of a partition
Args:
partno : Partition Number
enl : True for acquiring, False for giving up
Returns:
status of operation (True for success)
"""
status = False
if enl:
if partno in self._workers:
self._logger.info("Dup partition %d" % partno)
else:
cdisc = None
if self.disc:
cdisc = client.DiscoveryClient(
self._conf.discovery()['server'],
self._conf.discovery()['port'],
ModuleNames[Module.ALARM_GENERATOR],
'%s-%s-%d' % (self._hostname,
self._instance_id, partno))
ph = UveStreamProc(','.join(self._conf.kafka_broker_list()),
partno, self._conf.kafka_prefix()+"-uve-" + str(partno),
self._logger,
self.handle_uve_notifq, self._conf.host_ip(),
self.handle_resource_check,
self._instance_id,
self._conf.redis_server_port(),
self._conf.kafka_prefix()+"-workers")
ph.start()
self._workers[partno] = ph
self._uvestats[partno] = {}
tout = 1200
idx = 0
while idx < tout:
                    # When this partition starts,
# uveq will get created
if partno not in self._uveq:
gevent.sleep(.1)
else:
break
idx += 1
if partno in self._uveq:
status = True
else:
# TODO: The partition has not started yet,
# but it still might start later.
# We possibly need to exit
status = False
self._logger.error("Unable to start partition %d" % partno)
else:
if partno in self._workers:
ph = self._workers[partno]
self._logger.error("Kill part %s" % str(partno))
ph.kill(timeout=60)
try:
res,db = ph.get(False)
except gevent.Timeout:
self._logger.error("Unable to kill partition %d" % partno)
return False
self._logger.error("Returned " + str(res))
self._uveqf[partno] = self._workers[partno].acq_time()
del self._workers[partno]
del self._uvestats[partno]
tout = 1200
idx = 0
while idx < tout:
                    # When this partition stops,
# uveq will get destroyed
if partno in self._uveq:
gevent.sleep(.1)
else:
break
idx += 1
if partno not in self._uveq:
status = True
else:
# TODO: The partition has not stopped yet
# but it still might stop later.
# We possibly need to exit
status = False
self._logger.error("Unable to stop partition %d" % partno)
else:
self._logger.info("No partition %d" % partno)
return status
def handle_PartitionOwnershipReq(self, req):
self._logger.info("Got PartitionOwnershipReq: %s" % str(req))
status = self.partition_change(req.partition, req.ownership)
resp = PartitionOwnershipResp()
resp.status = status
resp.response(req.context())
def process_stats(self):
''' Go through the UVEKey-Count stats collected over
the previous time period over all partitions
and send it out
'''
self.tab_perf_prev = copy.deepcopy(self.tab_perf)
for kt in self.tab_perf.keys():
#self.tab_perf_prev[kt] = copy.deepcopy(self.tab_perf[kt])
self.tab_perf[kt].reset()
s_partitions = set()
s_keys = set()
n_updates = 0
for pk,pc in self._workers.iteritems():
s_partitions.add(pk)
din = pc.stats()
dout = copy.deepcopy(self._uvestats[pk])
self._uvestats[pk] = {}
for ktab,tab in dout.iteritems():
utct = UVETableCount()
utct.keys = 0
utct.count = 0
for uk,uc in tab.iteritems():
s_keys.add(uk)
n_updates += uc
utct.keys += 1
utct.count += uc
au_obj = AlarmgenUpdate(name=self._sandesh._source + ':' + \
self._sandesh._node_type + ':' + \
self._sandesh._module + ':' + \
self._sandesh._instance_id,
partition = pk,
table = ktab,
o = utct,
i = None,
sandesh=self._sandesh)
self._logger.debug('send output stats: %s' % (au_obj.log()))
au_obj.send(sandesh=self._sandesh)
for ktab,tab in din.iteritems():
au_notifs = []
for kcoll,coll in tab.iteritems():
for kgen,gen in coll.iteritems():
for tk,tc in gen.iteritems():
tkc = UVETypeInfo()
tkc.type= tk
tkc.count = tc
tkc.generator = kgen
tkc.collector = kcoll
au_notifs.append(tkc)
au_obj = AlarmgenUpdate(name=self._sandesh._source + ':' + \
self._sandesh._node_type + ':' + \
self._sandesh._module + ':' + \
self._sandesh._instance_id,
partition = pk,
table = ktab,
o = None,
i = au_notifs,
sandesh=self._sandesh)
self._logger.debug('send input stats: %s' % (au_obj.log()))
au_obj.send(sandesh=self._sandesh)
au = AlarmgenStatus()
au.name = self._hostname
au.counters = []
au.alarmgens = []
ags = AlarmgenStats()
ags.instance = self._instance_id
ags.partitions = len(s_partitions)
ags.keys = len(s_keys)
ags.updates = n_updates
au.counters.append(ags)
agname = self._sandesh._source + ':' + \
self._sandesh._node_type + ':' + \
self._sandesh._module + ':' + \
self._sandesh._instance_id
au.alarmgens.append(agname)
atrace = AlarmgenStatusTrace(data = au, sandesh = self._sandesh)
self._logger.debug('send alarmgen status : %s' % (atrace.log()))
atrace.send(sandesh=self._sandesh)
def handle_PartitionStatusReq(self, req):
''' Return the entire contents of the UVE DB for the
requested partitions
'''
if req.partition == -1:
parts = self._workers.keys()
else:
parts = [req.partition]
self._logger.info("Got PartitionStatusReq: %s" % str(parts))
np = 1
for pt in parts:
resp = PartitionStatusResp()
resp.partition = pt
if self._workers.has_key(pt):
resp.enabled = True
resp.offset = self._workers[pt]._partoffset
resp.uves = []
for kcoll,coll in self._workers[pt].contents().iteritems():
uci = UVECollInfo()
uci.collector = kcoll
uci.uves = []
for kgen,gen in coll.iteritems():
ugi = UVEGenInfo()
ugi.generator = kgen
ugi.uves = []
for tabk,tabc in gen.iteritems():
for uk,uc in tabc.iteritems():
ukc = UVEKeyInfo()
ukc.key = tabk + ":" + uk
ukc.types = []
for tk,tc in uc.iteritems():
uvtc = UVETypeCount()
uvtc.type = tk
uvtc.count = tc["c"]
uvtc.agg_uuid = str(tc["u"])
ukc.types.append(uvtc)
ugi.uves.append(ukc)
uci.uves.append(ugi)
resp.uves.append(uci)
else:
resp.enabled = False
if np == len(parts):
mr = False
else:
mr = True
resp.response(req.context(), mr)
np = np + 1
def alarm_ack_callback(self, alarm_req):
'''
Callback function for sandesh alarm acknowledge request.
This method is passed as a parameter in the init_generator().
Upon receiving the SandeshAlarmAckRequest, the corresponding
handler defined in the sandesh library would invoke this callback.
This function returns one of the response codes defined in
SandeshAlarmAckResponseCode.
'''
self._logger.debug('Alarm acknowledge request callback: %s' %
str(alarm_req))
table = alarm_req.table
uname = alarm_req.table+':'+alarm_req.name
atype = alarm_req.type
try:
alarm_type = \
self.tab_alarms[table][uname][atype].get_uai()
except KeyError:
return SandeshAlarmAckResponseCode.ALARM_NOT_PRESENT
else:
            # Either the alarm is not present or it is not in Active or Soak_Idle
# state
if alarm_type is None:
return SandeshAlarmAckResponseCode.ALARM_NOT_PRESENT
# Either the timestamp sent by the client is invalid or
# the alarm is updated.
if alarm_type.timestamp != alarm_req.timestamp:
return SandeshAlarmAckResponseCode.INVALID_ALARM_REQUEST
# If the alarm was already acknowledged, just return SUCCESS.
if alarm_type.ack:
return SandeshAlarmAckResponseCode.SUCCESS
# All sanity checks passed. Acknowledge the alarm.
alarm_type.ack = True
alarm = []
for nm, asm in self.tab_alarms[table][uname].iteritems():
uai = asm.get_uai()
if uai:
alarm.append(copy.deepcopy(uai))
alarm_data = UVEAlarms(name=alarm_req.name, alarms=alarm)
alarm_sandesh = AlarmTrace(data=alarm_data, table=table,
sandesh=self._sandesh)
alarm_sandesh.send(sandesh=self._sandesh)
return SandeshAlarmAckResponseCode.SUCCESS
# end alarm_ack_callback
def disc_cb_coll(self, clist):
'''
Analytics node may be brought up/down any time. For UVE aggregation,
alarmgen needs to know the list of all Analytics nodes (redis-uves).
Periodically poll the Collector list [in lieu of
        redis-uve nodes] from the discovery service.
'''
self._logger.error("Discovery Collector callback : %s" % str(clist))
newlist = []
for elem in clist:
ipaddr = elem["ip-address"]
cpid = 0
if "pid" in elem:
cpid = int(elem["pid"])
newlist.append((ipaddr, self._conf.redis_server_port(), cpid))
self._us.update_redis_uve_list(newlist)
def disc_cb_ag(self, alist):
'''
Analytics node may be brought up/down any time. For partitioning,
alarmgen needs to know the list of all Analytics nodes (alarmgens).
Periodically poll the alarmgen list from the discovery service
'''
self._logger.error("Discovery AG callback : %s" % str(alist))
newlist = []
for elem in alist:
ipaddr = elem["ip-address"]
inst = elem["instance-id"]
newlist.append(ipaddr + ":" + inst)
        # We should always include ourselves in the list of members
newset = set(newlist)
newset.add(self._libpart_name)
newlist = list(newset)
if not self._libpart:
self._libpart = self.start_libpart(newlist)
else:
self._libpart.update_cluster_list(newlist)
def run_cpu_mon(self):
alarmgen_cpu_info = CpuInfoData()
while True:
before = time.time()
mod_cpu_info = ModuleCpuInfo()
mod_cpu_info.module_id = self._moduleid
mod_cpu_info.instance_id = self._instance_id
mod_cpu_info.cpu_info = alarmgen_cpu_info.get_cpu_info(
system=False)
mod_cpu_state = ModuleCpuState()
mod_cpu_state.name = self._hostname
mod_cpu_state.module_cpu_info = [mod_cpu_info]
alarmgen_cpu_state_trace = ModuleCpuStateTrace(\
data=mod_cpu_state, sandesh = self._sandesh)
alarmgen_cpu_state_trace.send(sandesh=self._sandesh)
aly_cpu_state = AnalyticsCpuState()
aly_cpu_state.name = self._hostname
aly_cpu_info = ProcessCpuInfo()
aly_cpu_info.module_id= self._moduleid
aly_cpu_info.inst_id = self._instance_id
aly_cpu_info.cpu_share = mod_cpu_info.cpu_info.cpu_share
aly_cpu_info.mem_virt = mod_cpu_info.cpu_info.meminfo.virt
aly_cpu_info.mem_res = mod_cpu_info.cpu_info.meminfo.res
aly_cpu_state.cpu_info = [aly_cpu_info]
aly_cpu_state_trace = AnalyticsCpuStateTrace(\
data=aly_cpu_state, sandesh = self._sandesh)
aly_cpu_state_trace.send(sandesh=self._sandesh)
# Send out the UVEKey-Count stats for this time period
self.process_stats()
duration = time.time() - before
if duration < 60:
gevent.sleep(60 - duration)
else:
self._logger.error("Periodic collection took %s sec" % duration)
def run(self):
self.gevs = [ gevent.spawn(self.run_cpu_mon),
gevent.spawn(self.run_uve_processing)]
if self.disc:
sp1 = ServicePoller(self._logger, CollectorTrace,
self.disc,
COLLECTOR_DISCOVERY_SERVICE_NAME,
self.disc_cb_coll, self._sandesh)
sp1.start()
self.gevs.append(sp1)
sp2 = ServicePoller(self._logger, AlarmgenTrace,
self.disc, ALARM_GENERATOR_SERVICE_NAME,
self.disc_cb_ag, self._sandesh)
sp2.start()
self.gevs.append(sp2)
try:
gevent.joinall(self.gevs)
except KeyboardInterrupt:
print 'AlarmGen Exiting on ^C'
except gevent.GreenletExit:
self._logger.error('AlarmGen Exiting on gevent-kill')
except:
raise
finally:
self._logger.error('AlarmGen stopping everything')
self.stop()
exit()
def stop(self):
self._sandesh._client._connection.set_admin_state(down=True)
self._sandesh.uninit()
if self._config_handler:
self._config_handler.stop()
l = len(self.gevs)
for idx in range(0,l):
self._logger.error('AlarmGen killing %d of %d' % (idx+1, l))
self.gevs[0].kill()
self._logger.error('AlarmGen joining %d of %d' % (idx+1, l))
self.gevs[0].join()
self._logger.error('AlarmGen stopped %d of %d' % (idx+1, l))
self.gevs = self.gevs[1:]
def sigterm_handler(self):
self.stop()
exit()
def setup_controller(argv):
config = CfgParser(argv)
config.parse()
return Controller(config)
def main(args=None):
controller = setup_controller(args or ' '.join(sys.argv[1:]))
gevent.hub.signal(signal.SIGTERM, controller.sigterm_handler)
gv = gevent.getcurrent()
gv._main_obj = controller
controller.run()
if __name__ == '__main__':
main()
|
codilime/contrail-controller
|
src/opserver/alarmgen.py
|
Python
|
apache-2.0
| 94,566
|
from typing import Any, Type
from tornado.web import RequestHandler
from malcolm.core import Info
class HandlerInfo(Info):
"""Tornado RequestHandlers that should make up the webserver application
Args:
        regexp: Path regexp for this handler to get requests from. E.g. r"/ws"
        request_class: Request handler class to instantiate for this path
**kwargs: Keyword args to be passed to request_class constructor
"""
def __init__(
self, regexp: str, request_class: Type[RequestHandler], **kwargs: Any
) -> None:
self.regexp = regexp
self.request_class = request_class
self.kwargs = kwargs
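# Illustrative usage (not part of the original module; EchoHandler is a made-up
# example). The kwargs are stored for later use when the web application is
# assembled, e.g. they would be forwarded to the RequestHandler's initialize():
#
#     class EchoHandler(RequestHandler):
#         def initialize(self, greeting: str) -> None:
#             self.greeting = greeting
#
#     info = HandlerInfo(r"/echo", EchoHandler, greeting="hello")
#     assert info.regexp == r"/echo" and info.kwargs == {"greeting": "hello"}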
|
dls-controls/pymalcolm
|
malcolm/modules/web/infos.py
|
Python
|
apache-2.0
| 643
|
# -*- coding: utf-8 -*-
import time
from hil import Component
class Sound(Component):
def run(self, id, participation, role):
print(chr(7))
return 'Sound'
|
fvioz/hil-sample
|
apps/sound/main.py
|
Python
|
apache-2.0
| 167
|
from orderedset import OrderedSet
from structurizr.model.component import Component
from structurizr.model.container import Container
from structurizr.model.enterprise import Enterprise
from structurizr.model.interaction_style import InteractionStyle
from structurizr.model.location import Location
from structurizr.model.person import Person
from structurizr.model.software_system import SoftwareSystem
from structurizr.model.relationship import Relationship  # needed by add_relationship(); module path assumed from the imports above
class _SequentialIntegerGeneratorStrategy(object):
    def __init__(self):
        self._next_id = 0
    def generate_id(self, item):
        self._next_id += 1
        return self._next_id
    def found(self, existing_id):
        # Keep the counter ahead of any ID already present in the model.
        if isinstance(existing_id, int) and existing_id > self._next_id:
            self._next_id = existing_id
class Model:
def __init__(self):
self._id_generator = _SequentialIntegerGeneratorStrategy()
self._elements_by_id = {}
self._relationships_by_id = {}
self._enterprise = None
self._people = OrderedSet()
self._software_systems = OrderedSet()
def get_enterprise(self):
return self._enterprise
def set_enterprise(self, enterprise):
if not isinstance(enterprise, Enterprise):
raise TypeError("{!r} is not an {}".format(enterprise, Enterprise.__name__))
self._enterprise = enterprise
def add_software_system(self, name, description, location=Location.UNSPECIFIED):
if self.get_software_system_with_name(name) is not None:
raise ValueError("A software system named {} already exists".format(name))
software_system = SoftwareSystem()
software_system.set_location(location)
software_system.set_name(name)
software_system.set_description(description)
self._software_systems.add(software_system)
software_system.set_id(self._id_generator.generate_id(software_system))
self._add_element_to_internal_structures(software_system)
return software_system
def add_person(self, name, description, location=Location.UNSPECIFIED):
if self.get_person_with_name(name) is not None:
raise ValueError("A person named {} already exists".format(name))
person = Person()
person.set_location(location)
person.set_name(name)
person.set_description(description)
self._people.add(person)
person.set_id(self._id_generator.generate_id(person))
self._add_element_to_internal_structures(person)
return person
def add_container(self, software_system, name, description, technology):
if not isinstance(software_system, SoftwareSystem):
raise TypeError("{} is not a {}".format(software_system, SoftwareSystem.__name__))
if self.get_container_with_name(name) is not None:
raise ValueError("A software system named {} already exists".format(name))
container = Container()
container.set_name(name)
container.set_description(description)
container.set_technology(technology)
container.set_parent(software_system)
software_system.add_existing_container(container)
container.set_id(self._id_generator.generate_id(container))
self._add_element_to_internal_structures(container)
def add_component_of_type(self, container, name, type_name, description, technology):
if not isinstance(container, Container):
raise TypeError("{} is not a {}".format(container, Container.__name__))
component = Component()
component.set_name(name)
component.set_type(type_name)
component.set_description(description)
component.set_technology(technology)
component.set_parent(container)
container.add_existing_component(component)
component.set_id(self._id_generator.generate_id(component))
self._add_element_to_internal_structures(component)
def add_component(self, container, name, description):
if not isinstance(container, Container):
raise TypeError("{} is not a {}".format(container, Container.__name__))
component = Component()
component.set_name(name)
component.set_description(description)
component.set_parent(container)
container.add_existing_component(component)
component.set_id(self._id_generator.generate_id(component))
self._add_element_to_internal_structures(component)
def add_relationship(self, source, destination, description, technology=None, interaction_style=InteractionStyle.SYNCHRONOUS):
relationship = Relationship(source, destination, description, technology, interaction_style)
if self.add_existing_relationship(relationship):
return relationship
return None
def add_existing_relationship(self, relationship):
if not relationship.get_source().has(relationship):
relationship.set_id(self._id_generator.generate_id(relationship))
relationship.get_source().add_relationship(relationship)
self._add_relationship_to_internal_structures(relationship)
return True
return False
def _add_element_to_internal_structures(self, element):
self._elements_by_id[element.get_id()] = element
element.set_model(self)
self._id_generator.found(element.get_id())
def _add_relationship_to_internal_structures(self, relationship):
self._relationships_by_id[relationship.get_id()] = relationship
self._id_generator.found(relationship.get_id())
def get_elements(self):
return set(self._elements_by_id.values()) # TODO: Returning a copy again here?
def get_element(self, id):
return self._elements_by_id[id]
def get_relationships(self):
return set(self._relationships_by_id.values())
def get_relationship(self, id):
return self._relationships_by_id[id]
def get_people(self):
return self._people.copy()
def get_software_systems(self):
return self._software_systems.copy()
# TODO: Omitting the hydrate stuff for now until I have a better understanding
def contains(self, element):
return element in self._elements_by_id.values()
def get_software_system_with_name(self, name):
return next((ss for ss in self._software_systems if ss.get_name() == name), None)
def get_software_system_with_id(self, id):
return next((ss for ss in self._software_systems if ss.get_id() == id), None)
def get_person_with_name(self, name):
return next((p for p in self._people if p.get_name() == name), None)
def add_implicit_relationships(self):
implicit_relationships = set()
for relationship in self.get_relationships():
source = relationship.get_source()
destination = relationship.get_destination()
while source != None:
while destination != None:
if not source.has_efferent_relationships_with(destination):
if self._propagated_relationship_is_allowed(source, destination):
implicit_relationship = self.add_relationship(source, destination, "")
if implicit_relationship is not None:
                                implicit_relationships.add(implicit_relationship)
destination = destination.get_parent()
destination = relationship.get_destination()
source = source.get_parent()
return implicit_relationships
def _propagated_relationship_is_allowed(self, source, destination):
if source == destination:
return False
if source.get_parent() is not None:
if destination == source.get_parent():
return False
if source.get_parent().get_parent() is not None:
if destination == source.get_parent().get_parent():
return False
if destination.get_parent() is not None:
if source == destination.get_parent():
return False
if destination.get_parent().get_parent() is not None:
if source == destination.get_parent().get_parent():
return False
return True
def is_empty(self):
        return len(self._people) == 0 and len(self._software_systems) == 0
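# Illustrative sketch (not part of the original module): exercising the Model
# API above with made-up names; add_relationship relies on the Relationship
# class imported at the top of this file.
#
#     model = Model()
#     user = model.add_person("User", "Someone who uses the system")
#     app = model.add_software_system("Web App", "Serves the UI")
#     rel = model.add_relationship(user, app, "Uses")
#     assert model.contains(user) and model.contains(app)
#     assert rel in model.get_relationships()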
|
sixty-north/structurizr-python
|
structurizr/model/model.py
|
Python
|
apache-2.0
| 8,215
|
class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
N = len(s)
isValid = [[False] * (N+1) for _ in xrange(N+1)]
for i in xrange(N+1):
isValid[i][i] = True
if i < N:
isValid[i][i+1] = (s[i] == '*') # len 1 str is valid only when it's '*'
for l in xrange(2, N+1):
for i in xrange(N-l+1):
j = i + l
# print 'check', i, j, isValid[i+1][j-1]
if s[i] == ')' or s[j-1] == '(':
isValid[i][j] = False
continue
if isValid[i+1][j-1]:
isValid[i][j] = True
continue
for k in xrange(i+1, j):
if isValid[i][k] and isValid[k][j]:
isValid[i][j] = True
break
# print isValid
return isValid[0][N]
class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
N = len(s)
ns = set([0])
for c in s:
if c == '(':
ns = {n+1 for n in ns}
elif c == ')':
ns = {n-1 for n in ns if n >= 1}
else:
ns = {n+z for n in ns for z in (-1, 0, 1) if n+z >= 0}
if not ns:
return False
return 0 in ns
print Solution().checkValidString("")
print Solution().checkValidString("*")
print Solution().checkValidString("()")
print Solution().checkValidString("(*()")
|
xiaonanln/myleetcode-python
|
src/678. Valid Parenthesis String.py
|
Python
|
apache-2.0
| 1,229
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""A module with configuration of the ProgramAuditOwner role's permissions."""
# pylint: disable=invalid-name
scope = "Audit Implied"
description = """
A user with the ProgramOwner role for a private program will also have this
role in the audit context for any audit created for that program.
"""
permissions = {
"read": [
"Request",
"Assessment",
"AssessmentTemplate",
"Issue",
"UserRole",
"Audit",
"AuditObject",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"Context",
],
"create": [
"Request",
"Assessment",
"AssessmentTemplate",
"Issue",
"UserRole",
"Audit",
"AuditObject",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting"
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"Assessment",
"AssessmentTemplate",
"Issue",
"UserRole",
"Audit",
"AuditObject",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting"
],
"delete": [
"UserRole",
"Request",
"Assessment",
"AssessmentTemplate",
"Issue",
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting"
"AuditObject",
"Audit"
]
}
|
prasannav7/ggrc-core
|
src/ggrc_basic_permissions/roles/ProgramAuditOwner.py
|
Python
|
apache-2.0
| 1,926
|
#!/usr/bin/env python2
# Copyright (c) 2016 Martin F. Falatic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reading data from a Particle-based environmental monitoring project
"""
from __future__ import print_function
import re
import sys
from contextlib import contextmanager
import os
import time
import json
import signal
from datetime import datetime
# import base64
import argparse
import requests
import dateutil.parser
import dateutil.zoneinfo
import dateutil.tz
from sseclient import SSEClient
PROGNAME = os.path.basename(sys.argv[0])
SCREEN_WIDTH = 78
TIMEZONE = dateutil.tz.tzlocal() # dateutil.zoneinfo.gettz('America/Los_Angeles')
CO2_BASELINE_PPM = 400
RETRY_WAIT_PERIOD = 5
SIGNUMS_TO_NAMES = dict((getattr(signal, n), n) \
for n in dir(signal) if n.startswith('SIG') and '_' not in n)
SIGNAMES_TO_NUMS = dict((v, k) for k, v in SIGNUMS_TO_NAMES.items())
API_ROOT = "https://api.particle.io/v1/devices/events/"
# ("2016-05-16T01:32:09.712000-07:00 76.46 47.30 1240 0x01D900F7D1 0x2104D8FD")
DATA_HEADER = "# Date(ISO8601) T(F) RH(%) CO2ppm RHT_raw CO2_raw"
@contextmanager
def _flexout_open(filename, mode='Ur'):
    if filename is None or filename == '':
fhandle = sys.stdout
else:
fhandle = open(filename, mode)
try:
yield fhandle
finally:
if not (filename is None or filename == ''):
fhandle.close()
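# Illustrative usage (not part of the original script): writing a line either
# to a file or, when filename is None/'', to stdout, which is left open:
#
#     with _flexout_open('readings.txt', 'a') as fh:
#         print("hello", file=fh)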
def signals_init():
'''Initialize signal handlers'''
initted_signals = []
for sig in ['SIGINT', 'SIGTERM', 'SIGBREAK']:
if sig in SIGNAMES_TO_NUMS:
signal.signal(SIGNAMES_TO_NUMS[sig], signals_handler)
initted_signals.append(sig)
print("-- Initialized signal handlers: {}".format(' '.join(initted_signals)))
# print('Press Ctrl+C')
# signal.pause()
def signals_handler(sig, frame):
'''Signals handler'''
print('\n-- Received signal {} ({}) - exiting\n'.format(sig, SIGNUMS_TO_NAMES[sig]))
sys.exit(0)
def process_message(msg, msg_num, co2prev):
'''Process an individual message'''
parsed = {}
try:
parsed = json.loads(msg.data)
except ValueError:
parsed['data'] = ''
return co2prev
# print(msg_num, type(msg.data), msg.data, parsed['data'])
date = str(parsed['published_at'])
iso_date = dateutil.parser.parse(date).astimezone(TIMEZONE).isoformat()
marker = ''
data_regex = r'^T=(\d+\.\d+)\s*F\sRH=(\d+\.\d+)\s*\%\sCO2=(\d+)\s*ppm RHTRAW=(.*) CO2RAW=(.*)$'
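    # Example payload this regex accepts (made-up values, format derived from
    # the pattern itself):
    #   "T=76.46 F RH=47.30 % CO2=1240 ppm RHTRAW=01D900F7D1 CO2RAW=2104D8FD"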
match = re.match(data_regex, parsed['data'])
if match:
temp = float(match.group(1))
rhum = float(match.group(2))
co2ppm = int(match.group(3))
rhtraw = match.group(4)
co2raw = match.group(5) # base64.b16decode(match.group(5))
if co2ppm == 0:
co2ppm = co2prev
else:
co2prev = co2ppm
if rhum > 100.0:
marker = '! '
data_field = "{:6.2f} {:6.2f} {:6d} 0x{:s} 0x{:s}".\
format(temp, rhum, co2ppm, rhtraw, co2raw)
else:
marker = '! '
data_field = parsed['data']
with _flexout_open(output_file, 'a') as fstream:
print("{:s}{:s} {:s}".format(marker, iso_date, data_field), file=fstream)
fstream.flush()
return co2prev
def run_message_loop(api_url):
'''The main processing loop '''
co2prev = CO2_BASELINE_PPM
msg_num = 1
error_str = ''
try:
messages = SSEClient(api_url, timeout=5)
for msg in messages:
msg_num += 1
returned_data = process_message(msg=msg, msg_num=msg_num, co2prev=co2prev)
if returned_data is not None:
(co2prev) = returned_data
except requests.exceptions.Timeout as err:
error_str = repr(err)
except requests.exceptions.TooManyRedirects as err:
error_str = repr(err)
except requests.exceptions.RequestException as err:
error_str = repr(err)
else:
error_str = "Unexpected exit of main loop"
return error_str
if __name__ == "__main__":
print('-'*SCREEN_WIDTH)
print("-- Using Python {}".format(sys.version))
signals_init()
print()
argparser = argparse.ArgumentParser(prog=PROGNAME,
usage='%(prog)s [options]')
argparser.add_argument('--datasource', action='store', required=True,
dest='data_source', metavar='<name>',
help='API data source name')
argparser.add_argument('--token', action='store', required=True,
dest='access_token', metavar='<token>',
help='API access token')
argparser.add_argument('--outputfile', action='store', required=False,
dest='output_file', metavar='<file>',
help='output file name (else stdout)')
args = argparser.parse_args(sys.argv[1:])
data_source = args.data_source
access_token = args.access_token
output_file = args.output_file
print('-- Output_file = {}'.format(output_file))
print()
api_url = API_ROOT+data_source+'?access_token='+access_token
with _flexout_open(output_file, 'a') as fstream:
print(DATA_HEADER, file=fstream)
while True:
errstr = run_message_loop(api_url=api_url)
raw_date = str(datetime.now(dateutil.tz.tzlocal()))
iso_date = dateutil.parser.parse(raw_date).astimezone(dateutil.tz.tzlocal()).isoformat()
with _flexout_open(output_file, 'a') as fstream:
marker = '! '
print("{:s}{:s} Error: {:s}".format(marker, iso_date, errstr), file=fstream)
fstream.flush()
print('-'*SCREEN_WIDTH)
print(errstr)
print('-'*SCREEN_WIDTH)
time.sleep(RETRY_WAIT_PERIOD)
|
MartyMacGyver/EnvironmentalSensorManager
|
backend/environmental_data_logger.py
|
Python
|
apache-2.0
| 6,369
|
import unittest
from graphene.storage.base.node_store import *
class TestNodeStoreMethods(unittest.TestCase):
def setUp(self):
GrapheneStore.TESTING = True
def tearDown(self):
"""
Clean the database so that the tests are independent of one another
"""
graphene_store = GrapheneStore()
graphene_store.remove_test_datafiles()
def test_empty_init(self):
"""
Test that initializing an empty NodeStore succeeds (file is opened)
"""
try:
NodeStore()
except IOError:
self.fail("NodeStore initializer failed: db file failed to open.")
def test_double_init(self):
"""
Test that initializing an empty NodeStore succeeds when
repeated; i.e. the old file is reopened and no errors occur.
"""
try:
NodeStore()
except IOError:
self.fail("NodeStore initializer failed: "
"db file failed to open.")
try:
NodeStore()
except IOError:
self.fail("NodeStore initializer failed on second attempt: "
"db file failed to open.")
def test_non_interface_file_creation(self):
"""
NOTE: GeneralStore Test, only tested here
Test that writing to a NodeStore does not fail when the db file
was created outside of the interface (i.e. touch <NodeStore.db>)
This error was caused because when a file is touched, it might not
get padded, leading to a plethora of errors since index 0 is not
supposed to be usable.
"""
graphene_store = GrapheneStore()
# Create db file outside interface
open(graphene_store.datafilesDir + NodeStore.FILE_NAME, "w+").close()
try:
node_store = NodeStore()
node = Node(1, False, 1, 1, 1)
node_store.write_item(node)
except Exception:
self.fail("Writing failed when db file was touched externally")
def test_invalid_write(self):
"""
Test that writing a node to index 0 raises an error
"""
node_store = NodeStore()
empty_node = Node()
with self.assertRaises(ValueError):
node_store.write_item(empty_node)
def test_invalid_read(self):
"""
Test that reading a node from index 0 raises an error
"""
node_store = NodeStore()
with self.assertRaises(ValueError):
node_store.item_at_index(0)
def test_empty_read(self):
"""
        Make sure that reading an item when the file is empty returns EOF
"""
node_store = NodeStore()
# Read an uncreated item
no_item = node_store.item_at_index(1)
        # Make sure it returned EOF
self.assertEquals(no_item, GeneralStore.EOF)
def test_write_read_1_node(self):
"""
Tests that the node written to the NodeStore is the node that is read.
"""
node_store = NodeStore()
# Create a node and add it to the NodeStore
node = Node(1, False, 1, 1, 1)
node_store.write_item(node)
# Read the node from the NodeStore file
node_file = node_store.item_at_index(node.index)
# Assert that the values are the same
self.assertEquals(node, node_file)
def test_write_read_2_nodes(self):
"""
Tests when 2 nodes are written after 1 node to the NodeStore
"""
node_store = NodeStore()
# Create one node and write it to the NodeStore
node1 = Node(1, False, 1, 1, 1)
node_store.write_item(node1)
# Create 2 nodes and add them to the NodeStore
node2 = Node(2, False, 2, 2, 2)
node3 = Node(3, False, 3, 3, 3)
node_store.write_item(node2)
node_store.write_item(node3)
# Read the nodes from the NodeStore file
node1_file = node_store.item_at_index(node1.index)
node2_file = node_store.item_at_index(node2.index)
node3_file = node_store.item_at_index(node3.index)
# Make sure their values are the same
self.assertEquals(node1, node1_file)
self.assertEquals(node2, node2_file)
self.assertEquals(node3, node3_file)
def test_overwrite_node(self):
"""
Tests that overwriting a node in a database with 3 nodes works
"""
node_store = NodeStore()
# Create 3 nodes
node1 = Node(1, False, 1, 1, 1)
node2 = Node(2, False, 2, 2, 2)
node3 = Node(3, False, 3, 3, 3)
# Write them to the nodestore
node_store.write_item(node1)
node_store.write_item(node2)
node_store.write_item(node3)
# Verify that they are in the store as expected
node1_file = node_store.item_at_index(node1.index)
self.assertEquals(node1, node1_file)
node2_file = node_store.item_at_index(node2.index)
self.assertEquals(node2, node2_file)
node3_file = node_store.item_at_index(node3.index)
self.assertEquals(node3, node3_file)
# Create a new node2 and overwrite the old node2
new_node2 = Node(2, True, 8, 8, 8)
node_store.write_item(new_node2)
# Verify that the data is still as expected
node1_file = node_store.item_at_index(node1.index)
self.assertEquals(node1, node1_file)
new_node2_file = node_store.item_at_index(new_node2.index)
self.assertEquals(new_node2, new_node2_file)
node3_file = node_store.item_at_index(node3.index)
self.assertEquals(node3, node3_file)
def test_delete_node(self):
"""
Tests that deleting 2 nodes in a database with 3 nodes works
"""
node_store = NodeStore()
# Create 3 nodes
node1 = Node(1, True, 1, 1, 1)
node2 = Node(2, True, 2, 2, 2)
node3 = Node(3, True, 3, 3, 3)
# Write them to the nodestore
node_store.write_item(node1)
node_store.write_item(node2)
node_store.write_item(node3)
# Verify that they are in the store as expected
node1_file = node_store.item_at_index(node1.index)
self.assertEquals(node1, node1_file)
node2_file = node_store.item_at_index(node2.index)
self.assertEquals(node2, node2_file)
node3_file = node_store.item_at_index(node3.index)
self.assertEquals(node3, node3_file)
# Delete nodes 1 and 2
node_store.delete_item(node1)
# Deleting from end of file, should return EOF when read
node_store.delete_item(node3)
# Verify deleted nodes are deleted
deleted_node1_file = node_store.item_at_index(node1.index)
self.assertIsNone(deleted_node1_file)
deleted_node3_file = node_store.item_at_index(node3.index)
self.assertEquals(deleted_node3_file, EOF)
# Verify unaffected node is as expected
node2_file = node_store.item_at_index(node2.index)
self.assertEquals(node2, node2_file)
def test_file_truncation(self):
"""
NOTE: GeneralStore Test, only tested here
Test that the file is truncated when deleting from the end of the
file
"""
node_store = NodeStore()
# Create 3 nodes
node1 = Node(1, True, 1, 1, 1)
node2 = Node(2, True, 2, 2, 2)
node3 = Node(3, True, 3, 3, 3)
# Write them to the nodestore
node_store.write_item(node1)
node_store.write_item(node2)
node_store.write_item(node3)
# Verify that they are in the store as expected
node1_file = node_store.item_at_index(node1.index)
self.assertEquals(node1, node1_file)
node2_file = node_store.item_at_index(node2.index)
self.assertEquals(node2, node2_file)
node3_file = node_store.item_at_index(node3.index)
self.assertEquals(node3, node3_file)
# Delete node 3, make sure file reduced in size (truncated)
old_size = node_store.get_file_size()
node_store.delete_item(node3)
new_size = node_store.get_file_size()
self.assertNotEqual(old_size, new_size)
|
PHB-CS123/graphene
|
tests/storage/base/test_node_store.py
|
Python
|
apache-2.0
| 8,220
|
# -*- coding: utf-8 -*-
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import unittest
from iptest import run_test
unicode = str
unichr = chr
class UnicodeTest(unittest.TestCase):
def test_constructor(self):
self.assertEqual('', unicode())
self.assertEqual('None', unicode(None))
self.assertEqual('ä', unicode('ä'))
self.assertEqual('ä', unicode(b'\xc3\xa4', 'utf-8')) # TODO kunom: reasonable?
def test_raw_unicode_escape(self):
for raw_unicode_escape in ['raw-unicode-escape', 'raw unicode escape']:
s = unicode(b'\u0663\u0661\u0664 ', raw_unicode_escape)
self.assertEqual(len(s), 4)
self.assertEqual(int(s), 314)
s = unicode(b'\u0663.\u0661\u0664 ', raw_unicode_escape)
self.assertEqual(float(s), 3.14)
def test_raw_unicode_escape_noescape_lowchars(self):
for raw_unicode_escape in ['raw-unicode-escape', 'raw unicode escape']:
for i in range(0x100):
self.assertEqual(unichr(i).encode(raw_unicode_escape), bytes([i]))
self.assertEqual(unichr(0x100).encode(raw_unicode_escape), rb'\u0100')
def test_raw_unicode_escape_dashes(self):
"""Make sure that either dashes or underscores work in raw encoding name"""
ok = True
try:
unicode(b'hey', 'raw_unicode-escape')
except LookupError:
ok = False
        self.assertTrue(ok, "dashes and underscores should be interchangeable")
def test_raw_unicode_escape_trailing_backslash(self):
self.assertEqual(unicode(b'\\', 'raw_unicode_escape'), u'\\')
def test_unicode_error(self):
from iptest.misc_util import ip_supported_encodings
from _codecs import register_error
def handler(ex):
self.assertEqual(ex.object, u'\uac00')
return (u"", ex.end)
register_error("test_unicode_error", handler)
for mode in ip_supported_encodings:
unichr(0xac00).encode(mode, "test_unicode_error")
def test_ignore(self):
"""only UTF8, no encoding fallbacks..."""
self.assertEqual(unicode(b'', 'ascii', 'ignore'), '')
self.assertEqual(unicode(b'\xff', 'ascii', 'ignore'), '')
self.assertEqual(unicode(b'a\xffb\xffc\xff', 'ascii', 'ignore'), 'abc')
def test_cp19005(self):
foo = u'\xef\xbb\xbf'
self.assertEqual(repr(foo), r"''")
def test_cp34689(self):
xx_full_width_a = u'xx\uff21'
caught = False
try:
dummy = bytes(xx_full_width_a, "ascii")
except UnicodeEncodeError as ex:
caught = True
self.assertEqual(ex.encoding, 'ascii')
self.assertEqual(ex.start, 2)
self.assertEqual(ex.end, 3)
self.assertEqual(ex.object, u'xx\uff21')
self.assertTrue(ex.reason is not None)
self.assertTrue(len(ex.reason) > 0)
self.assertTrue(caught)
def test_gh590(self):
self.assertEqual(unicode(bytes(range(0x80, 0x100)), 'ascii', 'replace'), u'\ufffd'*0x80)
def test_escape(self):
self.assertEqual(r"a\u", "a\\u")
with self.assertRaises(UnicodeDecodeError):
b"a\\u".decode("unicode-escape")
with self.assertRaises(UnicodeDecodeError):
b"a\\u".decode("raw-unicode-escape")
self.assertEqual(b"\\a\\u1234".decode("unicode-escape"), "\x07\u1234")
self.assertEqual(b"\\a\\u1234".decode("raw-unicode-escape"), "\\a\u1234")
run_test(__name__)
|
IronLanguages/ironpython3
|
Tests/test_unicode.py
|
Python
|
apache-2.0
| 3,711
|
from minicps.devices import Tank
from minicps.devices import PLC
from scipy.integrate import odeint
from utils import *
import numpy as np
import sys
import time
import math
import logging
import signal
import sys
Q101 = ('Q101', 1)
Q102 = ('Q102', 1)
LIT101 = ('LIT101', 1)
LIT102 = ('LIT102', 1)
LIT103 = ('LIT103', 1)
PLC_ADDR = IP['plc101']
class RawWaterTank(PLC):
def sigint_handler(self, sig, frame):
print "I received a SIGINT!"
sys.exit(0)
def plant_model(self, l, t, q):
MQ1, MQ2 = q
L1, L2, L3 = l
# System of 3 differential equations of the water tanks
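        # (Added note) Torricelli-style coupling, as read from the terms below:
        #   dL1/dt: pump inflow MQ1 minus flow from tank 1 to 3 (proportional
        #           to the sqrt of the level difference L1-L3),
        #   dL2/dt: flow from tank 3 to 2 plus pump inflow MQ2 minus outflow,
        #   dL3/dt: flow in from tank 1 minus flow out to tank 2;
        # the constants (s, sn, mu*, g) are expected to come from utils.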
f = [(MQ1 - mu13*sn*np.sign(L1-L3)*math.sqrt(2*g*abs(L1-L3)))/s,
(MQ2 + mu32*sn*np.sign(L3-L2)*math.sqrt(2*g*abs(L3-L2)) - mu20*sn*math.sqrt(2*g*L2))/s,
(mu13*sn*np.sign(L1-L3)*math.sqrt(2*g*abs(L1-L3)) - mu32*sn*np.sign(L3-L2)*math.sqrt(abs(2*g*abs(L3-L2))))/s
]
return f
def pre_loop(self):
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGTERM, self.sigint_handler)
logging.basicConfig(filename="plant.log", level=logging.DEBUG)
logging.debug('plant enters pre_loop')
        self.Y1 = 0.4
        self.Y2 = 0.2
        self.Y3 = 0.3
self.set(LIT101, self.Y1)
self.set(LIT102, self.Y2)
self.set(LIT103, self.Y3)
self.Q1 = Q1
self.Q2 = Q2
self.set(Q101, self.Q1)
self.set(Q102, self.Q2)
# These vectors are used by the model
self.l = [self.Y1, self.Y2, self.Y3]
self.abserr = 1.0e-8
self.relerr = 1.0e-6
self.lock = 0.0
def main_loop(self):
count = 0
#logging.debug('starting simulation')
#logging.debug('Initial values: L1: ', self.l[0], ' L2: ', self.l[1], ' L3: ', self.l[2])
stoptime = 1
numpoints = 100
t = [stoptime * float(i) / (numpoints - 1) for i in range(numpoints)]
while(count <= PP_SAMPLES):
print count, " ", self.l
self.Q1 = float(self.get(Q101))
self.Q2 = float(self.get(Q102))
self.q = [self.Q1, self.Q2]
wsol = odeint(self.plant_model, self.l, t, args=(self.q,),atol=self.abserr, rtol=self.relerr)
#print "dl/dt ", wsol
if (wsol[-1][0]) > 1.0:
wsol[-1][0] = 1.0
if (wsol[-1][1]) > 1.0:
wsol[-1][1] = 1.0
if (wsol[-1][2]) > 1.0:
wsol[-1][2] = 1.0
self.l=[wsol[-1][0], wsol[-1][1], wsol[-1][2]]
#Update the values in the database
self.set(LIT101, self.l[0])
self.set(LIT102, self.l[1])
self.set(LIT103, self.l[2])
count += 1
#self.lock = float(self.receive(LIT101, PLC_ADDR))
time.sleep(PLC_PERIOD_SEC)
if __name__ == '__main__':
plc101 = RawWaterTank(name='plant101',state=STATE,protocol=TANK_PROTOCOL,memory=GENERIC_DATA,disk=GENERIC_DATA)
|
afmurillo/ICS-SDN
|
francisco-topo/physical_process.py
|
Python
|
apache-2.0
| 2,582
|
#!/usr/bin/env python
# Copyright (c) 2014 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import neutronclient.v2_0.client
from oslo_log import log as logging
from congress import data_types
from congress.datasources import constants
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils as ds_utils
LOG = logging.getLogger(__name__)
IngressEgress = data_types.create_congress_enum_type(
'IngressEgress', ('ingress', 'egress'), data_types.Str)
data_types.TypesRegistry.register(IngressEgress)
FloatingIPStatus = data_types.create_congress_enum_type(
'FloatingIPStatus', ('ACTIVE', 'DOWN', 'ERROR'), data_types.Str,
catch_all_default_value='OTHER')
data_types.TypesRegistry.register(FloatingIPStatus)
NeutronStatus = data_types.create_congress_enum_type(
'NeutronStatus', ('ACTIVE', 'DOWN', 'BUILD', 'ERROR'), data_types.Str,
catch_all_default_value='OTHER')
data_types.TypesRegistry.register(NeutronStatus)
IPVersion = data_types.create_congress_enum_type(
'IPv4IPv6', (4, 6), data_types.Int)
data_types.TypesRegistry.register(IPVersion)
class NeutronV2Driver(datasource_driver.PollingDataSourceDriver,
datasource_driver.ExecutionDriver):
NETWORKS = 'networks'
FIXED_IPS = 'fixed_ips'
SECURITY_GROUP_PORT_BINDINGS = 'security_group_port_bindings'
PORTS = 'ports'
ALLOCATION_POOLS = 'allocation_pools'
DNS_NAMESERVERS = 'dns_nameservers'
HOST_ROUTES = 'host_routes'
SUBNETS = 'subnets'
EXTERNAL_FIXED_IPS = 'external_fixed_ips'
EXTERNAL_GATEWAY_INFOS = 'external_gateway_infos'
ROUTERS = 'routers'
SECURITY_GROUP_RULES = 'security_group_rules'
SECURITY_GROUPS = 'security_groups'
FLOATING_IPS = 'floating_ips'
value_trans_str = ds_utils.typed_value_trans(data_types.Str)
value_trans_bool = ds_utils.typed_value_trans(data_types.Bool)
value_trans_int = ds_utils.typed_value_trans(data_types.Int)
floating_ips_translator = {
'translation-type': 'HDICT',
'table-name': FLOATING_IPS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'desc': 'The UUID of the floating IP address',
'translator': value_trans_str},
{'fieldname': 'router_id', 'desc': 'UUID of router',
'translator': value_trans_str},
{'fieldname': 'tenant_id', 'desc': 'Tenant ID',
'translator': value_trans_str},
{'fieldname': 'floating_network_id',
'desc': 'The UUID of the network associated with floating IP',
'translator': value_trans_str},
{'fieldname': 'fixed_ip_address',
'desc': 'Fixed IP address associated with floating IP address',
'translator': ds_utils.typed_value_trans(data_types.IPAddress)},
{'fieldname': 'floating_ip_address',
'desc': 'The floating IP address',
'translator': ds_utils.typed_value_trans(data_types.IPAddress)},
{'fieldname': 'port_id', 'desc': 'UUID of port',
'translator': value_trans_str},
{'fieldname': 'status', 'desc': 'The floating IP status',
'translator': ds_utils.typed_value_trans(FloatingIPStatus)})}
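    # (Added note) Roughly, this HDICT translator flattens each floating-IP
    # dict returned by Neutron into one row of the 'floating_ips' table with
    # the columns listed above (id, router_id, tenant_id, floating_network_id,
    # fixed_ip_address, floating_ip_address, port_id, status).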
networks_translator = {
'translation-type': 'HDICT',
'table-name': NETWORKS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'desc': 'Network ID',
'translator': value_trans_str},
{'fieldname': 'tenant_id', 'desc': 'Tenant ID',
'translator': value_trans_str},
{'fieldname': 'name', 'desc': 'Network name',
'translator': value_trans_str},
{'fieldname': 'status', 'desc': 'Network status',
'translator': ds_utils.typed_value_trans(NeutronStatus)},
{'fieldname': 'admin_state_up',
'desc': 'Administrative state of the network (true/false)',
'translator': value_trans_bool},
{'fieldname': 'shared',
'desc': 'Indicates if network is shared across all tenants',
'translator': value_trans_bool})}
ports_fixed_ips_translator = {
'translation-type': 'HDICT',
'table-name': FIXED_IPS,
'parent-key': 'id',
'parent-col-name': 'port_id',
'parent-key-desc': 'UUID of Port',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'ip_address',
'desc': 'The IP addresses for the port',
'translator': ds_utils.typed_value_trans(data_types.IPAddress)},
{'fieldname': 'subnet_id',
'desc': 'The UUID of the subnet to which the port is attached',
'translator': value_trans_str})}
ports_security_groups_translator = {
'translation-type': 'LIST',
'table-name': SECURITY_GROUP_PORT_BINDINGS,
'parent-key': 'id',
'parent-col-name': 'port_id',
'parent-key-desc': 'UUID of port',
'val-col': 'security_group_id',
'val-col-desc': 'UUID of security group',
'translator': value_trans_str}
ports_translator = {
'translation-type': 'HDICT',
'table-name': PORTS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'desc': 'UUID of port',
'translator': value_trans_str},
{'fieldname': 'tenant_id', 'desc': 'tenant ID',
'translator': value_trans_str},
{'fieldname': 'name', 'desc': 'port name',
'translator': value_trans_str},
{'fieldname': 'network_id', 'desc': 'UUID of attached network',
'translator': value_trans_str},
{'fieldname': 'mac_address', 'desc': 'MAC address of the port',
'translator': value_trans_str},
{'fieldname': 'admin_state_up',
'desc': 'Administrative state of the port',
'translator': value_trans_bool},
{'fieldname': 'status', 'desc': 'Port status',
'translator': ds_utils.typed_value_trans(NeutronStatus)},
{'fieldname': 'device_id',
'desc': 'The ID of the device that uses this port',
'translator': value_trans_str},
{'fieldname': 'device_owner',
'desc': 'The entity type that uses this port. '
'E.g., compute:nova, network:router_interface',
'translator': value_trans_str},
{'fieldname': 'fixed_ips',
'desc': 'The IP addresses for the port',
'translator': ports_fixed_ips_translator},
{'fieldname': 'security_groups',
'translator': ports_security_groups_translator})}
subnets_allocation_pools_translator = {
'translation-type': 'HDICT',
'table-name': ALLOCATION_POOLS,
'parent-key': 'id',
'parent-col-name': 'subnet_id',
'parent-key-desc': 'UUID of subnet',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'start',
'desc': 'The start address for the allocation pools',
'translator': value_trans_str},
{'fieldname': 'end',
'desc': 'The end address for the allocation pools',
'translator': value_trans_str})}
subnets_dns_nameservers_translator = {
'translation-type': 'LIST',
'table-name': DNS_NAMESERVERS,
'parent-key': 'id',
'parent-col-name': 'subnet_id',
'parent-key-desc': 'UUID of subnet',
'val-col': 'dns_nameserver',
'val-col-desc': 'The DNS server',
'translator': value_trans_str}
subnets_routes_translator = {
'translation-type': 'HDICT',
'table-name': HOST_ROUTES,
'parent-key': 'id',
'parent-col-name': 'subnet_id',
'parent-key-desc': 'UUID of subnet',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'destination',
'desc': 'The destination for static route',
'translator': value_trans_str},
{'fieldname': 'nexthop',
'desc': 'The next hop for the destination',
'translator': value_trans_str})}
subnets_translator = {
'translation-type': 'HDICT',
'table-name': SUBNETS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'desc': 'UUID of subnet',
'translator': value_trans_str},
{'fieldname': 'tenant_id', 'desc': 'tenant ID',
'translator': value_trans_str},
{'fieldname': 'name', 'desc': 'subnet name',
'translator': value_trans_str},
{'fieldname': 'network_id', 'desc': 'UUID of attached network',
'translator': value_trans_str},
{'fieldname': 'ip_version',
'desc': 'The IP version, which is 4 or 6',
'translator': ds_utils.typed_value_trans(IPVersion)},
{'fieldname': 'cidr', 'desc': 'The CIDR',
'translator': ds_utils.typed_value_trans(data_types.IPNetwork)},
{'fieldname': 'gateway_ip', 'desc': 'The gateway IP address',
'translator': ds_utils.typed_value_trans(data_types.IPAddress)},
{'fieldname': 'enable_dhcp', 'desc': 'Whether DHCP is enabled',
'translator': value_trans_bool},
{'fieldname': 'ipv6_ra_mode', 'desc': 'The IPv6 RA mode',
'translator': value_trans_str},
{'fieldname': 'ipv6_address_mode',
'desc': 'The IPv6 address mode', 'translator': value_trans_str},
{'fieldname': 'allocation_pools',
'translator': subnets_allocation_pools_translator},
{'fieldname': 'dns_nameservers',
'translator': subnets_dns_nameservers_translator},
{'fieldname': 'host_routes',
'translator': subnets_routes_translator})}
external_fixed_ips_translator = {
'translation-type': 'HDICT',
'table-name': EXTERNAL_FIXED_IPS,
'parent-key': 'router_id',
'parent-col-name': 'router_id',
'parent-key-desc': 'UUID of router',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'subnet_id', 'desc': 'UUID of the subnet',
'translator': value_trans_str},
{'fieldname': 'ip_address', 'desc': 'IP Address',
'translator': ds_utils.typed_value_trans(data_types.IPAddress)})}
routers_external_gateway_infos_translator = {
'translation-type': 'HDICT',
'table-name': EXTERNAL_GATEWAY_INFOS,
'parent-key': 'id',
'parent-col-name': 'router_id',
'parent-key-desc': 'UUID of router',
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'network_id', 'desc': 'Network ID',
'translator': value_trans_str},
{'fieldname': 'enable_snat',
'desc': 'current Source NAT status for router',
'translator': value_trans_bool},
{'fieldname': 'external_fixed_ips',
'translator': external_fixed_ips_translator})}
routers_translator = {
'translation-type': 'HDICT',
'table-name': ROUTERS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'desc': 'uuid of the router',
'translator': value_trans_str},
{'fieldname': 'tenant_id', 'desc': 'tenant ID',
'translator': value_trans_str},
{'fieldname': 'status', 'desc': 'router status',
'translator': ds_utils.typed_value_trans(NeutronStatus)},
{'fieldname': 'admin_state_up',
'desc': 'administrative state of router',
'translator': value_trans_bool},
{'fieldname': 'name', 'desc': 'router name',
'translator': value_trans_str},
{'fieldname': 'distributed',
'desc': "indicates if it's distributed router ",
'translator': value_trans_bool},
{'fieldname': 'external_gateway_info',
'translator': routers_external_gateway_infos_translator})}
security_group_rules_translator = {
'translation-type': 'HDICT',
'table-name': SECURITY_GROUP_RULES,
'parent-key': 'id',
'parent-col-name': 'security_group_id',
'parent-key-desc': 'uuid of security group',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'id', 'desc': 'The UUID of the security group rule',
'translator': value_trans_str},
{'fieldname': 'tenant_id', 'desc': 'tenant ID',
'translator': value_trans_str},
{'fieldname': 'remote_group_id',
'desc': 'remote group id to associate with security group rule',
'translator': value_trans_str},
{'fieldname': 'direction',
'desc': 'Direction in which the security group rule is applied',
'translator': ds_utils.typed_value_trans(IngressEgress)},
{'fieldname': 'ethertype', 'desc': 'IPv4 or IPv6',
'translator': value_trans_str},
{'fieldname': 'protocol',
'desc': 'protocol that is matched by the security group rule.',
'translator': value_trans_str},
{'fieldname': 'port_range_min',
'desc': 'Min port number in the range',
'translator': value_trans_int},
{'fieldname': 'port_range_max',
'desc': 'Max port number in the range',
'translator': value_trans_int},
{'fieldname': 'remote_ip_prefix',
'desc': 'Remote IP prefix to be associated',
'translator': value_trans_str})}
security_group_translator = {
'translation-type': 'HDICT',
'table-name': SECURITY_GROUPS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'desc': 'The UUID for the security group',
'translator': value_trans_str},
{'fieldname': 'tenant_id', 'desc': 'Tenant ID',
'translator': value_trans_str},
{'fieldname': 'name', 'desc': 'The security group name',
'translator': value_trans_str},
{'fieldname': 'description', 'desc': 'security group description',
'translator': value_trans_str},
{'fieldname': 'security_group_rules',
'translator': security_group_rules_translator})}
TRANSLATORS = [networks_translator, ports_translator, subnets_translator,
routers_translator, security_group_translator,
floating_ips_translator]
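# Illustrative sketch (not part of the upstream driver) of how the HDICT
# translators above flatten a Neutron API response: each field-translator
# reads one key of the source dict and converts it with its value translator,
# yielding roughly one row per object in the top-level table, while nested
# translators (fixed_ips, security_group_rules, external_gateway_info, ...)
# emit child rows keyed by the parent id via 'parent-key'/'parent-col-name'.
# For example, a hypothetical floating IP object
#   {'id': 'fip-1', 'router_id': 'r-1', 'tenant_id': 't-1',
#    'floating_network_id': 'n-1', 'fixed_ip_address': '10.0.0.5',
#    'floating_ip_address': '172.24.4.3', 'port_id': 'p-1',
#    'status': 'ACTIVE'}
# would become a single row of the 'floating_ips' table whose columns follow
# the field-translator order declared in floating_ips_translator.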
def __init__(self, name='', args=None):
super(NeutronV2Driver, self).__init__(name, args=args)
datasource_driver.ExecutionDriver.__init__(self)
self.creds = args
session = ds_utils.get_keystone_session(self.creds)
self.neutron = neutronclient.v2_0.client.Client(session=session)
# specify the argument name for all methods that take structured args
self.method_structured_args = {
'add_bgp_speaker_to_dragent': {'named': frozenset(['body'])},
'add_gateway_router': {'named': frozenset(['body'])},
'add_interface_router': {'named': frozenset(['body'])},
'add_network_to_bgp_speaker': {'named': frozenset(['body'])},
'add_network_to_dhcp_agent': {'named': frozenset(['body'])},
'add_peer_to_bgp_speaker': {'named': frozenset(['body'])},
'add_router_to_l3_agent': {'named': frozenset(['body'])},
'associate_flavor': {'named': frozenset(['body'])},
'associate_health_monitor': {'named': frozenset(['body'])},
'connect_network_gateway': {'named': frozenset(['body'])},
'create_address_scope': {'named': frozenset(['body'])},
'create_bandwidth_limit_rule': {'named': frozenset(['body'])},
'create_bgp_peer': {'named': frozenset(['body'])},
'create_bgp_speaker': {'named': frozenset(['body'])},
'create_bgpvpn': {'named': frozenset(['body'])},
'create_bgpvpn_network_assoc': {'named': frozenset(['body'])},
'create_bgpvpn_port_assoc': {'named': frozenset(['body'])},
'create_bgpvpn_router_assoc': {'named': frozenset(['body'])},
'create_dscp_marking_rule': {'named': frozenset(['body'])},
'create_endpoint_group': {'named': frozenset(['body'])},
'create_ext': {'named': frozenset(['body'])},
'create_firewall': {'named': frozenset(['body'])},
'create_firewall_policy': {'named': frozenset(['body'])},
'create_firewall_rule': {'named': frozenset(['body'])},
'create_flavor': {'named': frozenset(['body'])},
'create_floatingip': {'named': frozenset(['body'])},
'create_fwaas_firewall_group': {'named': frozenset(['body'])},
'create_fwaas_firewall_policy': {'named': frozenset(['body'])},
'create_fwaas_firewall_rule': {'named': frozenset(['body'])},
'create_gateway_device': {'named': frozenset(['body'])},
'create_health_monitor': {'named': frozenset(['body'])},
'create_ikepolicy': {'named': frozenset(['body'])},
'create_ipsec_site_connection': {'named': frozenset(['body'])},
'create_ipsecpolicy': {'named': frozenset(['body'])},
'create_lbaas_healthmonitor': {'named': frozenset(['body'])},
'create_lbaas_l7policy': {'named': frozenset(['body'])},
'create_lbaas_l7rule': {'named': frozenset(['body'])},
'create_lbaas_member': {'named': frozenset(['body'])},
'create_lbaas_pool': {'named': frozenset(['body'])},
'create_listener': {'named': frozenset(['body'])},
'create_loadbalancer': {'named': frozenset(['body'])},
'create_member': {'named': frozenset(['body'])},
'create_metering_label': {'named': frozenset(['body'])},
'create_metering_label_rule': {'named': frozenset(['body'])},
'create_minimum_bandwidth_rule': {'named': frozenset(['body'])},
'create_network': {'named': frozenset(['body'])},
'create_network_gateway': {'named': frozenset(['body'])},
'create_network_log': {'named': frozenset(['body'])},
'create_pool': {'named': frozenset(['body'])},
'create_port': {'named': frozenset(['body'])},
'create_qos_policy': {'named': frozenset(['body'])},
'create_qos_queue': {'named': frozenset(['body'])},
'create_rbac_policy': {'named': frozenset(['body'])},
'create_router': {'named': frozenset(['body'])},
'create_security_group': {'named': frozenset(['body'])},
'create_security_group_rule': {'named': frozenset(['body'])},
'create_service_profile': {'named': frozenset(['body'])},
'create_sfc_flow_classifier': {'named': frozenset(['body'])},
'create_sfc_port_chain': {'named': frozenset(['body'])},
'create_sfc_port_pair': {'named': frozenset(['body'])},
'create_sfc_port_pair_group': {'named': frozenset(['body'])},
'create_sfc_service_graph': {'named': frozenset(['body'])},
'create_subnet': {'named': frozenset(['body'])},
'create_subnetpool': {'named': frozenset(['body'])},
'create_trunk': {'named': frozenset(['body'])},
'create_vip': {'named': frozenset(['body'])},
'create_vpnservice': {'named': frozenset(['body'])},
'disconnect_network_gateway': {'named': frozenset(['body'])},
'firewall_policy_insert_rule': {'named': frozenset(['body'])},
'firewall_policy_remove_rule': {'named': frozenset(['body'])},
'insert_rule_fwaas_firewall_policy': {
'named': frozenset(['body'])},
'remove_interface_router': {'named': frozenset(['body'])},
'remove_network_from_bgp_speaker': {'named': frozenset(['body'])},
'remove_peer_from_bgp_speaker': {'named': frozenset(['body'])},
'remove_rule_fwaas_firewall_policy': {
'named': frozenset(['body'])},
'replace_tag': {'named': frozenset(['body'])},
'retry_request': {'named': frozenset(['body'])},
'show_minimum_bandwidth_rule': {'named': frozenset(['body'])},
'trunk_add_subports': {'named': frozenset(['body'])},
'trunk_remove_subports': {'named': frozenset(['body'])},
}
self.add_executable_method('update_resource_attrs',
[{'name': 'resource_type',
'description': 'resource type (e.g. ' +
'port, network, subnet)'},
{'name': 'id',
'description': 'ID of the resource'},
{'name': 'attr1',
'description': 'attribute name to ' +
'update (e.g. admin_state_up)'},
{'name': 'attr1-value',
'description': 'updated attr1 value'},
{'name': 'attrN',
'description': 'attribute name to ' +
'update'},
{'name': 'attrN-value',
'description': 'updated attrN value'}],
"A wrapper for update_<resource_type>()")
self.add_executable_method('attach_port_security_group',
[{'name': 'port_id',
'description': 'ID of target port'},
{'name': 'security_group_id',
'description': 'ID of security group to be '
'attached'}],
"Attach a security group to a port (WARNING: "
"may overwrite concurrent changes to the "
"port's security groups list.)")
self.add_executable_method('detach_port_security_group',
[{'name': 'port_id',
'description': 'ID of target port'},
{'name': 'security_group_id',
'description': 'ID of security group to be '
'detached'}],
"Detach a security group from a port (WARNING: "
"may overwrite concurrent changes to the "
"port's security groups list.)")
# add action methods from client, but exclude 'update_*' because those
# are covered by the update_resource_attrs method.
exclude_methods = ['update_address_scope', 'update_agent',
'update_bandwidth_limit_rule', 'update_bgp_peer',
'update_bgp_speaker', 'update_bgpvpn',
'update_bgpvpn_network_assoc',
'update_bgpvpn_port_assoc',
'update_bgpvpn_router_assoc',
'update_dscp_marking_rule', 'update_endpoint_group',
'update_ext', 'update_firewall',
'update_firewall_policy', 'update_firewall_rule',
'update_flavor', 'update_floatingip',
'update_fwaas_firewall_group',
'update_fwaas_firewall_policy',
'update_fwaas_firewall_rule',
'update_gateway_device', 'update_health_monitor',
'update_ikepolicy', 'update_ipsec_site_connection',
'update_ipsecpolicy', 'update_lbaas_healthmonitor',
'update_lbaas_l7policy', 'update_lbaas_l7rule',
'update_lbaas_member', 'update_lbaas_pool',
'update_listener', 'update_loadbalancer',
'update_member', 'update_minimum_bandwidth_rule',
'update_network', 'update_network_gateway',
'update_network_log', 'update_pool', 'update_port',
'update_qos_policy', 'update_quota',
'update_rbac_policy', 'update_router',
'update_security_group', 'update_service_profile',
'update_sfc_flow_classifier',
'update_sfc_port_chain', 'update_sfc_port_pair',
'update_sfc_port_pair_group',
'update_sfc_service_graph', 'update_subnet',
'update_subnetpool', 'update_trunk', 'update_vip',
'update_vpnservice']
self.add_executable_client_methods(self.neutron,
'neutronclient.v2_0.client',
exclude_methods)
self.initialize_update_methods()
self._init_end_start_poll()
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'neutronv2'
result['description'] = ('Datasource driver that interfaces with '
'OpenStack Networking aka Neutron.')
result['config'] = ds_utils.get_openstack_required_config()
result['config']['lazy_tables'] = constants.OPTIONAL
result['secret'] = ['password']
return result
def initialize_update_methods(self):
networks_method = lambda: self._translate_networks(
self.neutron.list_networks())
self.add_update_method(networks_method, self.networks_translator)
subnets_method = lambda: self._translate_subnets(
self.neutron.list_subnets())
self.add_update_method(subnets_method, self.subnets_translator)
ports_method = lambda: self._translate_ports(self.neutron.list_ports())
self.add_update_method(ports_method, self.ports_translator)
routers_method = lambda: self._translate_routers(
self.neutron.list_routers())
self.add_update_method(routers_method, self.routers_translator)
security_method = lambda: self._translate_security_groups(
self.neutron.list_security_groups())
self.add_update_method(security_method,
self.security_group_translator)
floatingips_method = lambda: self._translate_floating_ips(
self.neutron.list_floatingips())
self.add_update_method(floatingips_method,
self.floating_ips_translator)
@ds_utils.update_state_on_changed(FLOATING_IPS)
def _translate_floating_ips(self, obj):
LOG.debug("floating_ips: %s", dict(obj))
row_data = NeutronV2Driver.convert_objs(obj['floatingips'],
self.floating_ips_translator)
return row_data
@ds_utils.update_state_on_changed(NETWORKS)
def _translate_networks(self, obj):
LOG.debug("networks: %s", dict(obj))
row_data = NeutronV2Driver.convert_objs(obj['networks'],
self.networks_translator)
return row_data
@ds_utils.update_state_on_changed(PORTS)
def _translate_ports(self, obj):
LOG.debug("ports: %s", obj)
row_data = NeutronV2Driver.convert_objs(obj['ports'],
self.ports_translator)
return row_data
@ds_utils.update_state_on_changed(SUBNETS)
def _translate_subnets(self, obj):
LOG.debug("subnets: %s", obj)
row_data = NeutronV2Driver.convert_objs(obj['subnets'],
self.subnets_translator)
return row_data
@ds_utils.update_state_on_changed(ROUTERS)
def _translate_routers(self, obj):
LOG.debug("routers: %s", obj)
row_data = NeutronV2Driver.convert_objs(obj['routers'],
self.routers_translator)
return row_data
@ds_utils.update_state_on_changed(SECURITY_GROUPS)
def _translate_security_groups(self, obj):
LOG.debug("security_groups: %s", obj)
row_data = NeutronV2Driver.convert_objs(obj['security_groups'],
self.security_group_translator)
return row_data
def execute(self, action, action_args):
"""Overwrite ExecutionDriver.execute()."""
# action can be written as a method or an API call.
func = getattr(self, action, None)
if func and self.is_executable(func):
func(action_args)
else:
self._execute_api(self.neutron, action, action_args)
def update_resource_attrs(self, args):
positional_args = args.get('positional', [])
if not positional_args or len(positional_args) < 4:
LOG.error('Args for update_resource_attrs() must contain resource '
'type, resource ID and pairs of key-value attributes to '
'update')
return
resource_type = positional_args.pop(0)
resource_id = positional_args.pop(0)
action = 'update_%s' % resource_type
update_attrs = self._convert_args(positional_args)
body = {resource_type: update_attrs}
action_args = {'named': {resource_type: resource_id,
'body': body}}
self._execute_api(self.neutron, action, action_args)
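# Illustrative example (hypothetical values) of how the args above are mapped:
#   args = {'positional': ['port', '<port-uuid>', 'admin_state_up', 'False']}
# leads to action 'update_port' with
#   body = {'port': {'admin_state_up': ...}}   # pairs converted by _convert_args
#   action_args = {'named': {'port': '<port-uuid>', 'body': body}}
# which is then dispatched through self._execute_api(self.neutron, ...).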
def attach_port_security_group(self, args):
self._attach_detach_port_security_group(args, attach=True)
def detach_port_security_group(self, args):
self._attach_detach_port_security_group(args, attach=False)
def _attach_detach_port_security_group(self, args, attach):
positional_args = args.get('positional', [])
if not positional_args or len(positional_args) < 2:
LOG.error('Args for attach/detach_port_security_group() must '
'contain port id and security group id')
return
port_id = positional_args[0]
security_group_id = positional_args[1]
# get existing port security groups
port_state = self.neutron.show_port(port_id).get('port')
if not port_state:
return
port_security_groups = port_state.get('security_groups', [])
# add/remove security group
if security_group_id in port_security_groups:
if attach: # no change needed
return
port_security_groups.remove(security_group_id)
else:
if not attach: # no change needed
return
port_security_groups.append(security_group_id)
# call client to make change
# WARNING: intervening changes to security groups binding may be lost
body = {
"port": {
"security_groups": port_security_groups,
}
}
self.neutron.update_port(port_id, body)
|
openstack/congress
|
congress/datasources/neutronv2_driver.py
|
Python
|
apache-2.0
| 32,756
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.language.v1",
manifest={
"EncodingType",
"Document",
"Sentence",
"Entity",
"Token",
"Sentiment",
"PartOfSpeech",
"DependencyEdge",
"EntityMention",
"TextSpan",
"ClassificationCategory",
"AnalyzeSentimentRequest",
"AnalyzeSentimentResponse",
"AnalyzeEntitySentimentRequest",
"AnalyzeEntitySentimentResponse",
"AnalyzeEntitiesRequest",
"AnalyzeEntitiesResponse",
"AnalyzeSyntaxRequest",
"AnalyzeSyntaxResponse",
"ClassifyTextRequest",
"ClassifyTextResponse",
"AnnotateTextRequest",
"AnnotateTextResponse",
},
)
class EncodingType(proto.Enum):
r"""Represents the text encoding that the caller uses to process the
output. Providing an ``EncodingType`` is recommended because the API
provides the beginning offsets for various outputs, such as tokens
and mentions, and languages that natively use different text
encodings may access offsets differently.
"""
NONE = 0
UTF8 = 1
UTF16 = 2
UTF32 = 3
class Document(proto.Message):
r"""Represents the input to API methods.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
type_ (google.cloud.language_v1.types.Document.Type):
Required. If the type is not set or is ``TYPE_UNSPECIFIED``,
returns an ``INVALID_ARGUMENT`` error.
content (str):
The content of the input in string format.
Cloud audit logging exempt since it is based on
user data.
This field is a member of `oneof`_ ``source``.
gcs_content_uri (str):
The Google Cloud Storage URI where the file content is
located. This URI must be of the form:
gs://bucket_name/object_name. For more details, see
https://cloud.google.com/storage/docs/reference-uris. NOTE:
Cloud Storage object versioning is not supported.
This field is a member of `oneof`_ ``source``.
language (str):
The language of the document (if not specified, the language
is automatically detected). Both ISO and BCP-47 language
codes are accepted. `Language
Support <https://cloud.google.com/natural-language/docs/languages>`__
lists currently supported languages for each API method. If
the language (either specified by the caller or
automatically detected) is not supported by the called API
method, an ``INVALID_ARGUMENT`` error is returned.
"""
class Type(proto.Enum):
r"""The document types enum."""
TYPE_UNSPECIFIED = 0
PLAIN_TEXT = 1
HTML = 2
type_ = proto.Field(proto.ENUM, number=1, enum=Type,)
content = proto.Field(proto.STRING, number=2, oneof="source",)
gcs_content_uri = proto.Field(proto.STRING, number=3, oneof="source",)
language = proto.Field(proto.STRING, number=4,)
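# Illustrative construction (added note, assuming the usual proto-plus
# keyword-argument constructor):
#   doc = Document(content="Hello, world!", type_=Document.Type.PLAIN_TEXT)
#   doc.language = "en"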
class Sentence(proto.Message):
r"""Represents a sentence in the input document.
Attributes:
text (google.cloud.language_v1.types.TextSpan):
The sentence text.
sentiment (google.cloud.language_v1.types.Sentiment):
For calls to [AnalyzeSentiment][] or if
[AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment]
is set to true, this field will contain the sentiment for
the sentence.
"""
text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",)
sentiment = proto.Field(proto.MESSAGE, number=2, message="Sentiment",)
class Entity(proto.Message):
r"""Represents a phrase in the text that is a known entity, such
as a person, an organization, or location. The API associates
information, such as salience and mentions, with entities.
Attributes:
name (str):
The representative name for the entity.
type_ (google.cloud.language_v1.types.Entity.Type):
The entity type.
metadata (Sequence[google.cloud.language_v1.types.Entity.MetadataEntry]):
Metadata associated with the entity.
For most entity types, the metadata is a Wikipedia URL
(``wikipedia_url``) and Knowledge Graph MID (``mid``), if
they are available. For the metadata associated with other
entity types, see the Type table below.
salience (float):
The salience score associated with the entity in the [0,
1.0] range.
The salience score for an entity provides information about
the importance or centrality of that entity to the entire
document text. Scores closer to 0 are less salient, while
scores closer to 1.0 are highly salient.
mentions (Sequence[google.cloud.language_v1.types.EntityMention]):
The mentions of this entity in the input
document. The API currently supports proper noun
mentions.
sentiment (google.cloud.language_v1.types.Sentiment):
For calls to [AnalyzeEntitySentiment][] or if
[AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment]
is set to true, this field will contain the aggregate
sentiment expressed for this entity in the provided
document.
"""
class Type(proto.Enum):
r"""The type of the entity. For most entity types, the associated
metadata is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph
MID (``mid``). The table below lists the associated fields for
entities that have different metadata.
"""
UNKNOWN = 0
PERSON = 1
LOCATION = 2
ORGANIZATION = 3
EVENT = 4
WORK_OF_ART = 5
CONSUMER_GOOD = 6
OTHER = 7
PHONE_NUMBER = 9
ADDRESS = 10
DATE = 11
NUMBER = 12
PRICE = 13
name = proto.Field(proto.STRING, number=1,)
type_ = proto.Field(proto.ENUM, number=2, enum=Type,)
metadata = proto.MapField(proto.STRING, proto.STRING, number=3,)
salience = proto.Field(proto.FLOAT, number=4,)
mentions = proto.RepeatedField(proto.MESSAGE, number=5, message="EntityMention",)
sentiment = proto.Field(proto.MESSAGE, number=6, message="Sentiment",)
class Token(proto.Message):
r"""Represents the smallest syntactic building block of the text.
Attributes:
text (google.cloud.language_v1.types.TextSpan):
The token text.
part_of_speech (google.cloud.language_v1.types.PartOfSpeech):
Parts of speech tag for this token.
dependency_edge (google.cloud.language_v1.types.DependencyEdge):
Dependency tree parse for this token.
lemma (str):
`Lemma <https://en.wikipedia.org/wiki/Lemma_%28morphology%29>`__
of the token.
"""
text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",)
part_of_speech = proto.Field(proto.MESSAGE, number=2, message="PartOfSpeech",)
dependency_edge = proto.Field(proto.MESSAGE, number=3, message="DependencyEdge",)
lemma = proto.Field(proto.STRING, number=4,)
class Sentiment(proto.Message):
r"""Represents the feeling associated with the entire text or
entities in the text.
Attributes:
magnitude (float):
A non-negative number in the [0, +inf) range, which
represents the absolute magnitude of sentiment regardless of
score (positive or negative).
score (float):
Sentiment score between -1.0 (negative
sentiment) and 1.0 (positive sentiment).
"""
magnitude = proto.Field(proto.FLOAT, number=2,)
score = proto.Field(proto.FLOAT, number=3,)
class PartOfSpeech(proto.Message):
r"""Represents part of speech information for a token. Parts of speech
are as defined in
http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
Attributes:
tag (google.cloud.language_v1.types.PartOfSpeech.Tag):
The part of speech tag.
aspect (google.cloud.language_v1.types.PartOfSpeech.Aspect):
The grammatical aspect.
case (google.cloud.language_v1.types.PartOfSpeech.Case):
The grammatical case.
form (google.cloud.language_v1.types.PartOfSpeech.Form):
The grammatical form.
gender (google.cloud.language_v1.types.PartOfSpeech.Gender):
The grammatical gender.
mood (google.cloud.language_v1.types.PartOfSpeech.Mood):
The grammatical mood.
number (google.cloud.language_v1.types.PartOfSpeech.Number):
The grammatical number.
person (google.cloud.language_v1.types.PartOfSpeech.Person):
The grammatical person.
proper (google.cloud.language_v1.types.PartOfSpeech.Proper):
The grammatical properness.
reciprocity (google.cloud.language_v1.types.PartOfSpeech.Reciprocity):
The grammatical reciprocity.
tense (google.cloud.language_v1.types.PartOfSpeech.Tense):
The grammatical tense.
voice (google.cloud.language_v1.types.PartOfSpeech.Voice):
The grammatical voice.
"""
class Tag(proto.Enum):
r"""The part of speech tags enum."""
UNKNOWN = 0
ADJ = 1
ADP = 2
ADV = 3
CONJ = 4
DET = 5
NOUN = 6
NUM = 7
PRON = 8
PRT = 9
PUNCT = 10
VERB = 11
X = 12
AFFIX = 13
class Aspect(proto.Enum):
r"""The characteristic of a verb that expresses time flow during
an event.
"""
ASPECT_UNKNOWN = 0
PERFECTIVE = 1
IMPERFECTIVE = 2
PROGRESSIVE = 3
class Case(proto.Enum):
r"""The grammatical function performed by a noun or pronoun in a
phrase, clause, or sentence. In some languages, other parts of
speech, such as adjective and determiner, take case inflection
in agreement with the noun.
"""
CASE_UNKNOWN = 0
ACCUSATIVE = 1
ADVERBIAL = 2
COMPLEMENTIVE = 3
DATIVE = 4
GENITIVE = 5
INSTRUMENTAL = 6
LOCATIVE = 7
NOMINATIVE = 8
OBLIQUE = 9
PARTITIVE = 10
PREPOSITIONAL = 11
REFLEXIVE_CASE = 12
RELATIVE_CASE = 13
VOCATIVE = 14
class Form(proto.Enum):
r"""Depending on the language, Form can be categorizing different
forms of verbs, adjectives, adverbs, etc. For example,
categorizing inflected endings of verbs and adjectives or
distinguishing between short and long forms of adjectives and
participles
"""
FORM_UNKNOWN = 0
ADNOMIAL = 1
AUXILIARY = 2
COMPLEMENTIZER = 3
FINAL_ENDING = 4
GERUND = 5
REALIS = 6
IRREALIS = 7
SHORT = 8
LONG = 9
ORDER = 10
SPECIFIC = 11
class Gender(proto.Enum):
r"""Gender classes of nouns reflected in the behaviour of
associated words.
"""
GENDER_UNKNOWN = 0
FEMININE = 1
MASCULINE = 2
NEUTER = 3
class Mood(proto.Enum):
r"""The grammatical feature of verbs, used for showing modality
and attitude.
"""
MOOD_UNKNOWN = 0
CONDITIONAL_MOOD = 1
IMPERATIVE = 2
INDICATIVE = 3
INTERROGATIVE = 4
JUSSIVE = 5
SUBJUNCTIVE = 6
class Number(proto.Enum):
r"""Count distinctions."""
NUMBER_UNKNOWN = 0
SINGULAR = 1
PLURAL = 2
DUAL = 3
class Person(proto.Enum):
r"""The distinction between the speaker, second person, third
person, etc.
"""
PERSON_UNKNOWN = 0
FIRST = 1
SECOND = 2
THIRD = 3
REFLEXIVE_PERSON = 4
class Proper(proto.Enum):
r"""This category shows if the token is part of a proper name."""
PROPER_UNKNOWN = 0
PROPER = 1
NOT_PROPER = 2
class Reciprocity(proto.Enum):
r"""Reciprocal features of a pronoun."""
RECIPROCITY_UNKNOWN = 0
RECIPROCAL = 1
NON_RECIPROCAL = 2
class Tense(proto.Enum):
r"""Time reference."""
TENSE_UNKNOWN = 0
CONDITIONAL_TENSE = 1
FUTURE = 2
PAST = 3
PRESENT = 4
IMPERFECT = 5
PLUPERFECT = 6
class Voice(proto.Enum):
r"""The relationship between the action that a verb expresses and
the participants identified by its arguments.
"""
VOICE_UNKNOWN = 0
ACTIVE = 1
CAUSATIVE = 2
PASSIVE = 3
tag = proto.Field(proto.ENUM, number=1, enum=Tag,)
aspect = proto.Field(proto.ENUM, number=2, enum=Aspect,)
case = proto.Field(proto.ENUM, number=3, enum=Case,)
form = proto.Field(proto.ENUM, number=4, enum=Form,)
gender = proto.Field(proto.ENUM, number=5, enum=Gender,)
mood = proto.Field(proto.ENUM, number=6, enum=Mood,)
number = proto.Field(proto.ENUM, number=7, enum=Number,)
person = proto.Field(proto.ENUM, number=8, enum=Person,)
proper = proto.Field(proto.ENUM, number=9, enum=Proper,)
reciprocity = proto.Field(proto.ENUM, number=10, enum=Reciprocity,)
tense = proto.Field(proto.ENUM, number=11, enum=Tense,)
voice = proto.Field(proto.ENUM, number=12, enum=Voice,)
class DependencyEdge(proto.Message):
r"""Represents dependency parse tree information for a token.
(For more information on dependency labels, see
http://www.aclweb.org/anthology/P13-2017.)
Attributes:
head_token_index (int):
Represents the head of this token in the dependency tree.
This is the index of the token which has an arc going to
this token. The index is the position of the token in the
array of tokens returned by the API method. If this token is
a root token, then the ``head_token_index`` is its own
index.
label (google.cloud.language_v1.types.DependencyEdge.Label):
The parse label for the token.
"""
class Label(proto.Enum):
r"""The parse label enum for the token."""
UNKNOWN = 0
ABBREV = 1
ACOMP = 2
ADVCL = 3
ADVMOD = 4
AMOD = 5
APPOS = 6
ATTR = 7
AUX = 8
AUXPASS = 9
CC = 10
CCOMP = 11
CONJ = 12
CSUBJ = 13
CSUBJPASS = 14
DEP = 15
DET = 16
DISCOURSE = 17
DOBJ = 18
EXPL = 19
GOESWITH = 20
IOBJ = 21
MARK = 22
MWE = 23
MWV = 24
NEG = 25
NN = 26
NPADVMOD = 27
NSUBJ = 28
NSUBJPASS = 29
NUM = 30
NUMBER = 31
P = 32
PARATAXIS = 33
PARTMOD = 34
PCOMP = 35
POBJ = 36
POSS = 37
POSTNEG = 38
PRECOMP = 39
PRECONJ = 40
PREDET = 41
PREF = 42
PREP = 43
PRONL = 44
PRT = 45
PS = 46
QUANTMOD = 47
RCMOD = 48
RCMODREL = 49
RDROP = 50
REF = 51
REMNANT = 52
REPARANDUM = 53
ROOT = 54
SNUM = 55
SUFF = 56
TMOD = 57
TOPIC = 58
VMOD = 59
VOCATIVE = 60
XCOMP = 61
SUFFIX = 62
TITLE = 63
ADVPHMOD = 64
AUXCAUS = 65
AUXVV = 66
DTMOD = 67
FOREIGN = 68
KW = 69
LIST = 70
NOMC = 71
NOMCSUBJ = 72
NOMCSUBJPASS = 73
NUMC = 74
COP = 75
DISLOCATED = 76
ASP = 77
GMOD = 78
GOBJ = 79
INFMOD = 80
MES = 81
NCOMP = 82
head_token_index = proto.Field(proto.INT32, number=1,)
label = proto.Field(proto.ENUM, number=2, enum=Label,)
class EntityMention(proto.Message):
r"""Represents a mention for an entity in the text. Currently,
proper noun mentions are supported.
Attributes:
text (google.cloud.language_v1.types.TextSpan):
The mention text.
type_ (google.cloud.language_v1.types.EntityMention.Type):
The type of the entity mention.
sentiment (google.cloud.language_v1.types.Sentiment):
For calls to [AnalyzeEntitySentiment][] or if
[AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment]
is set to true, this field will contain the sentiment
expressed for this mention of the entity in the provided
document.
"""
class Type(proto.Enum):
r"""The supported types of mentions."""
TYPE_UNKNOWN = 0
PROPER = 1
COMMON = 2
text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",)
type_ = proto.Field(proto.ENUM, number=2, enum=Type,)
sentiment = proto.Field(proto.MESSAGE, number=3, message="Sentiment",)
class TextSpan(proto.Message):
r"""Represents an output piece of text.
Attributes:
content (str):
The content of the output text.
begin_offset (int):
The API calculates the beginning offset of the content in
the original document according to the
[EncodingType][google.cloud.language.v1.EncodingType]
specified in the API request.
"""
content = proto.Field(proto.STRING, number=1,)
begin_offset = proto.Field(proto.INT32, number=2,)
class ClassificationCategory(proto.Message):
r"""Represents a category returned from the text classifier.
Attributes:
name (str):
The name of the category representing the document, from the
`predefined
taxonomy <https://cloud.google.com/natural-language/docs/categories>`__.
confidence (float):
The classifier's confidence of the category.
Number represents how certain the classifier is
that this category represents the given text.
"""
name = proto.Field(proto.STRING, number=1,)
confidence = proto.Field(proto.FLOAT, number=2,)
class AnalyzeSentimentRequest(proto.Message):
r"""The sentiment analysis request message.
Attributes:
document (google.cloud.language_v1.types.Document):
Input document.
encoding_type (google.cloud.language_v1.types.EncodingType):
The encoding type used by the API to
calculate sentence offsets.
"""
document = proto.Field(proto.MESSAGE, number=1, message="Document",)
encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",)
class AnalyzeSentimentResponse(proto.Message):
r"""The sentiment analysis response message.
Attributes:
document_sentiment (google.cloud.language_v1.types.Sentiment):
The overall sentiment of the input document.
language (str):
The language of the text, which will be the same as the
language specified in the request or, if not specified, the
automatically-detected language. See
[Document.language][google.cloud.language.v1.Document.language]
field for more details.
sentences (Sequence[google.cloud.language_v1.types.Sentence]):
The sentiment for all the sentences in the
document.
"""
document_sentiment = proto.Field(proto.MESSAGE, number=1, message="Sentiment",)
language = proto.Field(proto.STRING, number=2,)
sentences = proto.RepeatedField(proto.MESSAGE, number=3, message="Sentence",)
class AnalyzeEntitySentimentRequest(proto.Message):
r"""The entity-level sentiment analysis request message.
Attributes:
document (google.cloud.language_v1.types.Document):
Input document.
encoding_type (google.cloud.language_v1.types.EncodingType):
The encoding type used by the API to
calculate offsets.
"""
document = proto.Field(proto.MESSAGE, number=1, message="Document",)
encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",)
class AnalyzeEntitySentimentResponse(proto.Message):
r"""The entity-level sentiment analysis response message.
Attributes:
entities (Sequence[google.cloud.language_v1.types.Entity]):
The recognized entities in the input document
with associated sentiments.
language (str):
The language of the text, which will be the same as the
language specified in the request or, if not specified, the
automatically-detected language. See
[Document.language][google.cloud.language.v1.Document.language]
field for more details.
"""
entities = proto.RepeatedField(proto.MESSAGE, number=1, message="Entity",)
language = proto.Field(proto.STRING, number=2,)
class AnalyzeEntitiesRequest(proto.Message):
r"""The entity analysis request message.
Attributes:
document (google.cloud.language_v1.types.Document):
Input document.
encoding_type (google.cloud.language_v1.types.EncodingType):
The encoding type used by the API to
calculate offsets.
"""
document = proto.Field(proto.MESSAGE, number=1, message="Document",)
encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",)
class AnalyzeEntitiesResponse(proto.Message):
r"""The entity analysis response message.
Attributes:
entities (Sequence[google.cloud.language_v1.types.Entity]):
The recognized entities in the input
document.
language (str):
The language of the text, which will be the same as the
language specified in the request or, if not specified, the
automatically-detected language. See
[Document.language][google.cloud.language.v1.Document.language]
field for more details.
"""
entities = proto.RepeatedField(proto.MESSAGE, number=1, message="Entity",)
language = proto.Field(proto.STRING, number=2,)
class AnalyzeSyntaxRequest(proto.Message):
r"""The syntax analysis request message.
Attributes:
document (google.cloud.language_v1.types.Document):
Input document.
encoding_type (google.cloud.language_v1.types.EncodingType):
The encoding type used by the API to
calculate offsets.
"""
document = proto.Field(proto.MESSAGE, number=1, message="Document",)
encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",)
class AnalyzeSyntaxResponse(proto.Message):
r"""The syntax analysis response message.
Attributes:
sentences (Sequence[google.cloud.language_v1.types.Sentence]):
Sentences in the input document.
tokens (Sequence[google.cloud.language_v1.types.Token]):
Tokens, along with their syntactic
information, in the input document.
language (str):
The language of the text, which will be the same as the
language specified in the request or, if not specified, the
automatically-detected language. See
[Document.language][google.cloud.language.v1.Document.language]
field for more details.
"""
sentences = proto.RepeatedField(proto.MESSAGE, number=1, message="Sentence",)
tokens = proto.RepeatedField(proto.MESSAGE, number=2, message="Token",)
language = proto.Field(proto.STRING, number=3,)
class ClassifyTextRequest(proto.Message):
r"""The document classification request message.
Attributes:
document (google.cloud.language_v1.types.Document):
Input document.
"""
document = proto.Field(proto.MESSAGE, number=1, message="Document",)
class ClassifyTextResponse(proto.Message):
r"""The document classification response message.
Attributes:
categories (Sequence[google.cloud.language_v1.types.ClassificationCategory]):
Categories representing the input document.
"""
categories = proto.RepeatedField(
proto.MESSAGE, number=1, message="ClassificationCategory",
)
class AnnotateTextRequest(proto.Message):
r"""The request message for the text annotation API, which can
perform multiple analysis types (sentiment, entities, and
syntax) in one call.
Attributes:
document (google.cloud.language_v1.types.Document):
Input document.
features (google.cloud.language_v1.types.AnnotateTextRequest.Features):
The enabled features.
encoding_type (google.cloud.language_v1.types.EncodingType):
The encoding type used by the API to
calculate offsets.
"""
class Features(proto.Message):
r"""All available features for sentiment, syntax, and semantic
analysis. Setting each one to true will enable that specific
analysis for the input.
Attributes:
extract_syntax (bool):
Extract syntax information.
extract_entities (bool):
Extract entities.
extract_document_sentiment (bool):
Extract document-level sentiment.
extract_entity_sentiment (bool):
Extract entities and their associated
sentiment.
classify_text (bool):
Classify the full document into categories.
"""
extract_syntax = proto.Field(proto.BOOL, number=1,)
extract_entities = proto.Field(proto.BOOL, number=2,)
extract_document_sentiment = proto.Field(proto.BOOL, number=3,)
extract_entity_sentiment = proto.Field(proto.BOOL, number=4,)
classify_text = proto.Field(proto.BOOL, number=6,)
document = proto.Field(proto.MESSAGE, number=1, message="Document",)
features = proto.Field(proto.MESSAGE, number=2, message=Features,)
encoding_type = proto.Field(proto.ENUM, number=3, enum="EncodingType",)
class AnnotateTextResponse(proto.Message):
r"""The text annotations response message.
Attributes:
sentences (Sequence[google.cloud.language_v1.types.Sentence]):
Sentences in the input document. Populated if the user
enables
[AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
tokens (Sequence[google.cloud.language_v1.types.Token]):
Tokens, along with their syntactic information, in the input
document. Populated if the user enables
[AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
entities (Sequence[google.cloud.language_v1.types.Entity]):
Entities, along with their semantic information, in the
input document. Populated if the user enables
[AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entities].
document_sentiment (google.cloud.language_v1.types.Sentiment):
The overall sentiment for the document. Populated if the
user enables
[AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment].
language (str):
The language of the text, which will be the same as the
language specified in the request or, if not specified, the
automatically-detected language. See
[Document.language][google.cloud.language.v1.Document.language]
field for more details.
categories (Sequence[google.cloud.language_v1.types.ClassificationCategory]):
Categories identified in the input document.
"""
sentences = proto.RepeatedField(proto.MESSAGE, number=1, message="Sentence",)
tokens = proto.RepeatedField(proto.MESSAGE, number=2, message="Token",)
entities = proto.RepeatedField(proto.MESSAGE, number=3, message="Entity",)
document_sentiment = proto.Field(proto.MESSAGE, number=4, message="Sentiment",)
language = proto.Field(proto.STRING, number=5,)
categories = proto.RepeatedField(
proto.MESSAGE, number=6, message="ClassificationCategory",
)
__all__ = tuple(sorted(__protobuf__.manifest))
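# Usage sketch (illustrative, not part of this generated module): these types
# are normally consumed through the generated client in google.cloud.language_v1,
# e.g. something like:
#   from google.cloud import language_v1
#   client = language_v1.LanguageServiceClient()
#   document = language_v1.Document(
#       content="Enjoy your vacation!",
#       type_=language_v1.Document.Type.PLAIN_TEXT)
#   response = client.analyze_sentiment(request={"document": document})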
|
googleapis/python-language
|
google/cloud/language_v1/types/language_service.py
|
Python
|
apache-2.0
| 29,767
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=line-too-long
r"""Beam job for model conversion.
"""
# pylint:enable=line-too-long
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import tensorflow as tf
# Import from main to force ourselves to use the same flags.
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_main # pylint:disable=unused-import
from non_semantic_speech_benchmark.export_model import model_conversion_beam_utils as utils
flags.DEFINE_list('xids', None, 'List of job IDs to run.')
flags.DEFINE_string('base_experiment_dir', None, 'Base experiment dir.')
flags.DEFINE_string('output_dir', None, 'Base output dir.')
flags.DEFINE_string('output_suffix', None,
'Output dir is {output_dir}/{xid}/{output_suffix}.')
flags.DEFINE_bool('include_frontend', False, 'Whether to export with frontend.')
flags.DEFINE_list('conversion_types', ['tflite', 'savedmodel'],
'Type of conversions.')
flags.DEFINE_bool('sanity_check', False, 'Whether to run sanity check.')
FLAGS = flags.FLAGS
def main(unused_argv):
beam_options = None
# Get metadata for conversion.
metadata = utils.get_pipeline_metadata(FLAGS.base_experiment_dir, FLAGS.xids,
FLAGS.output_dir,
FLAGS.conversion_types,
FLAGS.output_suffix)
if not metadata:
raise ValueError(
f'No data found: {FLAGS.base_experiment_dir}, {FLAGS.xids}')
logging.info('%i models in %i xids.', len(metadata), len(FLAGS.xids))
# Check that models don't already exist, and create directories if necessary.
for m in metadata:
utils.sanity_check_output_filename(m.output_filename)
logging.info('Starting to create beam pipeline...')
def _convert_and_write_model(m):
utils.convert_and_write_model(m, include_frontend=FLAGS.include_frontend,
sanity_check=FLAGS.sanity_check)
return m
# Make and run beam pipeline.
with beam.Pipeline(beam_options) as root:
_ = (
root
| 'MakeMetadataCollection' >> beam.Create(metadata)
| 'ConvertAndWriteModelsToDisk' >> beam.Map(_convert_and_write_model))
if __name__ == '__main__':
tf.compat.v2.enable_v2_behavior()
flags.mark_flags_as_required(['xids', 'base_experiment_dir', 'output_dir'])
app.run(main)
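# Example invocation (illustrative; xids and paths are placeholders):
#   python -m non_semantic_speech_benchmark.export_model.model_conversion_beam_main \
#     --xids=12345 \
#     --base_experiment_dir=/tmp/experiments \
#     --output_dir=/tmp/converted_models \
#     --output_suffix=export \
#     --conversion_types=tflite,savedmodel \
#     --include_frontend=false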
|
google-research/google-research
|
non_semantic_speech_benchmark/export_model/model_conversion_beam_main.py
|
Python
|
apache-2.0
| 3,053
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import gast
from tangent import ast
from tangent import annotations as anno
from tangent import cfg
from tangent import naming
from tangent import quoting
from tangent import template
from tangent import transformers
class ExplicitLoopIndexes(transformers.TreeTransformer):
def visit_FunctionDef(self, node):
cfg.forward(node, cfg.Active(range(len(node.args.args))))
self.namer = naming.Namer.build(node)
node = self.generic_visit(node)
return node
def visit_For(self, node):
# If the iter is a Name that is active,
# we need to rewrite the loop.
# Iterators of the form `for a in x` rely on an implicit
# indexing operation, which Tangent cannot reverse without
# more information. So, we will create an explicit
# indexing operation. Note that we will use
# integer indexes, which will cause strange behavior if
# the iterator's `next()` behavior deviates from
# a plain incrementing index.
# The right thing to do (eventually) is to write a multiple-dispatch
# version of the `next` operator, and its adjoint, so that
# we can handle e.g. dicts.
if isinstance(node.iter, (gast.Name, gast.Subscript, gast.Attribute)):
iter_name = ast.get_name(node.iter)
if iter_name in anno.getanno(node, 'active_in'):
# for a in x:
# f(a)
# # becomes
# for i in range(len(x)):
# a = x[i]
# f(a)
# Get a unique iterator name
old_target = copy.deepcopy(node.target)
new_target = quoting.quote(self.namer.unique('_idx'))
old_iter = copy.deepcopy(node.iter)
item_access = template.replace(
'old_target = x[i]',
old_target=old_target,
x=old_iter,
i=new_target)
node.target = gast.Name(id=new_target.id, ctx=gast.Store(), annotation=None)
node.iter = quoting.quote('range(len(%s))' % iter_name)
anno.setanno(node.iter, 'func', range)
anno.setanno(node.iter.args[0], 'func', len)
node.body = [item_access] + node.body
return node
def explicit_loop_indexes(node):
node = ExplicitLoopIndexes().visit(node)
for n in gast.walk(node):
for key in ('active_in', 'active_out', 'active_gen', 'active_kill'):
if anno.hasanno(n, key):
anno.delanno(n, key)
return node
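# Illustrative effect of explicit_loop_indexes (names chosen by the Namer may
# differ): a loop such as
#   for a in x:
#       y = y + a
# is rewritten so the implicit iteration becomes an explicit integer index:
#   for _idx in range(len(x)):
#       a = x[_idx]
#       y = y + a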
|
google/tangent
|
tangent/desugar.py
|
Python
|
apache-2.0
| 2,988
|
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
from c7n.resources.aws import shape_validate
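# Note added for context: replay_flight_data (from BaseTest) replays
# previously recorded AWS API responses from the test fixture data, so these
# tests run offline without live credentials; load_policy builds a policy
# from the same dict structure used in Cloud Custodian YAML policies.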
class ElasticSearch(BaseTest):
def test_get_resources(self):
factory = self.replay_flight_data('test_elasticsearch_get')
p = self.load_policy({
'name': 'es-get',
'resource': 'aws.elasticsearch'},
session_factory=factory)
resources = p.resource_manager.get_resources(['devx'])
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['DomainName'], 'devx')
def test_resource_manager(self):
factory = self.replay_flight_data("test_elasticsearch_query")
p = self.load_policy(
{
"name": "es-query",
"resource": "elasticsearch",
"filters": [{"DomainName": "c7n-test"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DomainName"], "c7n-test")
self.assertEqual(resources[0]["Tags"], [{u"Key": u"Env", u"Value": u"Dev"}])
self.assertTrue(
resources[0]["Endpoint"].startswith(
"search-c7n-test-ug4l2nqtnwwrktaeagxsqso"
)
)
def test_metrics_domain(self):
factory = self.replay_flight_data("test_elasticsearch_delete")
p = self.load_policy(
{
"name": "es-query",
"resource": "elasticsearch",
"filters": [
{
"type": "metrics",
"name": "SearchableDocuments",
"days": 4,
"period": 86400,
"value": 1000,
"op": "less-than",
}
],
},
session_factory=factory,
)
self.assertEqual(
p.resource_manager.filters[0].get_dimensions({"DomainName": "foo"}),
[
{"Name": "ClientId", "Value": "644160558196"},
{"Name": "DomainName", "Value": "foo"},
],
)
def test_delete_search(self):
factory = self.replay_flight_data("test_elasticsearch_delete")
p = self.load_policy(
{
"name": "es-query",
"resource": "elasticsearch",
"filters": [{"DomainName": "c7n-test"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DomainName"], "c7n-test")
client = factory().client("es")
state = client.describe_elasticsearch_domain(DomainName="c7n-test")[
"DomainStatus"
]
self.assertEqual(state["Deleted"], True)
def test_post_finding_es(self):
factory = self.replay_flight_data('test_elasticsearch_post_finding')
p = self.load_policy({
'name': 'es-post',
'resource': 'aws.elasticsearch',
'actions': [
{'type': 'post-finding',
'types': [
'Software and Configuration Checks/OrgStandard/abc-123']}]},
session_factory=factory, config={'region': 'us-west-2'})
resources = p.resource_manager.resources()
self.maxDiff = None
self.assertEqual(len(resources), 1)
fresource = p.resource_manager.actions[0].format_resource(resources[0])
self.assertEqual(
fresource['Details']['AwsElasticsearchDomain'],
{'AccessPolicies': '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"*"},"Action":"es:*","Resource":"arn:aws:es:us-west-2:644160558196:domain/devx/*"}]}', # noqa
'DomainEndpointOptions': {
'EnforceHTTPS': True,
'TLSSecurityPolicy': 'Policy-Min-TLS-1-0-2019-07'},
'DomainId': '644160558196/devx',
'DomainName': 'devx',
'Endpoints': {
'vpc': 'vpc-devx-4j4l2ateukiwrnnxgbowppjt64.us-west-2.es.amazonaws.com'},
'ElasticsearchVersion': '7.4',
'EncryptionAtRestOptions': {
'Enabled': True,
'KmsKeyId': 'arn:aws:kms:us-west-2:644160558196:key/9b776c6e-0a40-45d0-996b-707018677fe9' # noqa
},
'NodeToNodeEncryptionOptions': {'Enabled': True},
'VPCOptions': {'AvailabilityZones': ['us-west-2b'],
'SecurityGroupIds': ['sg-0eecc076'],
'SubnetIds': ['subnet-63c97615'],
'VPCId': 'vpc-4a9ff72e'}})
shape_validate(
fresource['Details']['AwsElasticsearchDomain'],
'AwsElasticsearchDomainDetails',
'securityhub')
def test_domain_add_tag(self):
session_factory = self.replay_flight_data("test_elasticsearch_add_tag")
client = session_factory(region="us-east-1").client("es")
p = self.load_policy(
{
"name": "tag-elasticsearch-domain",
"resource": "elasticsearch",
"filters": [{"tag:MyTag": "absent"}],
"actions": [{"type": "tag", "key": "MyTag", "value": "MyValue"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DomainName"], "c7n-test")
tags = client.list_tags(ARN=resources[0]["ARN"])["TagList"][0]
self.assertEqual(tags, {"Key": "MyTag", "Value": "MyValue"})
def test_domain_remove_tag(self):
session_factory = self.replay_flight_data("test_elasticsearch_remove_tag")
client = session_factory(region="us-east-1").client("es")
p = self.load_policy(
{
"name": "remove-tag-elasticsearch-domain",
"resource": "elasticsearch",
"filters": [{"tag:MyTag": "present"}],
"actions": [{"type": "remove-tag", "tags": ["MyTag"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DomainName"], "c7n-test")
tags = client.list_tags(ARN=resources[0]["ARN"])["TagList"]
self.assertEqual(len(tags), 0)
def test_domain_mark_for_op(self):
session_factory = self.replay_flight_data("test_elasticsearch_markforop")
client = session_factory(region="us-east-1").client("es")
p = self.load_policy(
{
"name": "markforop-elasticsearch-domain",
"resource": "elasticsearch",
"filters": [{"tag:MyTag": "absent"}],
"actions": [
{
"type": "mark-for-op",
"days": 1,
"tag": "es_custodian_cleanup",
"op": "delete",
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DomainName"], "c7n-test")
tags = client.list_tags(ARN=resources[0]["ARN"])["TagList"][0]
self.assertEqual(
tags,
{
"Key": "es_custodian_cleanup",
"Value": "Resource does not meet policy: delete@2017/11/30",
},
)
def test_domain_marked_for_op(self):
session_factory = self.replay_flight_data("test_elasticsearch_markedforop")
p = self.load_policy(
{
"name": "markedforop-elasticsearch-domain",
"resource": "elasticsearch",
"filters": [
{
"type": "marked-for-op",
"tag": "es_custodian_cleanup",
"skew": 1,
"op": "delete",
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DomainName"], "c7n-test")
def test_modify_security_groups(self):
session_factory = self.replay_flight_data(
"test_elasticsearch_modify_security_groups"
)
p = self.load_policy(
{
"name": "modify-es-sg",
"resource": "elasticsearch",
"filters": [
{
"type": "security-group",
"key": "GroupId",
"value": ["sg-6c7fa917", "sg-3839ec4b"],
"op": "in",
}
],
"actions": [
{
"type": "modify-security-groups",
"add": ["sg-9a5386e9"],
"remove": ["sg-3839ec4b"],
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(
sorted(resources[0]["VPCOptions"]["SecurityGroupIds"]),
sorted(["sg-6c7fa917", "sg-3839ec4b"]),
)
client = session_factory(region="us-east-1").client("es")
result = client.describe_elasticsearch_domains(
DomainNames=[resources[0]["DomainName"]]
)[
"DomainStatusList"
]
self.assertEqual(
sorted(result[0]["VPCOptions"]["SecurityGroupIds"]),
sorted(["sg-6c7fa917", "sg-9a5386e9"]),
)
class TestReservedInstances(BaseTest):
def test_elasticsearch_reserved_node_query(self):
session_factory = self.replay_flight_data("test_elasticsearch_reserved_instances_query")
p = self.load_policy(
{
"name": "elasticsearch-reserved",
"resource": "aws.elasticsearch-reserved"
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]["ReservedElasticsearchInstanceId"],
"036381d0-4fa5-4484-bd1a-efc1b43af0bf"
)
|
capitalone/cloud-custodian
|
tests/test_elasticsearch.py
|
Python
|
apache-2.0
| 10,624
|
'''
@author:fangxiao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import time
import os
import apibinding.api_actions as api_actions
import zstackwoodpecker.operations.vxlan_operations as vxlan_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.deploy_operations as dep_ops
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.vpc_operations as vpc_ops
# test_obj_dict tracks test resources. They will be cleaned up if any exception occurs during testing.
test_obj_dict = test_state.TestStateDict()
project_uuid = None
linked_account_uuid = None
project_operator_uuid = None
account_lists = None
test_stub = test_lib.lib_get_test_stub()
vni_range_uuid = None
vxlan_pool_uuid = None
l2_vxlan_network_uuid = None
account1_uuid = None
account2_uuid = None
def test():
global linked_account_uuid,project_uuid,project_operator_uuid,account_lists,vni_range_uuid,vxlan_pool_uuid,l2_vxlan_network_uuid,account1_uuid,account2_uuid
# create vxlan pool and vni range
zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
cluster_uuid = res_ops.get_resource(res_ops.CLUSTER)[0].uuid
vxlan_pool_name = 'vxlan_pool_name'
vxlan_pool_uuid = vxlan_ops.create_l2_vxlan_network_pool(vxlan_pool_name,zone_uuid).uuid
vxlan_ops.create_vni_range('vni_range',20,40,vxlan_pool_uuid)
systemTags = ["l2NetworkUuid::%s::clusterUuid::%s::cidr::{172.20.0.1/16}"%(vxlan_pool_uuid,cluster_uuid)]
net_ops.attach_l2_vxlan_pool(vxlan_pool_uuid,cluster_uuid,systemTags)
# 1 create project
project_name = 'test_share_project1'
project = iam2_ops.create_iam2_project(project_name)
project_uuid = project.uuid
#cond = res_ops.gen_query_conditions("name",'=',"test_share_project1")
#linked_account_uuid = res_ops.query_resource(res_ops.ACCOUNT,cond)[0].uuid
linked_account_uuid = project.linkedAccountUuid
# 2 create project operator
project_operator_name = 'share_username1'
project_operator_password = 'password'
attributes = [{"name": "__ProjectOperator__", "value": project_uuid}]
project_operator_uuid = iam2_ops.create_iam2_virtual_id(project_operator_name,project_operator_password,attributes=attributes).uuid
# 3 login in project by project operator
iam2_ops.add_iam2_virtual_ids_to_project([project_operator_uuid],project_uuid)
project_operator_session_uuid = iam2_ops.login_iam2_virtual_id(project_operator_name,project_operator_password)
project_login_uuid = iam2_ops.login_iam2_project(project_name,session_uuid=project_operator_session_uuid).uuid
    # TODO: use the shared resources
# 4 share admin resources to project
test_stub.share_admin_resource_include_vxlan_pool([linked_account_uuid])
# use the shared resources to create vm
vm = test_stub.create_vm(session_uuid=project_login_uuid)
volume = test_stub.create_volume(session_uuid=project_login_uuid)
test_obj_dict.add_volume(volume)
test_obj_dict.add_vm(vm)
l2_vxlan_network_uuid = vxlan_ops.create_l2_vxlan_network('l2_vxlan',vxlan_pool_uuid,zone_uuid,session_uuid=project_login_uuid).uuid
virtual_router_offering_uuid = res_ops.get_resource(res_ops.VR_OFFERING)[0].uuid
vpc_ops.create_vpc_vrouter('vpc_router',virtual_router_offering_uuid,session_uuid=project_login_uuid)
# 5 revoke admin resources from project
test_stub.revoke_admin_resource([linked_account_uuid])
# 6 share to all
#create_account
account1_uuid = acc_ops.create_account('user1','password','Normal').uuid
account2_uuid = acc_ops.create_account('user2','password','Normal').uuid
account_lists = res_ops.query_resource(res_ops.ACCOUNT)
for account in account_lists:
test_stub.share_admin_resource_include_vxlan_pool([account.uuid])
# 7 revoke resources from all
for account in account_lists:
test_stub.revoke_admin_resource([account.uuid])
# 8 Negative test
    test_util.test_dsc('Doing negative test. Try to use the resources not shared to create a vm')
try:
test_stub.create_vm(session_uuid=project_login_uuid)
except:
        test_util.test_logger('Caught expected exception: cannot use the resources not shared to create a vm')
else:
        test_util.test_fail('Wrong logic: vm creation succeeded with resources that were not shared')
    test_util.test_dsc('Doing negative test. Try to use the resources not shared to create a volume')
try:
test_stub.create_volume(session_uuid=project_login_uuid)
except:
        test_util.test_logger('Caught expected exception: cannot use the resources not shared to create a volume')
else:
        test_util.test_fail('Wrong logic: volume creation succeeded with resources that were not shared')
    test_util.test_dsc('Doing negative test. Try to use the resources not shared to create a vxlan network')
try:
vxlan_ops.create_l2_vxlan_network('l2_vxlan',vxlan_pool_uuid,zone_uuid,session_uuid=project_login_uuid)
except:
        test_util.test_logger('Caught expected exception: cannot use the resources not shared to create an l2 vxlan')
else:
        test_util.test_fail('Wrong logic: l2 vxlan creation succeeded with resources that were not shared')
    test_util.test_dsc('Doing negative test. Try to use the resources not shared to create a vpc_vrouter')
try:
        vpc_ops.create_vpc_vrouter('vpc_router',virtual_router_offering_uuid,session_uuid=project_login_uuid)
except:
        test_util.test_logger('Caught expected exception: cannot use the resources not shared to create a vpc_router')
else:
        test_util.test_fail('Wrong logic: vpc_router creation succeeded with resources that were not shared')
# 9 delete
acc_ops.logout(project_login_uuid)
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
vni_range_uuid = res_ops.get_resource(res_ops.VNI_RANGE)[0].uuid
vxlan_ops.delete_vni_range(vni_range_uuid)
vpc_ops.remove_all_vpc_vrouter()
test_lib.lib_error_cleanup(test_obj_dict)
net_ops.delete_l2(vxlan_pool_uuid)
net_ops.delete_l2(l2_vxlan_network_uuid)
acc_ops.delete_account(account1_uuid)
acc_ops.delete_account(account2_uuid)
def error_cleanup():
global project_uuid,project_operator_uuid, vxlan_pool_uuid,vni_range_uuid,l2_vxlan_network_uuid,account1_uuid,account2_uuid
if project_operator_uuid:
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
if project_uuid:
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
if vni_range_uuid:
vxlan_ops.delete_vni_range(vni_range_uuid)
vpc_ops.remove_all_vpc_vrouter()
test_lib.lib_error_cleanup(test_obj_dict)
if vxlan_pool_uuid:
net_ops.delete_l2(vxlan_pool_uuid)
if l2_vxlan_network_uuid:
net_ops.delete_l2(l2_vxlan_network_uuid)
if account1_uuid:
acc_ops.delete_account(account1_uuid)
if account2_uuid:
acc_ops.delete_account(account2_uuid)
|
zstackio/zstack-woodpecker
|
integrationtest/vm/simulator/iam2/test_share_resources_to_project.py
|
Python
|
apache-2.0
| 7,458
|
# -*- coding: utf-8 -*-
from model.contact import PersonalData
def test_delete_all_contacts(app, db, check_ui):
if app.contact.count() == 0:
app.contact.init_new_contact()
app.contact.fill_personal_data(PersonalData(firstname="test"))
app.contact.submit_contact()
app.contact.init_new_contact()
app.contact.fill_personal_data(PersonalData(firstname="test2"))
app.contact.submit_contact()
app.contact.delete_all_contacts()
app.contact.open_main_page()
    # new_contacts = app.contact.get_contact_list()  # removed because there is no need to load the list
assert len(db.get_contact_list()) == 0
if check_ui:
assert app.contact.count() == 0
|
Droriel/python_training
|
test/test_del_contacts_all.py
|
Python
|
apache-2.0
| 709
|
# Specify a model and the file path where it is to be saved. If no path is specified, the model will be saved to the
# current working directory
model_path = h2o.save_model(
model = model,
#path = "/tmp/mymodel",
force = True)
print model_path
# /tmp/mymodel/DeepLearning_model_python_1441838096933
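# A possible round trip (sketch, assuming the same H2O cluster is still running):
# the saved model can be reloaded later from the path returned above.
#   loaded_model = h2o.load_model(model_path)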
|
pchmieli/h2o-3
|
h2o-docs/src/booklets/v2_2015/source/DeepLearning_Vignette_code_examples/deeplearning_savemodel.py
|
Python
|
apache-2.0
| 325
|
# Copyright (c) 2018 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as lib_const
from neutron.agent.l3.extensions import snat_log
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.tests.unit.services.logapi.agent.l3 import test_base
class SnatLogExtensionInitializeTestCase(test_base.L3LoggingExtBaseTestCase):
def setUp(self):
super(SnatLogExtensionInitializeTestCase, self).setUp()
self.snat_log_ext = snat_log.SNATLoggingExtension()
self.snat_log_ext.consume_api(self.agent_api)
@mock.patch.object(registry, 'register')
@mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
call_to_patch = 'neutron.common.rpc.Connection'
with mock.patch(call_to_patch,
return_value=self.connection) as create_connection:
self.snat_log_ext.initialize(
self.connection, lib_const.L3_AGENT_MODE)
create_connection.assert_has_calls([mock.call()])
self.connection.create_consumer.assert_has_calls(
[mock.call(
resources_rpc.resource_type_versioned_topic(
resources.LOGGING_RESOURCE),
[rpc_mock()],
fanout=True)]
)
subscribe_mock.assert_called_with(
mock.ANY, resources.LOGGING_RESOURCE)
|
noironetworks/neutron
|
neutron/tests/unit/agent/l3/extensions/test_snat_log.py
|
Python
|
apache-2.0
| 2,160
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def vec_as_list(ip,port):
iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
res = h2o.as_list(iris[0], use_pandas=False)
assert abs(float(res[4][0]) - 4.6) < 1e-10 and abs(float(res[6][0]) - 5.4) < 1e-10 and \
abs(float(res[10][0]) - 4.9) < 1e-10, "incorrect values"
res = 2 - iris
res2 = h2o.as_list(res[0], use_pandas=False)
assert abs(float(res2[4][0]) - -2.6) < 1e-10 and abs(float(res2[18][0]) - -3.1) < 1e-10 and \
abs(float(res2[25][0]) - -2.8) < 1e-10, "incorrect values"
res3 = h2o.as_list(res[1], use_pandas=False)
assert abs(float(res3[4][0]) - -1.1) < 1e-10 and abs(float(res3[6][0]) - -1.9) < 1e-10 and \
abs(float(res3[10][0]) - -1.1) < 1e-10, "incorrect values"
if __name__ == "__main__":
tests.run_test(sys.argv, vec_as_list)
|
bospetersen/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_vec_as_list.py
|
Python
|
apache-2.0
| 910
|
"""
CGAL DCEL implementation
By Georgios Koloventzos
DCEL First Implementation
Download-URL: https://netfiles.uiuc.edu/ayg/www/stuff/dcel-0.1.0.tar.gz
By Angel Yanguas-Gil
"""
from CGAL import *
from math import *
from cgalvisual import *
def collinear(i, j, k):
return orientation(i, j, k) == CGAL.Kernel.Sign.EQUAL
class Vertex:
"""Minimal implementation of a vertex of a 2D dcel"""
def __init__(self, point_2):
self.x = point_2.x()
self.y = point_2.y()
self.hedgelist = []
def sortincident(self):
self.hedgelist.sort(hsort, reverse=True)
########################################################################
class Hedge:
"""Minimal implementation of a half-edge of a 2D dcel"""
def __init__(self,segment_2):
#The origin is defined as the vertex it points to
self.v2 = segment_2.end()
self.v1 = segment_2.start()
self.origin = segment_2.target()
self.twin = None
self.face = None
self.nexthedge = None
self.angle = hangle(self.v2.x()-self.v1.x(), self.v2.y()-self.v1.y())
self.prevhedge = None
self.length = sqrt(segment_2.squared_length())
def start(self):
return self.v1
def end(self):
return self.v2
########################################################################
class Face:
"""Implements a face of a 2D dcel"""
def __init__(self):
self.wedge = None
self.lvertices = []
self.ledges = []
self.data = None
self.external = None
def area(self):
h = self.wedge
a = 0
while(not h.nexthedge is self.wedge):
p1 = h.origin
p2 = h.nexthedge.origin
a += p1.x()*p2.y() - p2.x()*p1.y()
h = h.nexthedge
p1 = h.origin
p2 = self.wedge.origin
a = (a + p1.x()*p2.y() - p2.x()*p1.y())/2
return a
def perimeter(self):
p=0
        for h in self.ledges:
p += h.length
return p
def isinside(self, p):
"""Determines whether a point is inside a face"""
        for h in self.ledges:
if lefton(h,p):
continue
else:
return False
return True
########################################################################
class Dcel():
"""
Implements a doubly-connected edge list
"""
def __init__(self, vl=[], el=[]):
self.vertices = []
self.hedges = []
self.faces = []
if vl == []:
            return
#Step 1: vertex list creation
for v in vl:
self.vertices.append(Vertex(v))
#Step 2: hedge list creation. Assignment of twins and
#vertices
for e in el:
h1 = Hedge(e)
h2 = Hedge(e.opposite())
h1.twin = h2
h2.twin = h1
i = vl.index(e[1])
j = vl.index(e[0])
self.vertices[i].hedgelist.append(h1)
self.vertices[j].hedgelist.append(h2)
self.hedges.append(h2)
self.hedges.append(h1)
#Step 3: Identification of next and prev hedges
for v in self.vertices:
v.sortincident()
l = len(v.hedgelist)
if l < 2:
g = VPoint_2(v.x,v.y)
raise DcelError(
"Badly formed dcel: less than two hedges in vertex")
else:
for i in range(l-1):
v.hedgelist[i].nexthedge = v.hedgelist[i+1].twin
v.hedgelist[i+1].prevhedge = v.hedgelist[i]
v.hedgelist[l-1].nexthedge = v.hedgelist[0].twin
v.hedgelist[0].prevhedge = v.hedgelist[l-1]
#Step 4: Face assignment
provlist = self.hedges[:]
nf = 0
nh = len(self.hedges)
while nh > 0:
h = provlist.pop()
nh -= 1
#We check if the hedge already points to a face
if h.face == None:
f = Face()
nf += 1
#We link the hedge to the new face
f.wedge = h
f.wedge.face = f
f.ledges.append(h)
f.lvertices.append(h.end())
#And we traverse the boundary of the new face
while (not h.nexthedge is f.wedge):
h = h.nexthedge
f.ledges.append(h)
f.lvertices.append(h.end())
h.face = f
self.faces.append(f)
#And finally we have to determine the external face
for f in self.faces:
f.external = f.area() < 0
def findpoints(self, pl, onetoone=False):
"""Given a list of points pl, returns a list of
with the corresponding face each point belongs to and
None if it is outside the map.
"""
ans = []
if onetoone:
fl = self.faces[:]
for p in pl:
found = False
for f in fl:
if f.external:
continue
if f.isinside(p):
fl.remove(f)
found = True
ans.append(f)
break
if not found:
ans.append(None)
else:
for p in pl:
found = False
for f in self.faces:
if f.external:
continue
if f.isinside(p):
found = True
ans.append(f)
break
if not found:
ans.append(None)
return ans
def areas(self):
return [f.area() for f in self.faces if not f.external]
def perimeters(self):
return [f.perimeter() for f in self.faces if not f.external]
def nfaces(self):
return len(self.faces)
def nvertices(self):
return len(self.vertices)
def nedges(self):
return len(self.hedges)/2
########################################################################
def hsort(h1, h2):
"""Sorts two half edges counterclockwise"""
if h1.angle < h2.angle:
return -1
elif h1.angle > h2.angle:
return 1
else:
return 0
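# Note (added for clarity, not in the original code): hsort is a Python 2
# style cmp function, matching the list.sort(hsort, reverse=True) call in
# Vertex.sortincident. Under Python 3 it would need to be wrapped, e.g.
#   self.hedgelist.sort(key=functools.cmp_to_key(hsort), reverse=True)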
def checkhedges(hl):
"""Consistency check of a hedge list: nexthedge, prevhedge"""
for h in hl:
if h.nexthedge not in hl or h.prevhedge not in hl:
raise DcelError("Problems with an orphan hedge...")
def area2(hedge, point):
    """Determines twice the signed area of the triangle formed by a hedge
    and an external point"""
    pa = hedge.twin.origin
    pb = hedge.origin
    pc = point
    return (pb.x() - pa.x()) * (pc.y() - pa.y()) - \
           (pc.x() - pa.x()) * (pb.y() - pa.y())
def lefton(hedge, point):
"""Determines if a point is to the left of a hedge"""
return orientation(hedge.start(),hedge.end(),point) == CGAL.Kernel.Sign.LARGER
def hangle(dx,dy):
"""Determines the angle with respect to the x axis of a segment
of coordinates dx and dy
"""
l = sqrt(dx*dx + dy*dy)
if dy > 0:
return acos(dx/l)
else:
return 2*pi - acos(dx/l)
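# Worked examples for hangle (added as an illustration; the values follow
# directly from the formula above). The angle is measured counterclockwise
# from the positive x axis and falls in (0, 2*pi]:
#   hangle(0.0, 1.0)  -> pi / 2       (pointing straight up)
#   hangle(-1.0, 0.0) -> pi           (pointing left)
#   hangle(0.0, -1.0) -> 3 * pi / 2   (pointing straight down)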
|
sfagmenos/GuardsInArtGallery
|
dcel.py
|
Python
|
apache-2.0
| 7,306
|
from common import TestID, log_surl_call_result
from eu.emi.security.authn.x509.impl import PEMCredential
from exceptions import Exception
from jarray import array
from java.io import FileInputStream
from javax.net.ssl import X509ExtendedKeyManager
from net.grinder.plugin.http import HTTPRequest
from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from org.italiangrid.srm.client import SRMClient, SRMClientFactory
import random
import traceback
error = grinder.logger.error
info = grinder.logger.info
debug = grinder.logger.debug
props = grinder.properties
def mkdir(surl, client):
debug("Creating directory: %s" % surl)
    res = client.srmMkdir(surl)
debug("Directory created")
return res
class TestRunner:
def __call__(self, surl, client):
if client is None:
raise Exception("Please set a non-null SRM client!")
test = Test(TestID.MKDIR, "StoRM MKDIR")
test.record(mkdir)
try:
return mkdir(surl, client)
except Exception:
error("Error executing mkdir: %s" % traceback.format_exc())
raise
|
italiangrid/grinder-load-testsuite
|
storm/base/mkdir.py
|
Python
|
apache-2.0
| 1,065
|
import GameLibrary.Action as Action
import GameLibrary.Card as Card
import GameLibrary.Client as Client
import GameLibrary.Constant as Constant
import GameLibrary.Creature as Creature
import GameLibrary.Game as Game
import GameLibrary.Player as Player
import GameLibrary.Spell as Spell
print(dir(Action))
print(dir(Action.Action))
print(dir(Card))
print(dir(Card.Card))
print(dir(Client))
print(dir(Client.Client))
print(dir(Constant))
print(dir(Constant.Constant))
print(dir(Creature))
print(dir(Creature.Creature))
print(dir(Game))
print(dir(Game.Game))
print(dir(Player))
print(dir(Player.Player))
print(dir(Spell))
print(dir(Spell.Spell))
|
maspe36/WhispererTCG
|
cards/test.py
|
Python
|
apache-2.0
| 651
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Interfaces
####################
This subpackage contains support for (small) third-party packages and
external file types.
"""
from .cclib import universe_from_cclib
from .xyz import XYZ
from .cube import Cube
from .tens import RTensor
|
tjduigna/exatomic
|
exatomic/interfaces/__init__.py
|
Python
|
apache-2.0
| 378
|
from model.contact import Contact
testdata = [
Contact(firstname="Senin", lastname="Pavel", email="psenin@mail.ru", bday="24",
bmonth="December", byear="1981"),
Contact(firstname="Senin2", lastname="Pavel2", email="psenin@mail.ru2", bday="24",
bmonth="December", byear="1982")
]
|
senin24/python_trainig
|
data/contacts.py
|
Python
|
apache-2.0
| 330
|
from __future__ import unicode_literals
import pytz
from datetime import datetime
from django.utils import timezone
from django.core.exceptions import PermissionDenied, ValidationError
from django.views.generic import ListView, DeleteView, View, TemplateView
from django.shortcuts import redirect
from django.views.defaults import page_not_found
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import HttpResponse
from django.db.models import Q
from website import search
from osf.models import NodeLog
from osf.models.user import OSFUser
from osf.models.node import Node
from osf.models.registrations import Registration
from osf.models import SpamStatus
from admin.base.utils import change_embargo_date, validate_embargo_date
from admin.base.views import GuidFormView, GuidView
from osf.models.admin_log_entry import (
update_admin_log,
NODE_REMOVED,
NODE_RESTORED,
CONTRIBUTOR_REMOVED,
CONFIRM_SPAM,
CONFIRM_HAM,
REINDEX_SHARE,
REINDEX_ELASTIC,
)
from admin.nodes.templatetags.node_extras import reverse_node
from admin.nodes.serializers import serialize_node, serialize_simple_user_and_node_permissions, serialize_log
from website.project.tasks import update_node_share
from website.project.views.register import osf_admin_change_status_identifier
class NodeFormView(PermissionRequiredMixin, GuidFormView):
""" Allow authorized admin user to input specific node guid.
Basic form. No admin models.
"""
template_name = 'nodes/search.html'
object_type = 'node'
permission_required = 'osf.view_node'
raise_exception = True
@property
def success_url(self):
return reverse_node(self.guid)
class NodeRemoveContributorView(PermissionRequiredMixin, DeleteView):
""" Allow authorized admin user to remove project contributor
Interface with OSF database. No admin models.
"""
template_name = 'nodes/remove_contributor.html'
context_object_name = 'node'
permission_required = ('osf.view_node', 'osf.change_node')
raise_exception = True
def add_contributor_removed_log(self, node, user):
osf_log = NodeLog(
action=NodeLog.CONTRIB_REMOVED,
user=None,
params={
'project': node.parent_id,
'node': node.pk,
'contributors': user.pk
},
date=timezone.now(),
should_hide=True,
)
return osf_log.save()
def delete(self, request, *args, **kwargs):
try:
node, user = self.get_object()
if node.remove_contributor(user, None, log=False):
update_admin_log(
user_id=self.request.user.id,
object_id=node.pk,
object_repr='Contributor',
message='User {} removed from {} {}.'.format(
user.pk, node.__class__.__name__.lower(), node.pk
),
action_flag=CONTRIBUTOR_REMOVED
)
# Log invisibly on the OSF.
self.add_contributor_removed_log(node, user)
except AttributeError:
return page_not_found(
request,
AttributeError(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
)
)
)
if isinstance(node, Node):
return redirect(reverse_node(self.kwargs.get('guid')))
def get_context_data(self, **kwargs):
context = {}
node, user = kwargs.get('object')
context.setdefault('guid', node._id)
context.setdefault('user', serialize_simple_user_and_node_permissions(node, user))
context['link'] = 'nodes:remove_user'
context['resource_type'] = 'project'
return super(NodeRemoveContributorView, self).get_context_data(**context)
def get_object(self, queryset=None):
return (Node.load(self.kwargs.get('guid')),
OSFUser.load(self.kwargs.get('user_id')))
class NodeDeleteBase(DeleteView):
template_name = None
context_object_name = 'node'
object = None
def get_context_data(self, **kwargs):
context = {}
context.setdefault('guid', kwargs.get('object')._id)
return super(NodeDeleteBase, self).get_context_data(**context)
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
class NodeDeleteView(PermissionRequiredMixin, NodeDeleteBase):
""" Allow authorized admin user to remove/hide nodes
Interface with OSF database. No admin models.
"""
template_name = 'nodes/remove_node.html'
object = None
permission_required = ('osf.view_node', 'osf.delete_node')
raise_exception = True
def get_context_data(self, **kwargs):
context = super(NodeDeleteView, self).get_context_data(**kwargs)
context['link'] = 'nodes:remove'
context['resource_type'] = 'node'
return context
def delete(self, request, *args, **kwargs):
try:
node = self.get_object()
flag = None
osf_flag = None
message = None
if node.is_deleted:
node.is_deleted = False
node.deleted_date = None
flag = NODE_RESTORED
message = 'Node {} restored.'.format(node.pk)
osf_flag = NodeLog.NODE_CREATED
elif not node.is_registration:
node.is_deleted = True
node.deleted_date = timezone.now()
flag = NODE_REMOVED
message = 'Node {} removed.'.format(node.pk)
osf_flag = NodeLog.NODE_REMOVED
node.save()
if flag is not None:
update_admin_log(
user_id=self.request.user.id,
object_id=node.pk,
object_repr='Node',
message=message,
action_flag=flag
)
if osf_flag is not None:
# Log invisibly on the OSF.
osf_log = NodeLog(
action=osf_flag,
user=None,
params={
'project': node.parent_id,
},
date=timezone.now(),
should_hide=True,
)
osf_log.save()
except AttributeError:
return page_not_found(
request,
AttributeError(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
kwargs.get('guid')
)
)
)
return redirect(reverse_node(self.kwargs.get('guid')))
class NodeView(PermissionRequiredMixin, GuidView):
""" Allow authorized admin user to view nodes
View of OSF database. No admin models.
"""
template_name = 'nodes/node.html'
context_object_name = 'node'
permission_required = 'osf.view_node'
raise_exception = True
def get_context_data(self, **kwargs):
kwargs = super(NodeView, self).get_context_data(**kwargs)
kwargs.update({'SPAM_STATUS': SpamStatus}) # Pass spam status in to check against
        kwargs.update({'message': kwargs.get('message')})  # Pass along any status message
return kwargs
def get_object(self, queryset=None):
guid = self.kwargs.get('guid')
node = Node.load(guid) or Registration.load(guid)
return serialize_node(node)
class AdminNodeLogView(PermissionRequiredMixin, ListView):
""" Allow admins to see logs"""
template_name = 'nodes/node_logs.html'
context_object_name = 'node'
paginate_by = 10
paginate_orphans = 1
ordering = 'date'
permission_required = 'osf.view_node'
raise_exception = True
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
def get_queryset(self):
node = self.get_object()
query = Q(node_id__in=list(Node.objects.get_children(node).values_list('id', flat=True)) + [node.id])
return NodeLog.objects.filter(query).order_by('-date').include(
'node__guids', 'user__guids', 'original_node__guids', limit_includes=10
)
def get_context_data(self, **kwargs):
query_set = self.get_queryset()
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'logs': list(map(serialize_log, query_set)),
'page': page,
}
class RegistrationListView(PermissionRequiredMixin, ListView):
""" Allow authorized admin user to view list of registrations
View of OSF database. No admin models.
"""
template_name = 'nodes/registration_list.html'
paginate_by = 10
paginate_orphans = 1
ordering = 'created'
context_object_name = '-node'
permission_required = 'osf.view_registration'
raise_exception = True
def get_queryset(self):
return Registration.objects.all().order_by(self.ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'nodes': list(map(serialize_node, query_set)),
'page': page,
}
class StuckRegistrationListView(RegistrationListView):
""" List view that filters by registrations the have been archiving files by more then 24 hours.
"""
def get_queryset(self):
return Registration.find_failed_registrations().order_by(self.ordering)
class RegistrationUpdateEmbargoView(PermissionRequiredMixin, View):
""" Allow authorized admin user to update the embargo of a registration
"""
permission_required = ('osf.change_node')
raise_exception = True
def post(self, request, *args, **kwargs):
validation_only = (request.POST.get('validation_only', False) == 'True')
end_date = request.POST.get('date')
user = request.user
registration = self.get_object()
try:
end_date = pytz.utc.localize(datetime.strptime(end_date, '%m/%d/%Y'))
except ValueError:
return HttpResponse('Please enter a valid date.', status=400)
try:
if validation_only:
validate_embargo_date(registration, user, end_date)
else:
change_embargo_date(registration, user, end_date)
except ValidationError as e:
return HttpResponse(e, status=409)
except PermissionDenied as e:
return HttpResponse(e, status=403)
return redirect(reverse_node(self.kwargs.get('guid')))
def get_object(self, queryset=None):
return Registration.load(self.kwargs.get('guid'))
class NodeSpamList(PermissionRequiredMixin, ListView):
SPAM_STATE = SpamStatus.UNKNOWN
paginate_by = 25
paginate_orphans = 1
ordering = 'created'
context_object_name = '-node'
permission_required = 'osf.view_spam'
raise_exception = True
def get_queryset(self):
return Node.objects.filter(spam_status=self.SPAM_STATE).order_by(self.ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'nodes': list(map(serialize_node, query_set)),
'page': page,
}
class NodeFlaggedSpamList(NodeSpamList, DeleteView):
SPAM_STATE = SpamStatus.FLAGGED
template_name = 'nodes/flagged_spam_list.html'
def delete(self, request, *args, **kwargs):
if not request.user.has_perm('auth.mark_spam'):
raise PermissionDenied('You do not have permission to update a node flagged as spam.')
node_ids = [
nid for nid in request.POST.keys()
if nid != 'csrfmiddlewaretoken'
]
for nid in node_ids:
node = Node.load(nid)
osf_admin_change_status_identifier(node)
node.confirm_spam(save=True)
update_admin_log(
user_id=self.request.user.id,
object_id=nid,
object_repr='Node',
message='Confirmed SPAM: {}'.format(nid),
action_flag=CONFIRM_SPAM
)
return redirect('nodes:flagged-spam')
class NodeKnownSpamList(NodeSpamList):
SPAM_STATE = SpamStatus.SPAM
template_name = 'nodes/known_spam_list.html'
class NodeKnownHamList(NodeSpamList):
SPAM_STATE = SpamStatus.HAM
template_name = 'nodes/known_spam_list.html'
class NodeConfirmSpamView(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/confirm_spam.html'
permission_required = 'osf.mark_spam'
raise_exception = True
object_type = 'Node'
def delete(self, request, *args, **kwargs):
node = self.get_object()
osf_admin_change_status_identifier(node)
node.confirm_spam(save=True)
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr=self.object_type,
message='Confirmed SPAM: {}'.format(node._id),
action_flag=CONFIRM_SPAM
)
if isinstance(node, Node):
return redirect(reverse_node(self.kwargs.get('guid')))
def get_context_data(self, **kwargs):
context = super(NodeConfirmSpamView, self).get_context_data(**kwargs)
context['link'] = 'nodes:confirm-spam'
context['resource_type'] = self.object_type.lower()
return context
class NodeConfirmHamView(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/confirm_ham.html'
permission_required = 'osf.mark_spam'
raise_exception = True
object_type = 'Node'
def get_context_data(self, **kwargs):
context = super(NodeConfirmHamView, self).get_context_data(**kwargs)
context['link'] = 'nodes:confirm-ham'
context['resource_type'] = self.object_type.lower()
return context
def delete(self, request, *args, **kwargs):
node = self.get_object()
node.confirm_ham(save=True)
osf_admin_change_status_identifier(node)
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr=self.object_type,
message='Confirmed HAM: {}'.format(node._id),
action_flag=CONFIRM_HAM
)
if isinstance(node, Node):
return redirect(reverse_node(self.kwargs.get('guid')))
class NodeReindexShare(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/reindex_node_share.html'
permission_required = 'osf.mark_spam'
raise_exception = True
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
def delete(self, request, *args, **kwargs):
node = self.get_object()
update_node_share(node)
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr='Node',
message='Node Reindexed (SHARE): {}'.format(node._id),
action_flag=REINDEX_SHARE
)
if isinstance(node, Node):
return redirect(reverse_node(self.kwargs.get('guid')))
def get_context_data(self, **kwargs):
context = super(NodeReindexShare, self).get_context_data(**kwargs)
context['link'] = 'nodes:reindex-share-node'
context['resource_type'] = 'node'
return context
class NodeReindexElastic(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/reindex_node_elastic.html'
permission_required = 'osf.mark_spam'
raise_exception = True
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
def delete(self, request, *args, **kwargs):
node = self.get_object()
search.search.update_node(node, bulk=False, async_update=False)
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr='Node',
message='Node Reindexed (Elastic): {}'.format(node._id),
action_flag=REINDEX_ELASTIC
)
return redirect(reverse_node(self.kwargs.get('guid')))
def get_context_data(self, **kwargs):
context = super(NodeReindexElastic, self).get_context_data(**kwargs)
context['link'] = 'nodes:reindex-elastic-node'
context['resource_type'] = 'node'
return context
class StuckRegistrationsView(PermissionRequiredMixin, TemplateView):
permission_required = ('osf.view_node', 'osf.change_node')
raise_exception = True
context_object_name = 'node'
def get_object(self, queryset=None):
return Registration.load(self.kwargs.get('guid'))
class RestartStuckRegistrationsView(StuckRegistrationsView):
template_name = 'nodes/restart_registrations_modal.html'
def post(self, request, *args, **kwargs):
# Prevents circular imports that cause admin app to hang at startup
from osf.management.commands.force_archive import archive, verify
stuck_reg = self.get_object()
if verify(stuck_reg):
try:
archive(stuck_reg)
                messages.success(request, 'Registration archive process has been restarted')
except Exception as exc:
                messages.error(request, 'This registration cannot be unstuck due to {}; '
                                        'if the problem persists, get a developer to fix it.'.format(exc.__class__.__name__))
else:
            messages.error(request, 'This registration may not technically be stuck; '
                                    'if the problem persists, get a developer to fix it.')
return redirect(reverse_node(self.kwargs.get('guid')))
class RemoveStuckRegistrationsView(StuckRegistrationsView):
template_name = 'nodes/remove_registrations_modal.html'
def post(self, request, *args, **kwargs):
stuck_reg = self.get_object()
if Registration.find_failed_registrations().filter(id=stuck_reg.id).exists():
stuck_reg.delete_registration_tree(save=True)
messages.success(request, 'The registration has been deleted')
else:
            messages.error(request, 'This registration may not technically be stuck; '
                                    'if the problem persists, get a developer to fix it.')
return redirect(reverse_node(self.kwargs.get('guid')))
|
pattisdr/osf.io
|
admin/nodes/views.py
|
Python
|
apache-2.0
| 19,293
|
#
# Copyright (c) SAS Institute, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
UpdateBot is a module for the automated creation and updating of conary
packages from a yum or apt repository.
"""
from updatebot.bot import Bot
from updatebot.current import Bot as CurrentBot
from updatebot.config import UpdateBotConfig
|
sassoftware/mirrorball
|
updatebot/__init__.py
|
Python
|
apache-2.0
| 833
|
from tools.SVGGenerator import SVGGenerator
__author__ = 'Robin Quetin'
class GraphicsGenerator(object):
def __init__(self, output_format='svg'):
output_format = output_format.lower()
if output_format == 'svg':
self.ded_generator = SVGGenerator()
else:
raise RuntimeError('There is no generator registered for the provided output format.')
def generate(self, dot_code, output_path=None, model_type=None):
if output_path is None:
return self.ded_generator.generate(dot_code, model_type)
else:
self.ded_generator.generate_file(dot_code, output_path, model_type)
|
RobinQuetin/CAIRIS-web
|
cairis/cairis/tools/GraphicsGenerator.py
|
Python
|
apache-2.0
| 658
|
#!/usr/bin/env python3
"""Roll up the geo6 json to produce a tree suitable for sub9 query"""
__author__ = "H. Martin"
__version__ = "0.1.0"
import json
import math
from random import randint
data = {}
transformed = {}
with open('geohash_counter_6.json', encoding='utf-8') as data_file:
data = json.loads(data_file.read())
for geo6 in data:
geohash_info = {}
count = math.ceil(data[geo6] * 0.1)
geohash_info['count'] = count
geo4 = geo6[:4]
if geo4 not in transformed:
transformed[geo4] = {"count":0}
transformed[geo4]["count"] += count
geo5 = geo6[:5]
if geo5 not in transformed[geo4]:
transformed[geo4][geo5] = {"count":0}
transformed[geo4][geo5]["count"] += count
transformed[geo4][geo5][geo6] = geohash_info
with open('full_map.json', 'w', encoding='utf-8') as outfile:
json.dump(transformed, outfile, indent=2)
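# Shape of the roll-up (illustrative example, not part of the original script):
# a single input entry such as {"9q8yyk": 20} yields count = ceil(20 * 0.1) = 2
# and the nested geo4 -> geo5 -> geo6 tree
#   {"9q8y": {"count": 2, "9q8yy": {"count": 2, "9q8yyk": {"count": 2}}}}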
|
hbmartin/sub9-client
|
etl/produce_map.py
|
Python
|
apache-2.0
| 888
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for pw_console.log_view"""
import logging
import time
import sys
import unittest
from datetime import datetime
from unittest.mock import MagicMock, patch
from parameterized import parameterized # type: ignore
from prompt_toolkit.data_structures import Point
from pw_console.console_prefs import ConsolePrefs
from pw_console.log_view import LogView
from pw_console.log_screen import ScreenLine
from pw_console.text_formatting import (
flatten_formatted_text_tuples,
join_adjacent_style_tuples,
)
_PYTHON_3_8 = sys.version_info >= (
3,
8,
)
def _create_log_view():
log_pane = MagicMock()
log_pane.pane_resized = MagicMock(return_value=True)
log_pane.current_log_pane_width = 80
log_pane.current_log_pane_height = 10
application = MagicMock()
application.prefs = ConsolePrefs()
application.prefs.reset_config()
log_view = LogView(log_pane, application)
return log_view, log_pane
class TestLogView(unittest.TestCase):
"""Tests for LogView."""
# pylint: disable=invalid-name
def setUp(self):
self.maxDiff = None
# pylint: enable=invalid-name
def _create_log_view_with_logs(self, log_count=100):
log_view, log_pane = _create_log_view()
if log_count > 0:
test_log = logging.getLogger('log_view.test')
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
for i in range(log_count):
test_log.debug('Test log %s', i)
return log_view, log_pane
def test_follow_toggle(self) -> None:
log_view, _pane = _create_log_view()
self.assertTrue(log_view.follow)
log_view.toggle_follow()
self.assertFalse(log_view.follow)
def test_follow_scrolls_to_bottom(self) -> None:
log_view, _pane = _create_log_view()
log_view.toggle_follow()
_fragments = log_view.render_content()
self.assertFalse(log_view.follow)
self.assertEqual(log_view.get_current_line(), 0)
test_log = logging.getLogger('log_view.test')
        # Log 5 messages, current_line should stay at 0
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
for i in range(5):
test_log.debug('Test log %s', i)
_fragments = log_view.render_content()
self.assertEqual(log_view.get_total_count(), 5)
self.assertEqual(log_view.get_current_line(), 0)
# Turn follow on
log_view.toggle_follow()
self.assertTrue(log_view.follow)
        # Log another message, current_line should move to the last.
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
test_log.debug('Test log')
_fragments = log_view.render_content()
self.assertEqual(log_view.get_total_count(), 6)
self.assertEqual(log_view.get_current_line(), 5)
def test_scrolling(self) -> None:
"""Test all scrolling methods."""
log_view, log_pane = self._create_log_view_with_logs(log_count=100)
# Page scrolling needs to know the current window height.
log_pane.pane_resized = MagicMock(return_value=True)
log_pane.current_log_pane_width = 80
log_pane.current_log_pane_height = 10
log_view.render_content()
# Follow is on by default, current line should be at the end.
self.assertEqual(log_view.get_current_line(), 99)
# Move to the beginning.
log_view.scroll_to_top()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 0)
# Should not be able to scroll before the beginning.
log_view.scroll_up()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 0)
log_view.scroll_up_one_page()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 0)
# Single and multi line movement.
log_view.scroll_down()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 1)
log_view.scroll(5)
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 6)
log_view.scroll_up()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 5)
# Page down and up.
log_view.scroll_down_one_page()
self.assertEqual(log_view.get_current_line(), 15)
log_view.scroll_up_one_page()
self.assertEqual(log_view.get_current_line(), 5)
# Move to the end.
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 99)
# Should not be able to scroll beyond the end.
log_view.scroll_down()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 99)
log_view.scroll_down_one_page()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 99)
# Move up a bit to turn off follow
self.assertEqual(log_view.log_screen.cursor_position, 9)
log_view.scroll(-1)
self.assertEqual(log_view.log_screen.cursor_position, 8)
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 98)
# Simulate a mouse click to scroll.
# Click 1 lines from the top of the window.
log_view.scroll_to_position(Point(0, 1))
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 90)
# Disable follow mode if mouse click on line.
log_view.toggle_follow()
log_view.render_content()
self.assertTrue(log_view.follow)
self.assertEqual(log_view.get_current_line(), 99)
log_view.scroll_to_position(Point(0, 5))
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 95)
self.assertFalse(log_view.follow)
def test_render_content_and_cursor_position(self) -> None:
"""Test render_content results and get_cursor_position
get_cursor_position() should return the correct position depending on
what line is selected."""
# Mock time to always return the same value.
mock_time = MagicMock( # type: ignore
return_value=time.mktime(
datetime(2021, 7, 13, 0, 0, 0).timetuple()))
with patch('time.time', new=mock_time):
log_view, log_pane = self._create_log_view_with_logs(log_count=4)
# Mock needed LogPane functions that pull info from prompt_toolkit.
log_pane.get_horizontal_scroll_amount = MagicMock(return_value=0)
log_pane.current_log_pane_width = 80
log_pane.current_log_pane_height = 10
log_view.render_content()
log_view.scroll_to_top()
log_view.render_content()
# Scroll to top keeps the cursor on the bottom of the window.
self.assertEqual(log_view.get_cursor_position(), Point(x=0, y=9))
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_cursor_position(), Point(x=0, y=9))
expected_formatted_text = [
('', ''),
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
('', ' Test log 0'),
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
('', ' Test log 1'),
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
('', ' Test log 2'),
('class:selected-log-line class:log-time', '20210713 00:00:00'),
('class:selected-log-line ', ' '),
('class:selected-log-line class:log-level-10', 'DEBUG'),
('class:selected-log-line ',
' Test log 3 ')
] # yapf: disable
result_text = join_adjacent_style_tuples(
flatten_formatted_text_tuples(log_view._line_fragment_cache)) # pylint: disable=protected-access
self.assertEqual(result_text, expected_formatted_text)
def test_clear_scrollback(self) -> None:
"""Test various functions with clearing log scrollback history."""
# pylint: disable=protected-access
# Create log_view with 4 logs
starting_log_count = 4
log_view, _pane = self._create_log_view_with_logs(
log_count=starting_log_count)
log_view.render_content()
# Check setup is correct
self.assertTrue(log_view.follow)
self.assertEqual(log_view.get_current_line(), 3)
self.assertEqual(log_view.get_total_count(), 4)
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()),
['Test log 0', 'Test log 1', 'Test log 2', 'Test log 3'])
# Clear scrollback
log_view.clear_scrollback()
log_view.render_content()
# Follow is still on
self.assertTrue(log_view.follow)
self.assertEqual(log_view.hidden_line_count(), 4)
# Current line index should stay the same
self.assertEqual(log_view.get_current_line(), 3)
# Total count should stay the same
self.assertEqual(log_view.get_total_count(), 4)
# No lines returned
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()), [])
# Add Log 4 more lines
test_log = logging.getLogger('log_view.test')
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
for i in range(4):
test_log.debug('Test log %s', i + starting_log_count)
log_view.render_content()
# Current line
self.assertEqual(log_view.hidden_line_count(), 4)
self.assertEqual(log_view.get_last_log_index(), 7)
self.assertEqual(log_view.get_current_line(), 7)
self.assertEqual(log_view.get_total_count(), 8)
# Only the last 4 logs should appear
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()),
['Test log 4', 'Test log 5', 'Test log 6', 'Test log 7'])
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 7)
# Turn follow back on
log_view.toggle_follow()
log_view.undo_clear_scrollback()
# Current line and total are the same
self.assertEqual(log_view.get_current_line(), 7)
self.assertEqual(log_view.get_total_count(), 8)
# All logs should appear
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()), [
'Test log 0', 'Test log 1', 'Test log 2', 'Test log 3',
'Test log 4', 'Test log 5', 'Test log 6', 'Test log 7'
])
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 7)
def test_get_line_at_cursor_position(self) -> None:
"""Tests fuctions that rely on getting a log_index for the current
cursor position.
Including:
- LogScreen.fetch_subline_up
- LogScreen.fetch_subline_down
- LogView._update_log_index
"""
# pylint: disable=protected-access
# Create log_view with 4 logs
starting_log_count = 4
log_view, _pane = self._create_log_view_with_logs(
log_count=starting_log_count)
log_view.render_content()
# Check setup is correct
self.assertTrue(log_view.follow)
self.assertEqual(log_view.get_current_line(), 3)
self.assertEqual(log_view.get_total_count(), 4)
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()),
['Test log 0', 'Test log 1', 'Test log 2', 'Test log 3'])
self.assertEqual(log_view.log_screen.cursor_position, 9)
# Force the cursor_position to be larger than the log_screen
# line_buffer.
log_view.log_screen.cursor_position = 10
# Attempt to get the current line, no exception should be raised
result = log_view.log_screen.get_line_at_cursor_position()
# Log index should be None
self.assertEqual(result.log_index, None)
# Force the cursor_position to be < 0. This won't produce an error but
# would wrap around to the beginning.
log_view.log_screen.cursor_position = -1
# Attempt to get the current line, no exception should be raised
result = log_view.log_screen.get_line_at_cursor_position()
# Result should be a blank line
self.assertEqual(result, ScreenLine([('', '')]))
# Log index should be None
self.assertEqual(result.log_index, None)
def test_visual_select(self) -> None:
"""Test log line selection."""
log_view, log_pane = self._create_log_view_with_logs(log_count=100)
self.assertEqual(100, log_view.get_total_count())
# Page scrolling needs to know the current window height.
log_pane.pane_resized = MagicMock(return_value=True)
log_pane.current_log_pane_width = 80
log_pane.current_log_pane_height = 10
log_view.log_screen.reset_logs = MagicMock(
wraps=log_view.log_screen.reset_logs)
log_view.log_screen.get_lines = MagicMock(
wraps=log_view.log_screen.get_lines)
log_view.render_content()
log_view.log_screen.reset_logs.assert_called_once()
log_view.log_screen.get_lines.assert_called_once_with(
marked_logs_start=None, marked_logs_end=None)
log_view.log_screen.get_lines.reset_mock()
log_view.log_screen.reset_logs.reset_mock()
self.assertIsNone(log_view.marked_logs_start)
self.assertIsNone(log_view.marked_logs_end)
log_view.visual_select_line(Point(0, 9))
self.assertEqual(
(99, 99), (log_view.marked_logs_start, log_view.marked_logs_end))
log_view.visual_select_line(Point(0, 8))
log_view.visual_select_line(Point(0, 7))
self.assertEqual(
(97, 99), (log_view.marked_logs_start, log_view.marked_logs_end))
log_view.clear_visual_selection()
self.assertIsNone(log_view.marked_logs_start)
self.assertIsNone(log_view.marked_logs_end)
log_view.visual_select_line(Point(0, 1))
log_view.visual_select_line(Point(0, 2))
log_view.visual_select_line(Point(0, 3))
log_view.visual_select_line(Point(0, 4))
self.assertEqual(
(91, 94), (log_view.marked_logs_start, log_view.marked_logs_end))
# Make sure the log screen was not re-generated.
log_view.log_screen.reset_logs.assert_not_called()
log_view.log_screen.reset_logs.reset_mock()
# Render the screen
log_view.render_content()
log_view.log_screen.reset_logs.assert_called_once()
# Check the visual selection was specified
log_view.log_screen.get_lines.assert_called_once_with(
marked_logs_start=91, marked_logs_end=94)
log_view.log_screen.get_lines.reset_mock()
log_view.log_screen.reset_logs.reset_mock()
if _PYTHON_3_8:
from unittest import IsolatedAsyncioTestCase # type: ignore # pylint: disable=no-name-in-module
class TestLogViewFiltering(IsolatedAsyncioTestCase): # pylint: disable=undefined-variable
"""Test LogView log filtering capabilities."""
# pylint: disable=invalid-name
def setUp(self):
self.maxDiff = None
# pylint: enable=invalid-name
def _create_log_view_from_list(self, log_messages):
log_view, log_pane = _create_log_view()
test_log = logging.getLogger('log_view.test')
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
for log, extra_arg in log_messages:
test_log.debug('%s', log, extra=extra_arg)
return log_view, log_pane
@parameterized.expand([
(
# Test name
'regex filter',
# Search input_text
'log.*item',
# input_logs
[
('Log some item', dict()),
('Log another item', dict()),
('Some exception', dict()),
],
# expected_matched_lines
[
'Log some item',
'Log another item',
],
# expected_match_line_numbers
{0: 0, 1: 1},
# expected_export_text
(
' DEBUG Log some item\n'
' DEBUG Log another item\n'
),
None, # field
False, # invert
),
(
# Test name
'regex filter with field',
# Search input_text
'earth',
# input_logs
[
('Log some item',
dict(extra_metadata_fields={'planet': 'Jupiter'})),
('Log another item',
dict(extra_metadata_fields={'planet': 'Earth'})),
('Some exception',
dict(extra_metadata_fields={'planet': 'Earth'})),
],
# expected_matched_lines
[
'Log another item',
'Some exception',
],
# expected_match_line_numbers
{1: 0, 2: 1},
# expected_export_text
(
' DEBUG Earth Log another item\n'
' DEBUG Earth Some exception\n'
),
'planet', # field
False, # invert
),
(
# Test name
'regex filter with field inverted',
# Search input_text
'earth',
# input_logs
[
('Log some item',
dict(extra_metadata_fields={'planet': 'Jupiter'})),
('Log another item',
dict(extra_metadata_fields={'planet': 'Earth'})),
('Some exception',
dict(extra_metadata_fields={'planet': 'Earth'})),
],
# expected_matched_lines
[
'Log some item',
],
# expected_match_line_numbers
{0: 0},
# expected_export_text
(
' DEBUG Jupiter Log some item\n'
),
'planet', # field
True, # invert
),
]) # yapf: disable
async def test_log_filtering(
self,
_test_name,
input_text,
input_logs,
expected_matched_lines,
expected_match_line_numbers,
expected_export_text,
field=None,
invert=False,
) -> None:
"""Test run log view filtering."""
log_view, _log_pane = self._create_log_view_from_list(input_logs)
log_view.render_content()
self.assertEqual(log_view.get_total_count(), len(input_logs))
# Apply the search and wait for the match count background task
log_view.new_search(input_text, invert=invert, field=field)
await log_view.search_match_count_task
self.assertEqual(log_view.search_matched_lines,
expected_match_line_numbers)
# Apply the filter and wait for the filter background task
log_view.apply_filter()
await log_view.filter_existing_logs_task
# Do the number of logs match the expected count?
self.assertEqual(log_view.get_total_count(),
len(expected_matched_lines))
self.assertEqual(
[log.record.message for log in log_view.filtered_logs],
expected_matched_lines)
# Check exported text respects filtering
log_text = log_view._logs_to_text( # pylint: disable=protected-access
use_table_formatting=True)
# Remove leading time from resulting logs
log_text_no_datetime = ''
for line in log_text.splitlines():
log_text_no_datetime += (line[17:] + '\n')
self.assertEqual(log_text_no_datetime, expected_export_text)
# Select the bottom log line
log_view.render_content()
log_view.visual_select_line(Point(0, 9)) # Window height is 10
# Export to text
log_text = log_view._logs_to_text( # pylint: disable=protected-access
selected_lines_only=True,
use_table_formatting=False)
self.assertEqual(
# Remove date, time, and level
log_text[24:].strip(),
expected_matched_lines[0].strip())
            # Clear filters and check the number of lines is back to normal.
log_view.clear_filters()
self.assertEqual(log_view.get_total_count(), len(input_logs))
if __name__ == '__main__':
unittest.main()
|
google/pigweed
|
pw_console/py/log_view_test.py
|
Python
|
apache-2.0
| 22,584
|
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communication using the DynamixelSDK."""
import atexit
import logging
import time
from typing import Optional, Sequence, Union, Tuple
import numpy as np
PROTOCOL_VERSION = 2.0
# The following addresses assume XH motors.
ADDR_TORQUE_ENABLE = 64
ADDR_GOAL_POSITION = 116
ADDR_PRESENT_POSITION = 132
ADDR_PRESENT_VELOCITY = 128
ADDR_PRESENT_CURRENT = 126
ADDR_PRESENT_POS_VEL_CUR = 126
# Data Byte Length
LEN_PRESENT_POSITION = 4
LEN_PRESENT_VELOCITY = 4
LEN_PRESENT_CURRENT = 2
LEN_PRESENT_POS_VEL_CUR = 10
LEN_GOAL_POSITION = 4
DEFAULT_POS_SCALE = 2.0 * np.pi / 4096 # 0.088 degrees
# See http://emanual.robotis.com/docs/en/dxl/x/xh430-v210/#goal-velocity
DEFAULT_VEL_SCALE = 0.229 * 2.0 * np.pi / 60.0 # 0.229 rpm
DEFAULT_CUR_SCALE = 1.34
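# For orientation (illustrative note, not in the original file): with the
# default scales, a raw present-position reading of 2048 ticks corresponds to
# 2048 * DEFAULT_POS_SCALE = pi radians, i.e. half a revolution of an XH motor.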
def dynamixel_cleanup_handler():
"""Cleanup function to ensure Dynamixels are disconnected properly."""
open_clients = list(DynamixelClient.OPEN_CLIENTS)
for open_client in open_clients:
if open_client.port_handler.is_using:
logging.warning('Forcing client to close.')
open_client.port_handler.is_using = False
open_client.disconnect()
def signed_to_unsigned(value: int, size: int) -> int:
"""Converts the given value to its unsigned representation."""
if value < 0:
bit_size = 8 * size
        # Two's complement: add 2**bit_size so that, e.g., -1 maps to 0xFF...FF.
        value = (1 << bit_size) + value
return value
def unsigned_to_signed(value: int, size: int) -> int:
"""Converts the given value from its unsigned representation."""
bit_size = 8 * size
if (value & (1 << (bit_size - 1))) != 0:
value = -((1 << bit_size) - value)
return value
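# Illustration (values chosen arbitrarily): for a 4-byte register the two
# helpers above are two's-complement inverses of each other, e.g.
#   signed_to_unsigned(-1, size=4)          -> 0xFFFFFFFF
#   unsigned_to_signed(0xFFFFFFFF, size=4)  -> -1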
class DynamixelClient:
"""Client for communicating with Dynamixel motors.
NOTE: This only supports Protocol 2.
"""
# The currently open clients.
OPEN_CLIENTS = set()
def __init__(self,
motor_ids: Sequence[int],
port: str = '/dev/ttyUSB0',
baudrate: int = 1000000,
lazy_connect: bool = False,
pos_scale: Optional[float] = None,
vel_scale: Optional[float] = None,
cur_scale: Optional[float] = None):
"""Initializes a new client.
Args:
motor_ids: All motor IDs being used by the client.
port: The Dynamixel device to talk to. e.g.
- Linux: /dev/ttyUSB0
- Mac: /dev/tty.usbserial-*
- Windows: COM1
baudrate: The Dynamixel baudrate to communicate with.
lazy_connect: If True, automatically connects when calling a method
that requires a connection, if not already connected.
pos_scale: The scaling factor for the positions. This is
motor-dependent. If not provided, uses the default scale.
vel_scale: The scaling factor for the velocities. This is
                motor-dependent. If not provided, uses the default scale.
cur_scale: The scaling factor for the currents. This is
                motor-dependent. If not provided, uses the default scale.
"""
import dynamixel_sdk
self.dxl = dynamixel_sdk
self.motor_ids = list(motor_ids)
self.port_name = port
self.baudrate = baudrate
self.lazy_connect = lazy_connect
self.port_handler = self.dxl.PortHandler(port)
self.packet_handler = self.dxl.PacketHandler(PROTOCOL_VERSION)
self._pos_vel_cur_reader = DynamixelPosVelCurReader(
self,
self.motor_ids,
pos_scale=pos_scale if pos_scale is not None else DEFAULT_POS_SCALE,
vel_scale=vel_scale if vel_scale is not None else DEFAULT_VEL_SCALE,
cur_scale=cur_scale if cur_scale is not None else DEFAULT_CUR_SCALE,
)
self._sync_writers = {}
self.OPEN_CLIENTS.add(self)
@property
def is_connected(self) -> bool:
return self.port_handler.is_open
def connect(self):
"""Connects to the Dynamixel motors.
NOTE: This should be called after all DynamixelClients on the same
process are created.
"""
assert not self.is_connected, 'Client is already connected.'
if self.port_handler.openPort():
            logging.info('Successfully opened port: %s', self.port_name)
else:
raise OSError(
('Failed to open port at {} (Check that the device is powered '
'on and connected to your computer).').format(self.port_name))
if self.port_handler.setBaudRate(self.baudrate):
            logging.info('Successfully set baudrate to %d', self.baudrate)
else:
raise OSError(
('Failed to set the baudrate to {} (Ensure that the device was '
'configured for this baudrate).').format(self.baudrate))
# Start with all motors enabled.
self.set_torque_enabled(self.motor_ids, True)
def disconnect(self):
"""Disconnects from the Dynamixel device."""
if not self.is_connected:
return
if self.port_handler.is_using:
logging.error('Port handler in use; cannot disconnect.')
return
# Ensure motors are disabled at the end.
self.set_torque_enabled(self.motor_ids, False, retries=0)
self.port_handler.closePort()
if self in self.OPEN_CLIENTS:
self.OPEN_CLIENTS.remove(self)
def set_torque_enabled(self,
motor_ids: Sequence[int],
enabled: bool,
retries: int = -1,
retry_interval: float = 0.25):
"""Sets whether torque is enabled for the motors.
Args:
motor_ids: The motor IDs to configure.
enabled: Whether to engage or disengage the motors.
retries: The number of times to retry. If this is <0, will retry
forever.
retry_interval: The number of seconds to wait between retries.
"""
remaining_ids = list(motor_ids)
while remaining_ids:
remaining_ids = self.write_byte(
remaining_ids,
int(enabled),
ADDR_TORQUE_ENABLE,
)
if remaining_ids:
logging.error('Could not set torque %s for IDs: %s',
'enabled' if enabled else 'disabled',
str(remaining_ids))
if retries == 0:
break
time.sleep(retry_interval)
retries -= 1
def read_pos_vel_cur(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns the current positions and velocities."""
return self._pos_vel_cur_reader.read()
def write_desired_pos(self, motor_ids: Sequence[int],
positions: np.ndarray):
"""Writes the given desired positions.
Args:
motor_ids: The motor IDs to write to.
positions: The joint angles in radians to write.
"""
assert len(motor_ids) == len(positions)
# Convert to Dynamixel position space.
positions = positions / self._pos_vel_cur_reader.pos_scale
self.sync_write(motor_ids, positions, ADDR_GOAL_POSITION,
LEN_GOAL_POSITION)
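    # Example (numbers for illustration only): with DEFAULT_POS_SCALE a command
    # of pi radians becomes pi / DEFAULT_POS_SCALE ~= 2048 encoder ticks before
    # being sync-written to ADDR_GOAL_POSITION.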
def write_byte(
self,
motor_ids: Sequence[int],
value: int,
address: int,
) -> Sequence[int]:
"""Writes a value to the motors.
Args:
motor_ids: The motor IDs to write to.
value: The value to write to the control table.
address: The control table address to write to.
Returns:
A list of IDs that were unsuccessful.
"""
self.check_connected()
errored_ids = []
for motor_id in motor_ids:
comm_result, dxl_error = self.packet_handler.write1ByteTxRx(
self.port_handler, motor_id, address, value)
success = self.handle_packet_result(
comm_result, dxl_error, motor_id, context='write_byte')
if not success:
errored_ids.append(motor_id)
return errored_ids
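    # Usage sketch (motor ID 1 is assumed for illustration): torque could be
    # enabled on a single motor with `self.write_byte([1], 1, ADDR_TORQUE_ENABLE)`;
    # a non-empty return value lists the IDs for which the write failed.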
def sync_write(self, motor_ids: Sequence[int],
values: Sequence[Union[int, float]], address: int,
size: int):
"""Writes values to a group of motors.
Args:
motor_ids: The motor IDs to write to.
values: The values to write.
address: The control table address to write to.
size: The size of the control table value being written to.
"""
self.check_connected()
key = (address, size)
if key not in self._sync_writers:
self._sync_writers[key] = self.dxl.GroupSyncWrite(
self.port_handler, self.packet_handler, address, size)
sync_writer = self._sync_writers[key]
errored_ids = []
for motor_id, desired_pos in zip(motor_ids, values):
value = signed_to_unsigned(int(desired_pos), size=size)
value = value.to_bytes(size, byteorder='little')
success = sync_writer.addParam(motor_id, value)
if not success:
errored_ids.append(motor_id)
if errored_ids:
logging.error('Sync write failed for: %s', str(errored_ids))
comm_result = sync_writer.txPacket()
self.handle_packet_result(comm_result, context='sync_write')
sync_writer.clearParam()
def check_connected(self):
"""Ensures the robot is connected."""
if self.lazy_connect and not self.is_connected:
self.connect()
if not self.is_connected:
raise OSError('Must call connect() first.')
def handle_packet_result(self,
comm_result: int,
dxl_error: Optional[int] = None,
dxl_id: Optional[int] = None,
context: Optional[str] = None):
"""Handles the result from a communication request."""
error_message = None
if comm_result != self.dxl.COMM_SUCCESS:
error_message = self.packet_handler.getTxRxResult(comm_result)
elif dxl_error is not None:
error_message = self.packet_handler.getRxPacketError(dxl_error)
if error_message:
if dxl_id is not None:
error_message = '[Motor ID: {}] {}'.format(
dxl_id, error_message)
if context is not None:
error_message = '> {}: {}'.format(context, error_message)
logging.error(error_message)
return False
return True
def convert_to_unsigned(self, value: int, size: int) -> int:
"""Converts the given value to its unsigned representation."""
if value < 0:
            # Two's complement: add 2**(8 * size), mirroring signed_to_unsigned().
            value = (1 << (8 * size)) + value
return value
def __enter__(self):
"""Enables use as a context manager."""
if not self.is_connected:
self.connect()
return self
def __exit__(self, *args):
"""Enables use as a context manager."""
self.disconnect()
def __del__(self):
"""Automatically disconnect on destruction."""
self.disconnect()
class DynamixelReader:
"""Reads data from Dynamixel motors.
This wraps a GroupBulkRead from the DynamixelSDK.
"""
def __init__(self, client: DynamixelClient, motor_ids: Sequence[int],
address: int, size: int):
"""Initializes a new reader."""
self.client = client
self.motor_ids = motor_ids
self.address = address
self.size = size
self._initialize_data()
self.operation = self.client.dxl.GroupBulkRead(client.port_handler,
client.packet_handler)
for motor_id in motor_ids:
success = self.operation.addParam(motor_id, address, size)
if not success:
raise OSError(
'[Motor ID: {}] Could not add parameter to bulk read.'
.format(motor_id))
def read(self, retries: int = 1):
"""Reads data from the motors."""
self.client.check_connected()
success = False
while not success and retries >= 0:
comm_result = self.operation.txRxPacket()
success = self.client.handle_packet_result(
comm_result, context='read')
retries -= 1
# If we failed, send a copy of the previous data.
if not success:
return self._get_data()
errored_ids = []
for i, motor_id in enumerate(self.motor_ids):
# Check if the data is available.
available = self.operation.isAvailable(motor_id, self.address,
self.size)
if not available:
errored_ids.append(motor_id)
continue
self._update_data(i, motor_id)
if errored_ids:
logging.error('Bulk read data is unavailable for: %s',
str(errored_ids))
return self._get_data()
def _initialize_data(self):
"""Initializes the cached data."""
self._data = np.zeros(len(self.motor_ids), dtype=np.float32)
def _update_data(self, index: int, motor_id: int):
"""Updates the data index for the given motor ID."""
self._data[index] = self.operation.getData(motor_id, self.address,
self.size)
def _get_data(self):
"""Returns a copy of the data."""
return self._data.copy()
class DynamixelPosVelCurReader(DynamixelReader):
"""Reads positions and velocities."""
def __init__(self,
client: DynamixelClient,
motor_ids: Sequence[int],
pos_scale: float = 1.0,
vel_scale: float = 1.0,
cur_scale: float = 1.0):
super().__init__(
client,
motor_ids,
address=ADDR_PRESENT_POS_VEL_CUR,
size=LEN_PRESENT_POS_VEL_CUR,
)
self.pos_scale = pos_scale
self.vel_scale = vel_scale
self.cur_scale = cur_scale
def _initialize_data(self):
"""Initializes the cached data."""
self._pos_data = np.zeros(len(self.motor_ids), dtype=np.float32)
self._vel_data = np.zeros(len(self.motor_ids), dtype=np.float32)
self._cur_data = np.zeros(len(self.motor_ids), dtype=np.float32)
def _update_data(self, index: int, motor_id: int):
"""Updates the data index for the given motor ID."""
cur = self.operation.getData(motor_id, ADDR_PRESENT_CURRENT,
LEN_PRESENT_CURRENT)
vel = self.operation.getData(motor_id, ADDR_PRESENT_VELOCITY,
LEN_PRESENT_VELOCITY)
pos = self.operation.getData(motor_id, ADDR_PRESENT_POSITION,
LEN_PRESENT_POSITION)
cur = unsigned_to_signed(cur, size=2)
vel = unsigned_to_signed(vel, size=4)
pos = unsigned_to_signed(pos, size=4)
self._pos_data[index] = float(pos) * self.pos_scale
self._vel_data[index] = float(vel) * self.vel_scale
self._cur_data[index] = float(cur) * self.cur_scale
def _get_data(self):
"""Returns a copy of the data."""
return (self._pos_data.copy(), self._vel_data.copy(),
self._cur_data.copy())
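    # Consequently, read() on this reader yields a (positions, velocities,
    # currents) tuple of float32 arrays, one entry per motor ID, already scaled
    # by pos_scale, vel_scale and cur_scale.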
# Register global cleanup function.
atexit.register(dynamixel_cleanup_handler)
if __name__ == '__main__':
import argparse
import itertools
parser = argparse.ArgumentParser()
parser.add_argument(
'-m',
'--motors',
required=True,
help='Comma-separated list of motor IDs.')
parser.add_argument(
'-d',
'--device',
default='/dev/ttyUSB0',
help='The Dynamixel device to connect to.')
parser.add_argument(
'-b', '--baud', default=1000000, help='The baudrate to connect with.')
parsed_args = parser.parse_args()
motors = [int(motor) for motor in parsed_args.motors.split(',')]
way_points = [np.zeros(len(motors)), np.full(len(motors), np.pi)]
with DynamixelClient(motors, parsed_args.device,
parsed_args.baud) as dxl_client:
for step in itertools.count():
if step > 0 and step % 50 == 0:
way_point = way_points[(step // 100) % len(way_points)]
print('Writing: {}'.format(way_point.tolist()))
dxl_client.write_desired_pos(motors, way_point)
read_start = time.time()
pos_now, vel_now, cur_now = dxl_client.read_pos_vel_cur()
if step % 5 == 0:
print('[{}] Frequency: {:.2f} Hz'.format(
step, 1.0 / (time.time() - read_start)))
print('> Pos: {}'.format(pos_now.tolist()))
print('> Vel: {}'.format(vel_now.tolist()))
print('> Cur: {}'.format(cur_now.tolist()))
| google-research/robel | robel/components/robot/dynamixel_client.py | Python | apache-2.0 | 17,950 |
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ipaddr module."""
import unittest
import time
import ipaddr
# Compatibility function to cast str to bytes objects
if ipaddr._compat_has_real_bytes:
_cb = lambda bytestr: bytes(bytestr, 'charmap')
else:
_cb = str
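# For instance, _cb('\x01\x02') yields b'\x01\x02' when real bytes objects are
# available and the plain str '\x01\x02' otherwise, so the packed-representation
# tests below can be written once for both cases.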
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
self.ipv4_hostmask = ipaddr.IPv4Network('10.0.0.1/0.255.255.255')
self.ipv6 = ipaddr.IPv6Network('2001:658:22a:cafe:200:0:0:1/64')
def tearDown(self):
del(self.ipv4)
del(self.ipv4_hostmask)
del(self.ipv6)
del(self)
def testRepr(self):
self.assertEqual("IPv4Network('1.2.3.4/32')",
repr(ipaddr.IPv4Network('1.2.3.4')))
self.assertEqual("IPv6Network('::1/128')",
repr(ipaddr.IPv6Network('::1')))
def testAutoMasking(self):
addr1 = ipaddr.IPv4Network('1.1.1.255/24')
addr1_masked = ipaddr.IPv4Network('1.1.1.0/24')
self.assertEqual(addr1_masked, addr1.masked())
addr2 = ipaddr.IPv6Network('2000:cafe::efac:100/96')
addr2_masked = ipaddr.IPv6Network('2000:cafe::/96')
self.assertEqual(addr2_masked, addr2.masked())
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddr.IPv4Address('1.1.1.1') + 255,
ipaddr.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddr.IPv4Address('1.1.1.1') - 256,
ipaddr.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddr.IPv6Address('::1') + (2**16 - 2),
ipaddr.IPv6Address('::ffff'))
self.assertEqual(ipaddr.IPv6Address('::ffff') - (2**16 - 2),
ipaddr.IPv6Address('::1'))
def testInvalidStrings(self):
self.assertRaises(ValueError, ipaddr.IPNetwork, '')
self.assertRaises(ValueError, ipaddr.IPNetwork, 'www.google.com')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3.4.5')
self.assertRaises(ValueError, ipaddr.IPNetwork, '301.2.2.2')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':2:3:4:5:6:7:8')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:9')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1::3:4:5:6::8')
self.assertRaises(ValueError, ipaddr.IPNetwork, 'a:')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '::a:')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1ffff::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '0xa::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:1a.2.3.4')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:1.2.3.4:8')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network, '')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'google.com')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'::1.2.3.4')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network, '')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'google.com')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'1.2.3.4')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'cafe:cafe::/128/190')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'1234:axy::b')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Address,
'1234:axy::b')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Address(1)._ip_int_from_string,
'1.a.2.3')
self.assertEqual(False, ipaddr.IPv4Network(1)._is_hostmask('1.a.2.3'))
def testGetNetwork(self):
self.assertEqual(int(self.ipv4.network), 16909056)
self.assertEqual(str(self.ipv4.network), '1.2.3.0')
self.assertEqual(str(self.ipv4_hostmask.network), '10.0.0.0')
self.assertEqual(int(self.ipv6.network),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6.network),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6.hostmask),
'::ffff:ffff:ffff:ffff')
def testBadVersionComparison(self):
# These should always raise TypeError
v4addr = ipaddr.IPAddress('1.1.1.1')
v4net = ipaddr.IPNetwork('1.1.1.1')
v6addr = ipaddr.IPAddress('::1')
        v6net = ipaddr.IPNetwork('::1')
self.assertRaises(TypeError, v4addr.__lt__, v6addr)
self.assertRaises(TypeError, v4addr.__gt__, v6addr)
self.assertRaises(TypeError, v4net.__lt__, v6net)
self.assertRaises(TypeError, v4net.__gt__, v6net)
self.assertRaises(TypeError, v6addr.__lt__, v4addr)
self.assertRaises(TypeError, v6addr.__gt__, v4addr)
self.assertRaises(TypeError, v6net.__lt__, v4net)
self.assertRaises(TypeError, v6net.__gt__, v4net)
def testMixedTypeComparison(self):
v4addr = ipaddr.IPAddress('1.1.1.1')
v4net = ipaddr.IPNetwork('1.1.1.1/32')
v6addr = ipaddr.IPAddress('::1')
v6net = ipaddr.IPNetwork('::1/128')
self.assertRaises(TypeError, lambda: v4addr < v4net)
self.assertRaises(TypeError, lambda: v4addr > v4net)
self.assertRaises(TypeError, lambda: v4net < v4addr)
self.assertRaises(TypeError, lambda: v4net > v4addr)
self.assertRaises(TypeError, lambda: v6addr < v6net)
self.assertRaises(TypeError, lambda: v6addr > v6net)
self.assertRaises(TypeError, lambda: v6net < v6addr)
self.assertRaises(TypeError, lambda: v6net > v6addr)
        # with get_mixed_type_key, you can sort addresses and networks together.
self.assertEqual([v4addr, v4net], sorted([v4net, v4addr],
key=ipaddr.get_mixed_type_key))
self.assertEqual([v6addr, v6net], sorted([v6net, v6addr],
key=ipaddr.get_mixed_type_key))
def testIpFromInt(self):
self.assertEqual(self.ipv4.ip, ipaddr.IPv4Network(16909060).ip)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, 2**32)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, -1)
ipv4 = ipaddr.IPNetwork('1.2.3.4')
ipv6 = ipaddr.IPNetwork('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddr.IPNetwork(int(ipv4)))
self.assertEqual(ipv6, ipaddr.IPNetwork(int(ipv6)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6.ip, ipaddr.IPv6Network(v6_int).ip)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, 2**128)
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, -1)
self.assertEqual(ipaddr.IPNetwork(self.ipv4.ip).version, 4)
self.assertEqual(ipaddr.IPNetwork(self.ipv6.ip).version, 6)
if ipaddr._compat_has_real_bytes: # on python3+
def testIpFromPacked(self):
ip = ipaddr.IP
self.assertEqual(self.ipv4.ip,
ip(_cb('\x01\x02\x03\x04')).ip)
self.assertEqual(ip('255.254.253.252'),
ip(_cb('\xff\xfe\xfd\xfc')))
self.assertRaises(ValueError, ipaddr.IP, _cb('\x00' * 3))
self.assertRaises(ValueError, ipaddr.IP, _cb('\x00' * 5))
self.assertEqual(self.ipv6.ip,
ip(_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
'\x02\x00\x00\x00\x00\x00\x00\x01')).ip)
self.assertEqual(ip('ffff:2:3:4:ffff::'),
ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' +
'\xff\xff' + '\x00' * 6)))
self.assertEqual(ip('::'),
ip(_cb('\x00' * 16)))
self.assertRaises(ValueError, ip, _cb('\x00' * 15))
self.assertRaises(ValueError, ip, _cb('\x00' * 17))
def testGetIp(self):
self.assertEqual(int(self.ipv4.ip), 16909060)
self.assertEqual(str(self.ipv4.ip), '1.2.3.4')
self.assertEqual(str(self.ipv4_hostmask.ip), '10.0.0.1')
self.assertEqual(int(self.ipv6.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4.netmask), 4294967040L)
self.assertEqual(str(self.ipv4.netmask), '255.255.255.0')
self.assertEqual(str(self.ipv4_hostmask.netmask), '255.0.0.0')
self.assertEqual(int(self.ipv6.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddr.IPv4Network('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.netmask), 0)
self.assert_(ipv4_zero_netmask._is_valid_netmask(str(0)))
ipv6_zero_netmask = ipaddr.IPv6Network('::1/0')
self.assertEqual(int(ipv6_zero_netmask.netmask), 0)
self.assert_(ipv6_zero_netmask._is_valid_netmask(str(0)))
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4.broadcast), 16909311L)
self.assertEqual(str(self.ipv4.broadcast), '1.2.3.255')
self.assertEqual(int(self.ipv6.broadcast),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6.broadcast),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4.prefixlen, 24)
self.assertEqual(self.ipv6.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4.supernet().network), '1.2.2.0')
self.assertEqual(ipaddr.IPv4Network('0.0.0.0/0').supernet(),
ipaddr.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6.supernet().network),
'2001:658:22a:cafe::')
self.assertEqual(ipaddr.IPv6Network('::0/0').supernet(),
ipaddr.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4.supernet(3).network), '1.2.0.0')
self.assertEqual(self.ipv6.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6.supernet(3).network),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv4.supernet, new_prefix=25)
self.assertEqual(self.ipv4.supernet(prefixlen_diff=2),
self.ipv4.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv6.supernet, new_prefix=65)
self.assertEqual(self.ipv6.supernet(prefixlen_diff=2),
self.ipv6.supernet(new_prefix=62))
def testIterSubnets(self):
self.assertEqual(self.ipv4.subnet(), list(self.ipv4.iter_subnets()))
self.assertEqual(self.ipv6.subnet(), list(self.ipv6.iter_subnets()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4.subnet(prefixlen_diff=3)),
sorted(self.ipv4.subnet(new_prefix=27)))
self.assertRaises(ValueError, self.ipv4.subnet, new_prefix=23)
self.assertRaises(ValueError, self.ipv4.subnet,
prefixlen_diff=3, new_prefix=27)
self.assertEqual(sorted(self.ipv6.subnet(prefixlen_diff=4)),
sorted(self.ipv6.subnet(new_prefix=68)))
self.assertRaises(ValueError, self.ipv6.subnet, new_prefix=63)
self.assertRaises(ValueError, self.ipv6.subnet,
prefixlen_diff=4, new_prefix=68)
def testGetSubnet(self):
self.assertEqual(self.ipv4.subnet()[0].prefixlen, 25)
self.assertEqual(str(self.ipv4.subnet()[0].network), '1.2.3.0')
self.assertEqual(str(self.ipv4.subnet()[1].network), '1.2.3.128')
self.assertEqual(self.ipv6.subnet()[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddr.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddr.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4.subnet(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6.subnet(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.subnet, 9)
self.assertRaises(ValueError, self.ipv6.subnet, 65)
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.supernet, 25)
self.assertRaises(ValueError, self.ipv6.supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ValueError, self.ipv4.subnet, -1)
self.assertRaises(ValueError, self.ipv6.subnet, -1)
def testGetNumHosts(self):
self.assertEqual(self.ipv4.numhosts, 256)
self.assertEqual(self.ipv4.subnet()[0].numhosts, 128)
self.assertEqual(self.ipv4.supernet().numhosts, 512)
self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
self.assertEqual(self.ipv6.subnet()[0].numhosts, 9223372036854775808)
self.assertEqual(self.ipv6.supernet().numhosts, 36893488147419103232)
def testContains(self):
self.assertTrue(ipaddr.IPv4Network('1.2.3.128/25') in self.ipv4)
self.assertFalse(ipaddr.IPv4Network('1.2.4.1/24') in self.ipv4)
self.assertFalse(self.ipv4 in self.ipv6)
self.assertFalse(self.ipv6 in self.ipv4)
self.assertTrue(self.ipv4 in self.ipv4)
self.assertTrue(self.ipv6 in self.ipv6)
        # We can test addresses and strings as well.
addr1 = ipaddr.IPv4Address('1.2.3.37')
self.assertTrue(addr1 in self.ipv4)
def testBadAddress(self):
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
'poop')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '1.2.3.256')
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'poopv6')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '1.2.3.4/32/24')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv4Network, '10/8')
self.assertRaises(ipaddr.AddressValueError,
ipaddr.IPv6Network, '10/8')
def testBadNetMask(self):
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/33')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.2.3.4/254.254.255.256')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv4Network, '1.1.1.1/240.255.0.0')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv6Network, '::1/')
self.assertRaises(ipaddr.NetmaskValueError,
ipaddr.IPv6Network, '::1/129')
def testNth(self):
self.assertEqual(str(self.ipv4[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
self.assertEqual(str(self.ipv6[5]),
'2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddr.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEquals(self):
self.assertTrue(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/24'))
self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv4 == ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertFalse(self.ipv4 == '')
self.assertFalse(self.ipv4 == [])
self.assertFalse(self.ipv4 == 2)
self.assertTrue(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv6 == '')
self.assertFalse(self.ipv6 == [])
self.assertFalse(self.ipv6 == 2)
def testNotEquals(self):
addr1 = ipaddr.IPAddress('1.2.3.4')
self.assertFalse(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/24'))
self.assertFalse(self.ipv4 == addr1)
self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv4 != ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertTrue(self.ipv4 != '')
self.assertTrue(self.ipv4 != [])
self.assertTrue(self.ipv4 != 2)
addr2 = ipaddr.IPAddress('2001:658:22a:cafe:200::1')
self.assertFalse(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6 == addr2)
self.assertTrue(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv6 != '')
self.assertTrue(self.ipv6 != [])
self.assertTrue(self.ipv6 != 2)
def testSlash32Constructor(self):
self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/255.255.255.255')),
'1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEquals(str(ipaddr.IPv6Network('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
def testCollapsing(self):
# test only IP addresses including some duplicates
ip1 = ipaddr.IPv4Address('1.1.1.0')
ip2 = ipaddr.IPv4Address('1.1.1.1')
ip3 = ipaddr.IPv4Address('1.1.1.2')
ip4 = ipaddr.IPv4Address('1.1.1.3')
ip5 = ipaddr.IPv4Address('1.1.1.4')
ip6 = ipaddr.IPv4Address('1.1.1.0')
        # check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/30'),
ipaddr.IPv4Network('1.1.1.4/32')])
# test a mix of IP addresses and networks including some duplicates
ip1 = ipaddr.IPv4Address('1.1.1.0')
ip2 = ipaddr.IPv4Address('1.1.1.1')
ip3 = ipaddr.IPv4Address('1.1.1.2')
ip4 = ipaddr.IPv4Address('1.1.1.3')
ip5 = ipaddr.IPv4Network('1.1.1.4/30')
ip6 = ipaddr.IPv4Network('1.1.1.4/30')
        # check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip5, ip1, ip2, ip3, ip4, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/29')])
# test only IP networks
ip1 = ipaddr.IPv4Network('1.1.0.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.0/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
ip4 = ipaddr.IPv4Network('1.1.3.0/24')
ip5 = ipaddr.IPv4Network('1.1.4.0/24')
# stored in no particular order b/c we want CollapseAddr to call [].sort
ip6 = ipaddr.IPv4Network('1.1.0.0/22')
        # check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/22'),
ipaddr.IPv4Network('1.1.4.0/24')])
# test that two addresses are supernet'ed properly
collapsed = ipaddr.collapse_address_list([ip1, ip2])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/23')])
# test same IP networks
ip_same1 = ip_same2 = ipaddr.IPv4Network('1.1.1.1/32')
self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
[ip_same1])
# test same IP addresses
ip_same1 = ip_same2 = ipaddr.IPv4Address('1.1.1.1')
self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
[ipaddr.IPNetwork('1.1.1.1/32')])
ip1 = ipaddr.IPv6Network('::2001:1/100')
ip2 = ipaddr.IPv6Network('::2002:1/120')
ip3 = ipaddr.IPv6Network('::2001:1/96')
# test that ipv6 addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3])
self.assertEqual(collapsed, [ip3])
# the toejam test
ip1 = ipaddr.IPAddress('1.1.1.1')
ip2 = ipaddr.IPAddress('::1')
self.assertRaises(TypeError, ipaddr.collapse_address_list,
[ip1, ip2])
def testSummarizing(self):
#ip = ipaddr.IPAddress
#ipnet = ipaddr.IPNetwork
summarize = ipaddr.summarize_address_range
ip1 = ipaddr.IPAddress('1.1.1.0')
ip2 = ipaddr.IPAddress('1.1.1.255')
        # test a /24 is summarized properly
self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddr.IPAddress('1.1.1.8')
self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1.1.1.0/29'),
ipaddr.IPNetwork('1.1.1.8')])
ip1 = ipaddr.IPAddress('1::')
ip2 = ipaddr.IPAddress('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
        # test an IPv6 range is summarized properly
self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddr.IPAddress('2::')
self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1::/16'),
ipaddr.IPNetwork('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, summarize, ipaddr.IPAddress('1.1.1.0'),
ipaddr.IPAddress('1.1.0.0'))
# test exception raised when first and last aren't IP addresses
self.assertRaises(TypeError, summarize,
ipaddr.IPNetwork('1.1.1.0'),
ipaddr.IPNetwork('1.1.0.0'))
self.assertRaises(TypeError, summarize,
ipaddr.IPNetwork('1.1.1.0'), ipaddr.IPNetwork('1.1.0.0'))
# test exception raised when first and last are not same version
self.assertRaises(TypeError, summarize, ipaddr.IPAddress('::'),
ipaddr.IPNetwork('1.1.0.0'))
def testAddressComparison(self):
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.1'))
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.2'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::1'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddr.IPv4Network('1.1.1.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.1/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEquals(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEquals(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddr.IPv6Network('2001::2000/96')
ip2 = ipaddr.IPv6Network('2001::2001/96')
ip3 = ipaddr.IPv6Network('2001:ffff::2000/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEquals(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEquals(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
ipv6 = ipaddr.IPv6Network('::/0')
ipv4 = ipaddr.IPv4Network('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddr.IPNetwork('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddr.IPNetwork('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddr.IPNetwork('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# <=, >=
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.2'))
self.assertFalse(ipaddr.IPNetwork('1.1.1.2') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::2'))
self.assertFalse(ipaddr.IPNetwork('::2') <= ipaddr.IPNetwork('::1'))
def testStrictNetworks(self):
self.assertRaises(ValueError, ipaddr.IPNetwork, '192.168.1.1/24',
strict=True)
self.assertRaises(ValueError, ipaddr.IPNetwork, '::1/120', strict=True)
def testOverlaps(self):
other = ipaddr.IPv4Network('1.2.3.0/30')
other2 = ipaddr.IPv4Network('1.2.2.0/24')
other3 = ipaddr.IPv4Network('1.2.2.64/26')
self.assertTrue(self.ipv4.overlaps(other))
self.assertFalse(self.ipv4.overlaps(other2))
self.assertTrue(other2.overlaps(other3))
def testEmbeddedIpv4(self):
ipv4_string = '192.168.0.1'
ipv4 = ipaddr.IPv4Network(ipv4_string)
v4compat_ipv6 = ipaddr.IPv6Network('::%s' % ipv4_string)
self.assertEquals(int(v4compat_ipv6.ip), int(ipv4.ip))
v4mapped_ipv6 = ipaddr.IPv6Network('::ffff:%s' % ipv4_string)
self.assertNotEquals(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
'2001:1.1.1.1:1.1.1.1')
def testIPVersion(self):
self.assertEqual(self.ipv4.version, 4)
self.assertEqual(self.ipv6.version, 6)
def testPacked(self):
self.assertEqual(self.ipv4.packed,
_cb('\x01\x02\x03\x04'))
self.assertEqual(ipaddr.IPv4Network('255.254.253.252').packed,
_cb('\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6.packed,
_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
'\x02\x00\x00\x00\x00\x00\x00\x01'))
self.assertEqual(ipaddr.IPv6Network('ffff:2:3:4:ffff::').packed,
_cb('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+ '\x00' * 6))
self.assertEqual(ipaddr.IPv6Network('::1:0:0:0:0').packed,
_cb('\x00' * 6 + '\x00\x01' + '\x00' * 8))
def testIpStrFromPrefixlen(self):
ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
self.assertEquals(ipv4._ip_string_from_prefix(), '255.255.255.0')
self.assertEquals(ipv4._ip_string_from_prefix(28), '255.255.255.240')
def testIpType(self):
ipv4net = ipaddr.IPNetwork('1.2.3.4')
ipv4addr = ipaddr.IPAddress('1.2.3.4')
ipv6net = ipaddr.IPNetwork('::1.2.3.4')
ipv6addr = ipaddr.IPAddress('::1.2.3.4')
self.assertEquals(ipaddr.IPv4Network, type(ipv4net))
self.assertEquals(ipaddr.IPv4Address, type(ipv4addr))
self.assertEquals(ipaddr.IPv6Network, type(ipv6net))
self.assertEquals(ipaddr.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
# test networks
self.assertEquals(True, ipaddr.IPNetwork('224.1.1.1/31').is_multicast)
self.assertEquals(False, ipaddr.IPNetwork('240.0.0.0').is_multicast)
self.assertEquals(True, ipaddr.IPNetwork('192.168.1.1/17').is_private)
self.assertEquals(False, ipaddr.IPNetwork('192.169.0.0').is_private)
self.assertEquals(True, ipaddr.IPNetwork('10.255.255.255').is_private)
self.assertEquals(False, ipaddr.IPNetwork('11.0.0.0').is_private)
self.assertEquals(True, ipaddr.IPNetwork('172.31.255.255').is_private)
self.assertEquals(False, ipaddr.IPNetwork('172.32.0.0').is_private)
self.assertEquals(True,
ipaddr.IPNetwork('169.254.100.200/24').is_link_local)
self.assertEquals(False,
ipaddr.IPNetwork('169.255.100.200/24').is_link_local)
self.assertEquals(True,
ipaddr.IPNetwork('127.100.200.254/32').is_loopback)
self.assertEquals(True, ipaddr.IPNetwork('127.42.0.0/16').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('128.0.0.0').is_loopback)
# test addresses
self.assertEquals(True, ipaddr.IPAddress('224.1.1.1').is_multicast)
self.assertEquals(False, ipaddr.IPAddress('240.0.0.0').is_multicast)
self.assertEquals(True, ipaddr.IPAddress('192.168.1.1').is_private)
self.assertEquals(False, ipaddr.IPAddress('192.169.0.0').is_private)
self.assertEquals(True, ipaddr.IPAddress('10.255.255.255').is_private)
self.assertEquals(False, ipaddr.IPAddress('11.0.0.0').is_private)
self.assertEquals(True, ipaddr.IPAddress('172.31.255.255').is_private)
self.assertEquals(False, ipaddr.IPAddress('172.32.0.0').is_private)
self.assertEquals(True,
ipaddr.IPAddress('169.254.100.200').is_link_local)
self.assertEquals(False,
ipaddr.IPAddress('169.255.100.200').is_link_local)
self.assertEquals(True,
ipaddr.IPAddress('127.100.200.254').is_loopback)
self.assertEquals(True, ipaddr.IPAddress('127.42.0.0').is_loopback)
self.assertEquals(False, ipaddr.IPAddress('128.0.0.0').is_loopback)
self.assertEquals(True, ipaddr.IPNetwork('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
self.assertEquals(True, ipaddr.IPNetwork('ffff::').is_multicast)
self.assertEquals(True, ipaddr.IPNetwork(2**128-1).is_multicast)
self.assertEquals(True, ipaddr.IPNetwork('ff00::').is_multicast)
self.assertEquals(False, ipaddr.IPNetwork('fdff::').is_multicast)
self.assertEquals(True, ipaddr.IPNetwork('fecf::').is_site_local)
self.assertEquals(True, ipaddr.IPNetwork(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPNetwork('fbf:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPNetwork('ff00::').is_site_local)
self.assertEquals(True, ipaddr.IPNetwork('fc00::').is_private)
self.assertEquals(True, ipaddr.IPNetwork(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPNetwork('fbff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPNetwork('fe00::').is_private)
self.assertEquals(True, ipaddr.IPNetwork('fea0::').is_link_local)
self.assertEquals(True, ipaddr.IPNetwork('febf:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPNetwork('fe7f:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPNetwork('fec0::').is_link_local)
self.assertEquals(True, ipaddr.IPNetwork('0:0::0:01').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('::1/127').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('::').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('::2').is_loopback)
self.assertEquals(True, ipaddr.IPNetwork('0::0').is_unspecified)
self.assertEquals(False, ipaddr.IPNetwork('::1').is_unspecified)
self.assertEquals(False, ipaddr.IPNetwork('::/127').is_unspecified)
# test addresses
self.assertEquals(True, ipaddr.IPAddress('ffff::').is_multicast)
self.assertEquals(True, ipaddr.IPAddress(2**128-1).is_multicast)
self.assertEquals(True, ipaddr.IPAddress('ff00::').is_multicast)
self.assertEquals(False, ipaddr.IPAddress('fdff::').is_multicast)
self.assertEquals(True, ipaddr.IPAddress('fecf::').is_site_local)
self.assertEquals(True, ipaddr.IPAddress(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPAddress('fbf:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPAddress('ff00::').is_site_local)
self.assertEquals(True, ipaddr.IPAddress('fc00::').is_private)
self.assertEquals(True, ipaddr.IPAddress(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPAddress('fbff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPAddress('fe00::').is_private)
self.assertEquals(True, ipaddr.IPAddress('fea0::').is_link_local)
self.assertEquals(True, ipaddr.IPAddress('febf:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPAddress('fe7f:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPAddress('fec0::').is_link_local)
self.assertEquals(True, ipaddr.IPAddress('0:0::0:01').is_loopback)
self.assertEquals(True, ipaddr.IPAddress('::1').is_loopback)
self.assertEquals(False, ipaddr.IPAddress('::2').is_loopback)
self.assertEquals(True, ipaddr.IPAddress('0::0').is_unspecified)
self.assertEquals(False, ipaddr.IPAddress('::1').is_unspecified)
# some generic IETF reserved addresses
self.assertEquals(True, ipaddr.IPAddress('100::').is_reserved)
self.assertEquals(True, ipaddr.IPNetwork('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(ipaddr.IPAddress('::ffff:192.168.1.1').ipv4_mapped,
ipaddr.IPAddress('192.168.1.1'))
self.assertEqual(ipaddr.IPAddress('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddr.IPAddress('::ffff:c0a8:101').ipv4_mapped,
ipaddr.IPAddress('192.168.1.1'))
def testAddrExclude(self):
addr1 = ipaddr.IPNetwork('10.1.1.0/24')
addr2 = ipaddr.IPNetwork('10.1.1.0/26')
addr3 = ipaddr.IPNetwork('10.2.1.0/24')
addr4 = ipaddr.IPAddress('10.1.1.0')
self.assertEqual(addr1.address_exclude(addr2),
[ipaddr.IPNetwork('10.1.1.64/26'),
ipaddr.IPNetwork('10.1.1.128/25')])
self.assertRaises(ValueError, addr1.address_exclude, addr3)
self.assertRaises(TypeError, addr1.address_exclude, addr4)
self.assertEqual(addr1.address_exclude(addr1), [])
def testHash(self):
self.assertEquals(hash(ipaddr.IPNetwork('10.1.1.0/24')),
hash(ipaddr.IPNetwork('10.1.1.0/24')))
self.assertEquals(hash(ipaddr.IPAddress('10.1.1.0')),
hash(ipaddr.IPAddress('10.1.1.0')))
ip1 = ipaddr.IPAddress('10.1.1.0')
ip2 = ipaddr.IPAddress('1::')
dummy = {}
dummy[self.ipv4] = None
dummy[self.ipv6] = None
dummy[ip1] = None
dummy[ip2] = None
self.assertTrue(self.ipv4 in dummy)
self.assertTrue(ip2 in dummy)
def testCopyConstructor(self):
addr1 = ipaddr.IPNetwork('10.1.1.0/24')
addr2 = ipaddr.IPNetwork(addr1)
addr3 = ipaddr.IPNetwork('2001:658:22a:cafe:200::1/64')
addr4 = ipaddr.IPNetwork(addr3)
addr5 = ipaddr.IPv4Address('1.1.1.1')
addr6 = ipaddr.IPv6Address('2001:658:22a:cafe:200::1')
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
self.assertEqual(addr5, ipaddr.IPv4Address(addr5))
self.assertEqual(addr6, ipaddr.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'2001:0::3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
}
for uncompressed, compressed in test_addresses.items():
self.assertEquals(compressed, str(ipaddr.IPv6Network(uncompressed)))
def testExplodeShortHandIpStr(self):
addr1 = ipaddr.IPv6Network('2001::1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001',
addr1._explode_shorthand_ip_string(str(addr1.ip)))
self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001',
ipaddr.IPv6Network('::1/128').exploded)
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4))
self.assertEqual(42540616829182469433547762482097946625, int(self.ipv6))
def testHexRepresentation(self):
self.assertEqual(hex(0x1020304),
hex(self.ipv4))
self.assertEqual(hex(0x20010658022ACAFE0200000000000001),
hex(self.ipv6))
# backwards compatibility
def testBackwardsCompability(self):
self.assertEqual(ipaddr.CollapseAddrList(
[ipaddr.IPNetwork('1.1.0.0/24'), ipaddr.IPNetwork('1.1.1.0/24')]),
[ipaddr.IPNetwork('1.1.0.0/23')])
self.assertEqual(ipaddr.IPNetwork('::42:0/112').AddressExclude(
ipaddr.IPNetwork('::42:8000/113')),
[ipaddr.IPNetwork('::42:0/113')])
self.assertTrue(ipaddr.IPNetwork('1::/8').CompareNetworks(
ipaddr.IPNetwork('2::/9')) < 0)
self.assertEqual(ipaddr.IPNetwork('1::/16').Contains(
ipaddr.IPNetwork('2::/16')), False)
self.assertEqual(ipaddr.IPNetwork('0.0.0.0/0').Subnet(),
[ipaddr.IPNetwork('0.0.0.0/1'),
ipaddr.IPNetwork('128.0.0.0/1')])
self.assertEqual(ipaddr.IPNetwork('::/127').Subnet(),
[ipaddr.IPNetwork('::/128'),
ipaddr.IPNetwork('::1/128')])
self.assertEqual(ipaddr.IPNetwork('1.0.0.0/32').Supernet(),
ipaddr.IPNetwork('1.0.0.0/31'))
self.assertEqual(ipaddr.IPNetwork('::/121').Supernet(),
ipaddr.IPNetwork('::/120'))
self.assertEqual(ipaddr.IPNetwork('10.0.0.02').IsRFC1918(), True)
self.assertEqual(ipaddr.IPNetwork('10.0.0.0').IsMulticast(), False)
self.assertEqual(ipaddr.IPNetwork('127.255.255.255').IsLoopback(), True)
self.assertEqual(ipaddr.IPNetwork('169.255.255.255').IsLinkLocal(),
False)
def testForceVersion(self):
self.assertEqual(ipaddr.IPNetwork(1).version, 4)
self.assertEqual(ipaddr.IPNetwork(1, version=6).version, 6)
def testWithStar(self):
self.assertEqual(str(self.ipv4.with_prefixlen), "1.2.3.4/24")
self.assertEqual(str(self.ipv4.with_netmask), "1.2.3.4/255.255.255.0")
self.assertEqual(str(self.ipv4.with_hostmask), "1.2.3.4/0.0.0.255")
self.assertEqual(str(self.ipv6.with_prefixlen),
'2001:658:22a:cafe:200::1/64')
# rfc3513 sec 2.3 says that ipv6 only uses cidr notation for
# subnets
self.assertEqual(str(self.ipv6.with_netmask),
'2001:658:22a:cafe:200::1/64')
        # this probably doesn't make much sense, but it's included for
        # compatibility with ipv4
self.assertEqual(str(self.ipv6.with_hostmask),
'2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
# V4 - make sure we're empty
self.assertFalse(self.ipv4._cache.has_key('network'))
self.assertFalse(self.ipv4._cache.has_key('broadcast'))
self.assertFalse(self.ipv4._cache.has_key('hostmask'))
# V4 - populate and test
self.assertEqual(self.ipv4.network, ipaddr.IPv4Address('1.2.3.0'))
self.assertEqual(self.ipv4.broadcast, ipaddr.IPv4Address('1.2.3.255'))
self.assertEqual(self.ipv4.hostmask, ipaddr.IPv4Address('0.0.0.255'))
# V4 - check we're cached
self.assertTrue(self.ipv4._cache.has_key('network'))
self.assertTrue(self.ipv4._cache.has_key('broadcast'))
self.assertTrue(self.ipv4._cache.has_key('hostmask'))
# V6 - make sure we're empty
self.assertFalse(self.ipv6._cache.has_key('network'))
self.assertFalse(self.ipv6._cache.has_key('broadcast'))
self.assertFalse(self.ipv6._cache.has_key('hostmask'))
# V6 - populate and test
self.assertEqual(self.ipv6.network,
ipaddr.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(self.ipv6.broadcast, ipaddr.IPv6Address(
'2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6.hostmask,
ipaddr.IPv6Address('::ffff:ffff:ffff:ffff'))
# V6 - check we're cached
self.assertTrue(self.ipv6._cache.has_key('network'))
self.assertTrue(self.ipv6._cache.has_key('broadcast'))
self.assertTrue(self.ipv6._cache.has_key('hostmask'))
def testIsValidIp(self):
ip = ipaddr.IPv6Address('::')
self.assertTrue(ip._is_valid_ip('2001:658:22a:cafe:200::1'))
self.assertTrue(ip._is_valid_ip('::ffff:10.10.0.0'))
self.assertTrue(ip._is_valid_ip('::ffff:192.168.0.0'))
self.assertFalse(ip._is_valid_ip('2001:658:22a::::1'))
self.assertFalse(ip._is_valid_ip(':658:22a:cafe:200::1'))
self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe:200:'))
self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe:200:127.0.0.1::1'))
self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe:200::127.0.1'))
self.assertFalse(ip._is_valid_ip('2001:658:22a:zzzz:200::1'))
self.assertFalse(ip._is_valid_ip('2001:658:22a:cafe1:200::1'))
if __name__ == '__main__':
unittest.main()
| nouiz/fredericbastien-ipaddr-py-speed-up | tags/2.1.2/ipaddr_test.py | Python | apache-2.0 | 45,855 |
from pkg_resources import require
require("cothread==2.14")
from cothread.catools import *
import cothread
from Generic_BPMDevice import *
from subprocess import Popen, PIPE
import numpy as np
class Electron_BPMDevice(Generic_BPMDevice):
"""Libera Electron BPM Device class that uses Epics to communicate with PVs.
All of the methods here will attempt to be generic enough to work for Libera
devices that have the same PV names. If these names change, then a different
    class will have to be used. Most data is acquired using the slow acquisition
    method, as the tests are not intensive; for noise tests and the like, direct
    access to the data buffers may be needed.
Attributes:
epicsID (str): Channel identifier string that will be used to access PVs.
"""
def _read_epics_pv (self,pv):
"""Private method to read an Epics process variable.
Wraps up caget call, makes it easy for multiple reads to be programmed
and a timeout added if required.
Args:
pv (str): Name of the Epics process variable to read.
Returns:
variant: Value of requested process variable.
"""
return caget(self.epicsID+pv) # Get PV data
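    # For example, with the illustrative Epics ID "TS-DI-EBPM-04:" mentioned in
    # __init__ below, self._read_epics_pv("SA:X") issues caget("TS-DI-EBPM-04:SA:X").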
def __init__(self, dev_ID):
"""Initializes the Libera BPM device object and assigns it an ID.
Args:
            dev_ID (str): The Epics ID string assigned to that specific BPM device.
Returns:
.
"""
        if type(dev_ID) != str: # Makes sure the ID is a string
            raise TypeError # Raises a type error if a string is not used
else:
self.epicsID = dev_ID # TS-DI-EBPM-04:
pv = "SA:X" # Any PV hosts on the device could be used here
node = connect(self.epicsID + pv, cainfo=True).host.split(":")[0] # Get the IP address of the host
host_info = Popen(["arp", "-n", node], stdout=PIPE).communicate()[0] # Uses arp to get more info about the host
host_info = host_info.split("\n")[1] # Splits the data about the host
index = host_info.find(":") # Gets the first ":", used in the MAC address
            host_info = host_info[index - 2:index + 15] # Gets the device's MAC address
self.macaddress = host_info
print "Opened connection to "+self.get_device_ID() # Informs the user the device is now connected to
def __del__(self):
"""Informs the user that this object has been destroyed
Args:
Returns:
"""
print "Closed connection to "+self.get_device_ID()
def get_X_position(self):
"""Override method, gets the calculated X position of the beam.
Args:
Returns:
float: X position in mm
"""
return self._read_epics_pv("SA:X") # Reads the requested PV
def get_Y_position(self):
"""Override method, gets the calculated Y position of the beam.
Args:
Returns:
float: Y position in mm
"""
return self._read_epics_pv("SA:Y") # Reads the requested PV
def get_beam_current(self):
"""Override method, gets the beam current read by the BPMs.
Args:
Returns:
float: Current in mA
"""
return self._read_epics_pv("SA:CURRENT") # Reads the requested PV
def get_input_power(self):
"""Override method, gets the input power of the signals input to the device
Args:
Returns:
float: Input power in dBm
"""
return self._read_epics_pv("SA:POWER") # Reads the requested PV
def get_raw_BPM_buttons(self):
"""Override method, gets the raw signal from each BPM.
Args:
Returns:
float: Raw signal from BPM A
float: Raw signal from BPM B
float: Raw signal from BPM C
float: Raw signal from BPM D
"""
return (self._read_epics_pv("SA:A"),
self._read_epics_pv("SA:B"),
self._read_epics_pv("SA:C"),
self._read_epics_pv("SA:D")) # Reads the requested PVs
def get_normalised_BPM_buttons(self):
"""Override method, gets the normalised signal from each BPM.
Args:
Returns:
float: Normalised signal from BPM A
float: Normalised signal from BPM B
float: Normalised signal from BPM C
float: Normalised signal from BPM D
"""
return (self._read_epics_pv("SA:AN"),
self._read_epics_pv("SA:BN"),
self._read_epics_pv("SA:CN"),
self._read_epics_pv("SA:DN")) # Reads the requested PVs
def get_ADC_sum(self):
"""Override method, gets the sum of all of the buttons ADCs
A+B+C+D
Args:
Returns:
int: ADC sum in counts
"""
a, b, c, d = self.get_raw_BPM_buttons() # Reads the requested PVs
sum = a + b + c + d # Sums the values of the PVs
return sum
def get_device_ID(self):
"""Override method, gets the device's epics ID and MAC address
Args:
Returns:
str: Device with epics channel ID and MAC address
"""
return "Libera Electron BPM with the Epics ID "+ "\""+self.epicsID+"\" and the MAC Address \""+self.macaddress+"\""
def get_input_tolerance(self):
"""Override method, gets the maximum input power the device can take
        The devices will break if the input power is too high; as such, each device has its
        own tolerance, which this function returns. It should be used to ensure
        that the power put into the device is not high enough to break it.
Args:
Returns:
float: max input power in dBm
"""
return -20 # The maximum continuous input power the Electron can handle in dBm
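# Minimal usage sketch (the device ID below is only the illustrative one from
# the comments above and must match a real Libera Electron Epics prefix):
#   bpm = Electron_BPMDevice("TS-DI-EBPM-04:")
#   x_mm, y_mm = bpm.get_X_position(), bpm.get_Y_position()
#   a, b, c, d = bpm.get_raw_BPM_buttons()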
| dharryman/BPM_Test_Framework | BPMDevice/Electron_BPMDevice.py | Python | apache-2.0 | 6,025 |
# Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import misc
from tempest.tests import base
@misc.singleton
class TestFoo(object):
count = 0
def increment(self):
self.count += 1
return self.count
@misc.singleton
class TestBar(object):
count = 0
def increment(self):
self.count += 1
return self.count
class TestMisc(base.TestCase):
def test_singleton(self):
test = TestFoo()
self.assertEqual(0, test.count)
self.assertEqual(1, test.increment())
test2 = TestFoo()
self.assertEqual(1, test.count)
self.assertEqual(1, test2.count)
self.assertEqual(test, test2)
test3 = TestBar()
self.assertNotEqual(test, test3)
def test_find_test_caller_test_case(self):
# Calling it from here should give us the method we're in.
self.assertEqual('TestMisc:test_find_test_caller_test_case',
misc.find_test_caller())
def test_find_test_caller_setup_self(self):
def setUp(self):
return misc.find_test_caller()
self.assertEqual('TestMisc:setUp', setUp(self))
def test_find_test_caller_setup_no_self(self):
def setUp():
return misc.find_test_caller()
self.assertEqual(':setUp', setUp())
def test_find_test_caller_setupclass_cls(self):
def setUpClass(cls): # noqa
return misc.find_test_caller()
self.assertEqual('TestMisc:setUpClass', setUpClass(self.__class__))
def test_find_test_caller_teardown_self(self):
def tearDown(self):
return misc.find_test_caller()
self.assertEqual('TestMisc:tearDown', tearDown(self))
def test_find_test_caller_teardown_no_self(self):
def tearDown():
return misc.find_test_caller()
self.assertEqual(':tearDown', tearDown())
def test_find_test_caller_teardown_class(self):
def tearDownClass(cls): # noqa
return misc.find_test_caller()
self.assertEqual('TestMisc:tearDownClass',
tearDownClass(self.__class__))
|
bigswitch/tempest
|
tempest/tests/lib/common/utils/test_misc.py
|
Python
|
apache-2.0
| 2,733
|
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
from ironic_lib.common.i18n import _
from ironic_lib import exception
from ironic_lib import utils
opts = [
cfg.IntOpt('check_device_interval',
default=1,
help='After Ironic has completed creating the partition table, '
'it continues to check for activity on the attached iSCSI '
                     'device at this interval prior to copying the image'
' to the node, in seconds'),
cfg.IntOpt('check_device_max_retries',
default=20,
help='The maximum number of times to check that the device is '
'not accessed by another process. If the device is still '
'busy after that, the disk partitioning will be treated as'
' having failed.')
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='disk_partitioner',
title='Options for the disk partitioner')
CONF.register_group(opt_group)
CONF.register_opts(opts, opt_group)
LOG = logging.getLogger(__name__)
class DiskPartitioner(object):
def __init__(self, device, disk_label='msdos', alignment='optimal'):
"""A convenient wrapper around the parted tool.
:param device: The device path.
:param disk_label: The type of the partition table. Valid types are:
"bsd", "dvh", "gpt", "loop", "mac", "msdos",
"pc98", or "sun".
:param alignment: Set alignment for newly created partitions.
Valid types are: none, cylinder, minimal and
optimal.
"""
self._device = device
self._disk_label = disk_label
self._alignment = alignment
self._partitions = []
def _exec(self, *args):
# NOTE(lucasagomes): utils.execute() is already a wrapper on top
# of processutils.execute() which raises specific
# exceptions. It also logs any failure so we don't
# need to log it again here.
utils.execute('parted', '-a', self._alignment, '-s', self._device,
'--', 'unit', 'MiB', *args, use_standard_locale=True,
run_as_root=True)
def add_partition(self, size, part_type='primary', fs_type='',
boot_flag=None, extra_flags=None):
"""Add a partition.
:param size: The size of the partition in MiB.
:param part_type: The type of the partition. Valid values are:
primary, logical, or extended.
:param fs_type: The filesystem type. Valid types are: ext2, fat32,
fat16, HFS, linux-swap, NTFS, reiserfs, ufs.
If blank (''), it will create a Linux native
partition (83).
:param boot_flag: Boot flag that needs to be configured on the
partition. Ignored if None. It can take values
'bios_grub', 'boot'.
:param extra_flags: List of flags to set on the partition. Ignored
if None.
:returns: The partition number.
"""
self._partitions.append({'size': size,
'type': part_type,
'fs_type': fs_type,
'boot_flag': boot_flag,
'extra_flags': extra_flags})
return len(self._partitions)
def get_partitions(self):
"""Get the partitioning layout.
:returns: An iterator with the partition number and the
partition layout.
"""
return enumerate(self._partitions, 1)
def commit(self):
"""Write to the disk."""
LOG.debug("Committing partitions to disk.")
cmd_args = ['mklabel', self._disk_label]
# NOTE(lucasagomes): Lead in with 1MiB to allow room for the
# partition table itself.
start = 1
for num, part in self.get_partitions():
end = start + part['size']
cmd_args.extend(['mkpart', part['type'], part['fs_type'],
str(start), str(end)])
if part['boot_flag']:
cmd_args.extend(['set', str(num), part['boot_flag'], 'on'])
if part['extra_flags']:
for flag in part['extra_flags']:
cmd_args.extend(['set', str(num), flag, 'on'])
start = end
self._exec(*cmd_args)
try:
utils.wait_for_disk_to_become_available(self._device)
except exception.IronicException as e:
raise exception.InstanceDeployFailure(
_('Disk partitioning failed on device %(device)s. '
'Error: %(error)s')
% {'device': self._device, 'error': e})
def list_opts():
"""Entry point for oslo-config-generator."""
return [('disk_partitioner', opts)]
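# Illustrative usage sketch (not part of the original module): building a simple
# layout with the DiskPartitioner wrapper above. The device path and sizes are
# placeholder values, and commit() shells out to parted as root, so the sketch
# is left commented out.
#
# partitioner = DiskPartitioner('/dev/sdb', disk_label='msdos')
# partitioner.add_partition(512, fs_type='ext2', boot_flag='boot')  # returns 1
# partitioner.add_partition(4096)                                   # returns 2
# for number, layout in partitioner.get_partitions():
#     LOG.debug("partition %d: %s", number, layout)
# partitioner.commit()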
|
openstack/ironic-lib
|
ironic_lib/disk_partitioner.py
|
Python
|
apache-2.0
| 5,705
|
#!/usr/bin/python
import os
import sys
import pwd
import uuid
import struct
import socket
import logging
import base64
from M2Crypto import EVP, EC, util
logger = logging.getLogger("sock2proc")
class ProcInfo(object):
# Either sock (local tcp) or pid/euid (unix stream) must be present
def __init__(self, sock=-1, pid=-1, euid=-1):
self.sock = sock # used to find proc info vi proc/etc/net
self.pid = pid
self.euid = euid
self.clnt = ""
self.binh = ""
self.user = ""
self.cmd = ""
self.arg = []
self.env = {}
self.bindir = ""
self.cwd = "/"
self.root = "/"
def proc_info(self):
# Either pid or sock must be available
if (self.pid == -1):
if (self.sock ==""):
return False
else:
inode, self.euid = ProcInfo.find_inode(self.sock)
if (inode == -1):
return False
self.pid = ProcInfo.find_pid(inode)
if (self.pid == -1):
return False
self.user = pwd.getpwuid(int(self.euid))[0]
logger.info("user = %s", self.user)
fname = open("/proc/" + str(self.pid) + "/cmdline", "r")
self.cmd = fname.readline()
self.cmd = self.cmd.strip()
fname.close()
if (len(self.cmd) > 0):
argv = self.cmd.split("\0")
self.cmd = argv[0]
for i in range(1, len(argv)):
arg = argv[i].strip()
if (len(arg) > 0):
self.arg.append(arg)
#Special case where args part of command
#TODO bug if space in filename (escaping handling)
idx = self.cmd.find(" ")
if (idx > 0):
self.cmd = self.cmd[:idx].strip()
self.arg += self.cmd[(idx + 1):].split(" ")
logger.info("clnt exe: %s", self.cmd)
logger.info("clnt arg: %s", self.arg)
self.bindir, self.cmd = os.path.split(self.cmd)
logger.info("clnt bin dir: %s", self.bindir)
self.clnt = self.cmd + "~" + socket.getfqdn() + "~" + self.user
logger.info("clnt id: %s", self.clnt)
self.binh = ProcInfo.hash_file("/proc/" + str(self.pid) + "/exe")
logger.info("clnt exe hash: %s",self.binh)
fname = open("/proc/" + str(self.pid) + "/environ", "r")
envlist = fname.readline()
fname.close()
envlist = envlist.split("\0")
for i in range(0, len(envlist)):
nv = envlist[i].split("=", 1)  # split on the first '=' only so values containing '=' stay intact
if (len(nv[0]) == 0):
break
self.env[nv[0].strip()] = nv[1].strip()
logger.info("clnt env: %s", self.env)
self.cwd = os.path.realpath("/proc/" + str(self.pid) + "/cwd")
self.root = os.path.realpath("/proc/" + str(self.pid) + "/root")
return True
# Find the inode of a local sock address
@staticmethod
def find_inode(sock):
inode = -1
euid = -1
fname = open("/proc/net/tcp", "r")
fname.readline() # discard the 1st line
while (True):
line = fname.readline()
if not line:
break
items = line.split()
if (items[1].find(sock.upper()) != -1):
euid = items[7]
inode = items[9]
break
fname.close()
logger.info("euid = %s", euid)
if (euid == -1):
return -1, -1
return int(inode), euid
# Find the pid given its inode
@staticmethod
def find_pid(inode):
files = os.listdir("/proc/")
files.remove(str(os.getpid()))
pids = []
for f in files:
try:
integer = int(f)
pids.append(str(integer))
except ValueError:
# don't care if not a pid
pass
for pid in pids:
fds = os.listdir("/proc/%s/fd/" % pid)
for fd in fds:
if ('socket:[%d]' % inode) == os.readlink("/proc/%s/fd/%s" % (pid, fd)):
return pid
return -1
@staticmethod
def hash_file(file):
hash = ""
with open(file, 'rb') as fh:
m = EVP.MessageDigest('sha1')
while True:
data = fh.read(8192)
if not data:
break
m.update(data)
hash = m.final()
hash = base64.urlsafe_b64encode(hash)
return hash
@staticmethod
def ip_to_nl(ip):
t = socket.inet_aton(ip)
return struct.unpack("I", t)[0]
@staticmethod
def sock2proc(client_address):
logger.info ("client sock: %s", client_address);
host, port = client_address
clnt = hex(ProcInfo.ip_to_nl(host)).lstrip('0x') + ":" + hex(int(port)).lstrip('0x')
proc = ProcInfo(sock = clnt)
proc.proc_info()
return proc
@staticmethod
def pipe2proc(pid, euid):
logger.info ("client pid: %s, euid: %s", pid, euid);
proc = ProcInfo(pid = pid, euid = euid)
proc.proc_info()
return proc
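# Illustrative usage sketch (not part of the original module): resolving the
# process behind a local TCP connection, e.g. from a socket server's request
# handler. The (host, port) values are placeholders.
#
# proc = ProcInfo.sock2proc(("127.0.0.1", 54321))
# logger.info("resolved client %s running %s", proc.clnt, proc.cmd)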
# Entrance for stand-alone execution
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s %(message)s', level=logging.INFO)
proc = ProcInfo.pipe2proc(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
|
cloudxaas/cloudauth
|
lib/sock2proc.py
|
Python
|
apache-2.0
| 5,686
|
import socket
from flaky import flaky
import mock
from event_consumer import message_handler
from event_consumer import handlers as ec
from .base import BaseConsumerIntegrationTest
class ConsumeMessageHandlerTest(BaseConsumerIntegrationTest):
@flaky(max_runs=5, min_passes=5)
def test_consume_basic(self):
"""
Should run the wrapped function when a message arrives with its routing key.
"""
with mock.patch.object(ec, 'REGISTRY', new=dict()) as reg:
f1 = message_handler('my.routing.key1')(
mock.MagicMock(__name__='mock_handler1')
)
f2 = message_handler('my.routing.key2')(
mock.MagicMock(__name__='mock_handler2')
)
assert len(reg) == 2
self.configure_handlers()
assert len(self.handlers) == len(reg)
h1 = self.get_handlers_for_key('my.routing.key1')[0]
h2 = self.get_handlers_for_key('my.routing.key2')[0]
p1 = self.get_producer(h1)
p2 = self.get_producer(h2)
body1 = self.body()
body2 = self.body()
p1.publish(body1)
p2.publish(body2)
for _ in range(2):
self.connection.drain_events(timeout=0.3)
f1.assert_called_once_with(body1)
f2.assert_called_once_with(body2)
# no retries:
e = None
try:
self.connection.drain_events(timeout=0.3)
except socket.timeout as exc:
e = exc
self.assertIsNotNone(e, msg="e=None here means task was unexpectedly retried")
# no further calls
self.assertEqual(1, f1.call_count)
self.assertEqual(1, f2.call_count)
@flaky(max_runs=5, min_passes=5)
def test_consume_custom_queue_name(self):
"""
Should run the wrapped function when a message arrives with its routing key.
Test that we can connect multiple routing keys on the same queue and the
appropriate handler will be called in each case.
"""
with mock.patch.object(ec, 'REGISTRY', new=dict()) as reg:
# we have to use a named exchange to be able to bind a custom queue name
f1 = message_handler('my.routing.key1', queue='custom_queue', exchange='custom')(
mock.MagicMock(__name__='mock_handler1')
)
assert len(reg) == 1
self.configure_handlers()
assert len(self.handlers) == len(reg)
h1 = self.get_handlers_for_key('my.routing.key1')[0]
p1 = self.get_producer(h1)
body1 = self.body()
p1.publish(body1)
self.connection.drain_events(timeout=0.3)
f1.assert_called_once_with(body1)
# no retries:
e = None
try:
self.connection.drain_events(timeout=0.3)
except socket.timeout as exc:
e = exc
self.assertIsNotNone(e, msg="e=None here means task was unexpectedly retried")
# no further calls
self.assertEqual(1, f1.call_count)
@flaky(max_runs=5, min_passes=5)
def test_consume_wildcard_route(self):
"""
Should run the wrapped function when a message arrives with its routing key.
Test that we can connect multiple routing keys on the same queue and the
appropriate handler will be called in each case.
"""
with mock.patch.object(ec, 'REGISTRY', new=dict()) as reg:
f1 = message_handler('my.routing.*', exchange='custom')(
mock.MagicMock(__name__='mock_handler1')
)
assert len(reg) == 1
self.configure_handlers()
assert len(self.handlers) == len(reg)
h1 = self.get_handlers_for_key('my.routing.*')[0]
p1 = self.get_producer(h1, 'my.routing.key1')
p2 = self.get_producer(h1, 'my.routing.key2')
body1 = self.body()
body2 = self.body()
p1.publish(body1)
p2.publish(body2)
for _ in range(2):
self.connection.drain_events(timeout=0.3)
f1.assert_has_calls([mock.call(body1), mock.call(body2)], any_order=True)
# no retries:
e = None
try:
self.connection.drain_events(timeout=0.3)
except socket.timeout as exc:
e = exc
self.assertIsNotNone(e, msg="e=None here means task was unexpectedly retried")
# no further calls
self.assertEqual(2, f1.call_count)
@flaky(max_runs=5, min_passes=5)
def test_consume_multiple_routes(self):
"""
Should run the wrapped function when a message arrives with its routing key.
Test that we can connect multiple routing keys on the same queue and the
appropriate handler will be called in each case.
"""
with mock.patch.object(ec, 'REGISTRY', new=dict()) as reg:
decorator = message_handler(
['my.routing.key1', 'my.routing.key2'],
exchange='custom',
)
f1 = decorator(mock.MagicMock(__name__='mock_handler1'))
assert len(reg) == 2
self.configure_handlers()
assert len(self.handlers) == len(reg)
h1 = self.get_handlers_for_key('my.routing.key1')[0]
h2 = self.get_handlers_for_key('my.routing.key2')[0]
p1 = self.get_producer(h1)
p2 = self.get_producer(h2)
body1 = self.body()
body2 = self.body()
p1.publish(body1)
p2.publish(body2)
for _ in range(2):
self.connection.drain_events(timeout=0.3)
f1.assert_has_calls([mock.call(body1), mock.call(body2)], any_order=True)
# no retries:
e = None
try:
self.connection.drain_events(timeout=0.3)
except socket.timeout as exc:
e = exc
self.assertIsNotNone(e, msg="e=None here means task was unexpectedly retried")
# no further calls
self.assertEqual(2, f1.call_count)
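# Illustrative sketch (not part of the test suite): outside of tests, a handler
# is registered with the same decorator exercised above and receives the decoded
# message body. The routing key, queue and exchange names are placeholders.
#
# from event_consumer import message_handler
#
# @message_handler('my.routing.key1', queue='custom_queue', exchange='custom')
# def handle_my_event(body):
#     ...  # process the message payload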
|
depop/celery-message-consumer
|
tests/test_consume_handler.py
|
Python
|
apache-2.0
| 6,210
|
"""Voluptuous schemas for the KNX integration."""
from __future__ import annotations
from abc import ABC
from collections import OrderedDict
from typing import Any, ClassVar, Final
import voluptuous as vol
from xknx import XKNX
from xknx.devices.climate import SetpointShiftMode
from xknx.dpt import DPTBase, DPTNumeric
from xknx.exceptions import CouldNotParseAddress
from xknx.io import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
from xknx.telegram.address import IndividualAddress, parse_device_group_address
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_DEVICE_CLASSES,
)
from homeassistant.components.climate.const import HVAC_MODE_HEAT, HVAC_MODES
from homeassistant.components.cover import DEVICE_CLASSES as COVER_DEVICE_CLASSES
from homeassistant.components.number.const import MODE_AUTO, MODE_BOX, MODE_SLIDER
from homeassistant.components.sensor import CONF_STATE_CLASS, STATE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_CATEGORY,
CONF_ENTITY_ID,
CONF_HOST,
CONF_MODE,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ENTITY_CATEGORIES_SCHEMA
from .const import (
CONF_INVERT,
CONF_KNX_EXPOSE,
CONF_KNX_INDIVIDUAL_ADDRESS,
CONF_KNX_ROUTING,
CONF_KNX_TUNNELING,
CONF_RESET_AFTER,
CONF_RESPOND_TO_READ,
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
CONTROLLER_MODES,
KNX_ADDRESS,
PRESET_MODES,
ColorTempModes,
SupportedPlatforms,
)
##################
# KNX VALIDATORS
##################
def ga_validator(value: Any) -> str | int:
"""Validate that value is parsable as GroupAddress or InternalGroupAddress."""
if isinstance(value, (str, int)):
try:
parse_device_group_address(value)
return value
except CouldNotParseAddress:
pass
raise vol.Invalid(
f"value '{value}' is not a valid KNX group address '<main>/<middle>/<sub>', '<main>/<sub>' "
"or '<free>' (eg.'1/2/3', '9/234', '123'), nor xknx internal address 'i-<string>'."
)
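# Examples of what ga_validator accepts and rejects (illustrative, not part of
# the original module):
#   ga_validator("1/2/3")    -> "1/2/3"    (3-level group address)
#   ga_validator("9/234")    -> "9/234"    (2-level group address)
#   ga_validator(123)        -> 123        (free-style group address)
#   ga_validator("i-sensor") -> "i-sensor" (xknx internal address)
#   ga_validator("foo")      raises vol.Invalid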
ga_list_validator = vol.All(cv.ensure_list, [ga_validator])
ia_validator = vol.Any(
cv.matches_regex(IndividualAddress.ADDRESS_RE.pattern),
vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
msg="value does not match pattern for KNX individual address '<area>.<line>.<device>' (eg.'1.1.100')",
)
def number_limit_sub_validator(entity_config: OrderedDict) -> OrderedDict:
"""Validate a number entity configurations dependent on configured value type."""
value_type = entity_config[CONF_TYPE]
min_config: float | None = entity_config.get(NumberSchema.CONF_MIN)
max_config: float | None = entity_config.get(NumberSchema.CONF_MAX)
step_config: float | None = entity_config.get(NumberSchema.CONF_STEP)
dpt_class = DPTNumeric.parse_transcoder(value_type)
if dpt_class is None:
raise vol.Invalid(f"'type: {value_type}' is not a valid numeric sensor type.")
# Infinity is not supported by the Home Assistant frontend, so user-defined
# config is required if the xknx DPTNumeric subclass defines it as a limit.
if min_config is None and dpt_class.value_min == float("-inf"):
raise vol.Invalid(f"'min' key required for value type '{value_type}'")
if min_config is not None and min_config < dpt_class.value_min:
raise vol.Invalid(
f"'min: {min_config}' undercuts possible minimum"
f" of value type '{value_type}': {dpt_class.value_min}"
)
if max_config is None and dpt_class.value_max == float("inf"):
raise vol.Invalid(f"'max' key required for value type '{value_type}'")
if max_config is not None and max_config > dpt_class.value_max:
raise vol.Invalid(
f"'max: {max_config}' exceeds possible maximum"
f" of value type '{value_type}': {dpt_class.value_max}"
)
if step_config is not None and step_config < dpt_class.resolution:
raise vol.Invalid(
f"'step: {step_config}' undercuts possible minimum step"
f" of value type '{value_type}': {dpt_class.resolution}"
)
return entity_config
def numeric_type_validator(value: Any) -> str | int:
"""Validate that value is parsable as numeric sensor type."""
if isinstance(value, (str, int)) and DPTNumeric.parse_transcoder(value) is not None:
return value
raise vol.Invalid(f"value '{value}' is not a valid numeric sensor type.")
def select_options_sub_validator(entity_config: OrderedDict) -> OrderedDict:
"""Validate a select entity options configuration."""
options_seen = set()
payloads_seen = set()
payload_length = entity_config[SelectSchema.CONF_PAYLOAD_LENGTH]
if payload_length == 0:
max_payload = 0x3F
else:
max_payload = 256 ** payload_length - 1
for opt in entity_config[SelectSchema.CONF_OPTIONS]:
option = opt[SelectSchema.CONF_OPTION]
payload = opt[SelectSchema.CONF_PAYLOAD]
if payload > max_payload:
raise vol.Invalid(
f"'payload: {payload}' for 'option: {option}' exceeds possible"
f" maximum of 'payload_length: {payload_length}': {max_payload}"
)
if option in options_seen:
raise vol.Invalid(f"duplicate item for 'option' not allowed: {option}")
options_seen.add(option)
if payload in payloads_seen:
raise vol.Invalid(f"duplicate item for 'payload' not allowed: {payload}")
payloads_seen.add(payload)
return entity_config
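# Worked example for the payload limit above (illustrative): with
# payload_length == 1 the maximum payload is 256**1 - 1 == 255, and with
# payload_length == 0 the payload is limited to 6 bits, i.e. 0x3F == 63, so an
# option with payload 300 and payload_length 1 would be rejected.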
def sensor_type_validator(value: Any) -> str | int:
"""Validate that value is parsable as sensor type."""
if isinstance(value, (str, int)) and DPTBase.parse_transcoder(value) is not None:
return value
raise vol.Invalid(f"value '{value}' is not a valid sensor type.")
sync_state_validator = vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.matches_regex(r"^(init|expire|every)( \d*)?$"),
)
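# Values accepted by sync_state_validator (illustrative): True or False, an
# integer number of minutes between 2 and 1440, or a string matching the regex
# above such as "init", "expire 60" or "every 300".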
##############
# CONNECTION
##############
class ConnectionSchema:
"""Voluptuous schema for KNX connection."""
CONF_KNX_LOCAL_IP = "local_ip"
CONF_KNX_MCAST_GRP = "multicast_group"
CONF_KNX_MCAST_PORT = "multicast_port"
CONF_KNX_RATE_LIMIT = "rate_limit"
CONF_KNX_ROUTE_BACK = "route_back"
CONF_KNX_STATE_UPDATER = "state_updater"
TUNNELING_SCHEMA = vol.Schema(
{
vol.Optional(CONF_PORT, default=DEFAULT_MCAST_PORT): cv.port,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_KNX_LOCAL_IP): cv.string,
vol.Optional(CONF_KNX_ROUTE_BACK, default=False): cv.boolean,
}
)
ROUTING_SCHEMA = vol.Maybe(vol.Schema({vol.Optional(CONF_KNX_LOCAL_IP): cv.string}))
SCHEMA = {
vol.Exclusive(CONF_KNX_ROUTING, "connection_type"): ROUTING_SCHEMA,
vol.Exclusive(CONF_KNX_TUNNELING, "connection_type"): TUNNELING_SCHEMA,
vol.Optional(
CONF_KNX_INDIVIDUAL_ADDRESS, default=XKNX.DEFAULT_ADDRESS
): ia_validator,
vol.Optional(CONF_KNX_MCAST_GRP, default=DEFAULT_MCAST_GRP): cv.string,
vol.Optional(CONF_KNX_MCAST_PORT, default=DEFAULT_MCAST_PORT): cv.port,
vol.Optional(CONF_KNX_STATE_UPDATER, default=True): cv.boolean,
vol.Optional(CONF_KNX_RATE_LIMIT, default=20): vol.All(
vol.Coerce(int), vol.Range(min=1, max=100)
),
}
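# Illustrative (not part of the original module): a minimal tunneling
# configuration that would pass TUNNELING_SCHEMA above; the host value is a
# placeholder and the port falls back to the schema default.
#
# ConnectionSchema.TUNNELING_SCHEMA({"host": "192.168.1.10"})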
#############
# PLATFORMS
#############
class KNXPlatformSchema(ABC):
"""Voluptuous schema for KNX platform entity configuration."""
PLATFORM_NAME: ClassVar[str]
ENTITY_SCHEMA: ClassVar[vol.Schema]
@classmethod
def platform_node(cls) -> dict[vol.Optional, vol.All]:
"""Return a schema node for the platform."""
return {
vol.Optional(cls.PLATFORM_NAME): vol.All(
cv.ensure_list, [cls.ENTITY_SCHEMA]
)
}
class BinarySensorSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX binary sensors."""
PLATFORM_NAME = SupportedPlatforms.BINARY_SENSOR.value
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_INVERT = CONF_INVERT
CONF_IGNORE_INTERNAL_STATE = "ignore_internal_state"
CONF_CONTEXT_TIMEOUT = "context_timeout"
CONF_RESET_AFTER = CONF_RESET_AFTER
DEFAULT_NAME = "KNX Binary Sensor"
ENTITY_SCHEMA = vol.All(
# deprecated since September 2020
cv.deprecated("significant_bit"),
cv.deprecated("automation"),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
vol.Optional(CONF_IGNORE_INTERNAL_STATE, default=False): cv.boolean,
vol.Optional(CONF_INVERT, default=False): cv.boolean,
vol.Required(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTEXT_TIMEOUT): vol.All(
vol.Coerce(float), vol.Range(min=0, max=10)
),
vol.Optional(CONF_DEVICE_CLASS): vol.In(BINARY_SENSOR_DEVICE_CLASSES),
vol.Optional(CONF_RESET_AFTER): cv.positive_float,
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
),
)
class ClimateSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX climate devices."""
PLATFORM_NAME = SupportedPlatforms.CLIMATE.value
CONF_ACTIVE_STATE_ADDRESS = "active_state_address"
CONF_SETPOINT_SHIFT_ADDRESS = "setpoint_shift_address"
CONF_SETPOINT_SHIFT_STATE_ADDRESS = "setpoint_shift_state_address"
CONF_SETPOINT_SHIFT_MODE = "setpoint_shift_mode"
CONF_SETPOINT_SHIFT_MAX = "setpoint_shift_max"
CONF_SETPOINT_SHIFT_MIN = "setpoint_shift_min"
CONF_TEMPERATURE_ADDRESS = "temperature_address"
CONF_TEMPERATURE_STEP = "temperature_step"
CONF_TARGET_TEMPERATURE_ADDRESS = "target_temperature_address"
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = "target_temperature_state_address"
CONF_OPERATION_MODE_ADDRESS = "operation_mode_address"
CONF_OPERATION_MODE_STATE_ADDRESS = "operation_mode_state_address"
CONF_CONTROLLER_STATUS_ADDRESS = "controller_status_address"
CONF_CONTROLLER_STATUS_STATE_ADDRESS = "controller_status_state_address"
CONF_CONTROLLER_MODE_ADDRESS = "controller_mode_address"
CONF_CONTROLLER_MODE_STATE_ADDRESS = "controller_mode_state_address"
CONF_COMMAND_VALUE_STATE_ADDRESS = "command_value_state_address"
CONF_HEAT_COOL_ADDRESS = "heat_cool_address"
CONF_HEAT_COOL_STATE_ADDRESS = "heat_cool_state_address"
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = (
"operation_mode_frost_protection_address"
)
CONF_OPERATION_MODE_NIGHT_ADDRESS = "operation_mode_night_address"
CONF_OPERATION_MODE_COMFORT_ADDRESS = "operation_mode_comfort_address"
CONF_OPERATION_MODE_STANDBY_ADDRESS = "operation_mode_standby_address"
CONF_OPERATION_MODES = "operation_modes"
CONF_CONTROLLER_MODES = "controller_modes"
CONF_DEFAULT_CONTROLLER_MODE = "default_controller_mode"
CONF_ON_OFF_ADDRESS = "on_off_address"
CONF_ON_OFF_STATE_ADDRESS = "on_off_state_address"
CONF_ON_OFF_INVERT = "on_off_invert"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
DEFAULT_NAME = "KNX Climate"
DEFAULT_SETPOINT_SHIFT_MODE = "DPT6010"
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEFAULT_TEMPERATURE_STEP = 0.1
DEFAULT_ON_OFF_INVERT = False
ENTITY_SCHEMA = vol.All(
# deprecated since September 2020
cv.deprecated("setpoint_shift_step", replacement_key=CONF_TEMPERATURE_STEP),
# deprecated since 2021.6
cv.deprecated("create_temperature_sensors"),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX
): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(
CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN
): vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(
CONF_TEMPERATURE_STEP, default=DEFAULT_TEMPERATURE_STEP
): vol.All(float, vol.Range(min=0, max=2)),
vol.Required(CONF_TEMPERATURE_ADDRESS): ga_list_validator,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): ga_list_validator,
vol.Inclusive(
CONF_SETPOINT_SHIFT_ADDRESS,
"setpoint_shift",
msg="'setpoint_shift_address' and 'setpoint_shift_state_address' "
"are required for setpoint_shift configuration",
): ga_list_validator,
vol.Inclusive(
CONF_SETPOINT_SHIFT_STATE_ADDRESS,
"setpoint_shift",
msg="'setpoint_shift_address' and 'setpoint_shift_state_address' "
"are required for setpoint_shift configuration",
): ga_list_validator,
vol.Optional(CONF_SETPOINT_SHIFT_MODE): vol.Maybe(
vol.All(vol.Upper, cv.enum(SetpointShiftMode))
),
vol.Optional(CONF_ACTIVE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_COMMAND_VALUE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): ga_list_validator,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_HEAT_COOL_ADDRESS): ga_list_validator,
vol.Optional(CONF_HEAT_COOL_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS
): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): ga_list_validator,
vol.Optional(CONF_OPERATION_MODE_STANDBY_ADDRESS): ga_list_validator,
vol.Optional(CONF_ON_OFF_ADDRESS): ga_list_validator,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_ON_OFF_INVERT, default=DEFAULT_ON_OFF_INVERT
): cv.boolean,
vol.Optional(CONF_OPERATION_MODES): vol.All(
cv.ensure_list, [vol.In(PRESET_MODES)]
),
vol.Optional(CONF_CONTROLLER_MODES): vol.All(
cv.ensure_list, [vol.In(CONTROLLER_MODES)]
),
vol.Optional(
CONF_DEFAULT_CONTROLLER_MODE, default=HVAC_MODE_HEAT
): vol.In(HVAC_MODES),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
),
)
class CoverSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX covers."""
PLATFORM_NAME = SupportedPlatforms.COVER.value
CONF_MOVE_LONG_ADDRESS = "move_long_address"
CONF_MOVE_SHORT_ADDRESS = "move_short_address"
CONF_STOP_ADDRESS = "stop_address"
CONF_POSITION_ADDRESS = "position_address"
CONF_POSITION_STATE_ADDRESS = "position_state_address"
CONF_ANGLE_ADDRESS = "angle_address"
CONF_ANGLE_STATE_ADDRESS = "angle_state_address"
CONF_TRAVELLING_TIME_DOWN = "travelling_time_down"
CONF_TRAVELLING_TIME_UP = "travelling_time_up"
CONF_INVERT_POSITION = "invert_position"
CONF_INVERT_ANGLE = "invert_angle"
DEFAULT_TRAVEL_TIME = 25
DEFAULT_NAME = "KNX Cover"
ENTITY_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(
vol.Any(CONF_MOVE_LONG_ADDRESS, CONF_POSITION_ADDRESS),
msg=f"At least one of '{CONF_MOVE_LONG_ADDRESS}' or '{CONF_POSITION_ADDRESS}' is required.",
): object,
},
extra=vol.ALLOW_EXTRA,
),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MOVE_LONG_ADDRESS): ga_list_validator,
vol.Optional(CONF_MOVE_SHORT_ADDRESS): ga_list_validator,
vol.Optional(CONF_STOP_ADDRESS): ga_list_validator,
vol.Optional(CONF_POSITION_ADDRESS): ga_list_validator,
vol.Optional(CONF_POSITION_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_ANGLE_ADDRESS): ga_list_validator,
vol.Optional(CONF_ANGLE_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_TRAVELLING_TIME_DOWN, default=DEFAULT_TRAVEL_TIME
): cv.positive_float,
vol.Optional(
CONF_TRAVELLING_TIME_UP, default=DEFAULT_TRAVEL_TIME
): cv.positive_float,
vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
vol.Optional(CONF_INVERT_ANGLE, default=False): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS): vol.In(COVER_DEVICE_CLASSES),
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
),
)
class ExposeSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX exposures."""
PLATFORM_NAME = CONF_KNX_EXPOSE
CONF_KNX_EXPOSE_TYPE = CONF_TYPE
CONF_KNX_EXPOSE_ATTRIBUTE = "attribute"
CONF_KNX_EXPOSE_BINARY = "binary"
CONF_KNX_EXPOSE_DEFAULT = "default"
EXPOSE_TIME_TYPES: Final = [
"time",
"date",
"datetime",
]
EXPOSE_TIME_SCHEMA = vol.Schema(
{
vol.Required(CONF_KNX_EXPOSE_TYPE): vol.All(
cv.string, str.lower, vol.In(EXPOSE_TIME_TYPES)
),
vol.Required(KNX_ADDRESS): ga_validator,
}
)
EXPOSE_SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_KNX_EXPOSE_TYPE): vol.Any(
CONF_KNX_EXPOSE_BINARY, sensor_type_validator
),
vol.Required(KNX_ADDRESS): ga_validator,
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_KNX_EXPOSE_ATTRIBUTE): cv.string,
vol.Optional(CONF_KNX_EXPOSE_DEFAULT): cv.match_all,
}
)
ENTITY_SCHEMA = vol.Any(EXPOSE_SENSOR_SCHEMA, EXPOSE_TIME_SCHEMA)
class FanSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX fans."""
PLATFORM_NAME = SupportedPlatforms.FAN.value
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_OSCILLATION_ADDRESS = "oscillation_address"
CONF_OSCILLATION_STATE_ADDRESS = "oscillation_state_address"
CONF_MAX_STEP = "max_step"
DEFAULT_NAME = "KNX Fan"
ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_OSCILLATION_ADDRESS): ga_list_validator,
vol.Optional(CONF_OSCILLATION_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_MAX_STEP): cv.byte,
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
)
class LightSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX lights."""
PLATFORM_NAME = SupportedPlatforms.LIGHT.value
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_BRIGHTNESS_ADDRESS = "brightness_address"
CONF_BRIGHTNESS_STATE_ADDRESS = "brightness_state_address"
CONF_COLOR_ADDRESS = "color_address"
CONF_COLOR_STATE_ADDRESS = "color_state_address"
CONF_COLOR_TEMP_ADDRESS = "color_temperature_address"
CONF_COLOR_TEMP_STATE_ADDRESS = "color_temperature_state_address"
CONF_COLOR_TEMP_MODE = "color_temperature_mode"
CONF_HUE_ADDRESS = "hue_address"
CONF_HUE_STATE_ADDRESS = "hue_state_address"
CONF_RGBW_ADDRESS = "rgbw_address"
CONF_RGBW_STATE_ADDRESS = "rgbw_state_address"
CONF_SATURATION_ADDRESS = "saturation_address"
CONF_SATURATION_STATE_ADDRESS = "saturation_state_address"
CONF_XYY_ADDRESS = "xyy_address"
CONF_XYY_STATE_ADDRESS = "xyy_state_address"
CONF_MIN_KELVIN = "min_kelvin"
CONF_MAX_KELVIN = "max_kelvin"
DEFAULT_NAME = "KNX Light"
DEFAULT_COLOR_TEMP_MODE = "absolute"
DEFAULT_MIN_KELVIN = 2700 # 370 mireds
DEFAULT_MAX_KELVIN = 6000 # 166 mireds
CONF_INDIVIDUAL_COLORS = "individual_colors"
CONF_RED = "red"
CONF_GREEN = "green"
CONF_BLUE = "blue"
CONF_WHITE = "white"
_hs_color_inclusion_msg = (
"'hue_address', 'saturation_address' and 'brightness_address'"
" are required for hs_color configuration"
)
HS_COLOR_SCHEMA = {
vol.Optional(CONF_HUE_ADDRESS): ga_list_validator,
vol.Optional(CONF_HUE_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_SATURATION_ADDRESS): ga_list_validator,
vol.Optional(CONF_SATURATION_STATE_ADDRESS): ga_list_validator,
}
INDIVIDUAL_COLOR_SCHEMA = vol.Schema(
{
vol.Optional(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Required(CONF_BRIGHTNESS_ADDRESS): ga_list_validator,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): ga_list_validator,
}
)
ENTITY_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_BRIGHTNESS_ADDRESS): ga_list_validator,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): ga_list_validator,
vol.Exclusive(CONF_INDIVIDUAL_COLORS, "color"): {
vol.Inclusive(
CONF_RED,
"individual_colors",
msg="'red', 'green' and 'blue' are required for individual colors configuration",
): INDIVIDUAL_COLOR_SCHEMA,
vol.Inclusive(
CONF_GREEN,
"individual_colors",
msg="'red', 'green' and 'blue' are required for individual colors configuration",
): INDIVIDUAL_COLOR_SCHEMA,
vol.Inclusive(
CONF_BLUE,
"individual_colors",
msg="'red', 'green' and 'blue' are required for individual colors configuration",
): INDIVIDUAL_COLOR_SCHEMA,
vol.Optional(CONF_WHITE): INDIVIDUAL_COLOR_SCHEMA,
},
vol.Exclusive(CONF_COLOR_ADDRESS, "color"): ga_list_validator,
vol.Optional(CONF_COLOR_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_COLOR_TEMP_ADDRESS): ga_list_validator,
vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): ga_list_validator,
vol.Optional(
CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE
): vol.All(vol.Upper, cv.enum(ColorTempModes)),
**HS_COLOR_SCHEMA,
vol.Exclusive(CONF_RGBW_ADDRESS, "color"): ga_list_validator,
vol.Optional(CONF_RGBW_STATE_ADDRESS): ga_list_validator,
vol.Exclusive(CONF_XYY_ADDRESS, "color"): ga_list_validator,
vol.Optional(CONF_XYY_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
),
vol.Any(
vol.Schema(
{vol.Required(KNX_ADDRESS): object},
extra=vol.ALLOW_EXTRA,
),
vol.Schema( # brightness addresses are required in INDIVIDUAL_COLOR_SCHEMA
{vol.Required(CONF_INDIVIDUAL_COLORS): object},
extra=vol.ALLOW_EXTRA,
),
msg="either 'address' or 'individual_colors' is required",
),
vol.Any(
vol.Schema( # 'brightness' is non-optional for hs-color
{
vol.Inclusive(
CONF_BRIGHTNESS_ADDRESS, "hs_color", msg=_hs_color_inclusion_msg
): object,
vol.Inclusive(
CONF_HUE_ADDRESS, "hs_color", msg=_hs_color_inclusion_msg
): object,
vol.Inclusive(
CONF_SATURATION_ADDRESS, "hs_color", msg=_hs_color_inclusion_msg
): object,
},
extra=vol.ALLOW_EXTRA,
),
vol.Schema( # hs-colors not used
{
vol.Optional(CONF_HUE_ADDRESS): None,
vol.Optional(CONF_SATURATION_ADDRESS): None,
},
extra=vol.ALLOW_EXTRA,
),
msg=_hs_color_inclusion_msg,
),
)
class NotifySchema(KNXPlatformSchema):
"""Voluptuous schema for KNX notifications."""
PLATFORM_NAME = SupportedPlatforms.NOTIFY.value
DEFAULT_NAME = "KNX Notify"
ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(KNX_ADDRESS): ga_validator,
}
)
class NumberSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX numbers."""
PLATFORM_NAME = SupportedPlatforms.NUMBER.value
CONF_MAX = "max"
CONF_MIN = "min"
CONF_STEP = "step"
DEFAULT_NAME = "KNX Number"
NUMBER_MODES: Final = [MODE_AUTO, MODE_BOX, MODE_SLIDER]
ENTITY_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_RESPOND_TO_READ, default=False): cv.boolean,
vol.Optional(CONF_MODE, default=MODE_AUTO): vol.In(NUMBER_MODES),
vol.Required(CONF_TYPE): numeric_type_validator,
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_MAX): vol.Coerce(float),
vol.Optional(CONF_MIN): vol.Coerce(float),
vol.Optional(CONF_STEP): cv.positive_float,
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
),
number_limit_sub_validator,
)
class SceneSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX scenes."""
PLATFORM_NAME = SupportedPlatforms.SCENE.value
CONF_SCENE_NUMBER = "scene_number"
DEFAULT_NAME = "KNX SCENE"
ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Required(CONF_SCENE_NUMBER): vol.All(
vol.Coerce(int), vol.Range(min=1, max=64)
),
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
)
class SelectSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX selects."""
PLATFORM_NAME = SupportedPlatforms.SELECT.value
CONF_OPTION = "option"
CONF_OPTIONS = "options"
CONF_PAYLOAD = "payload"
CONF_PAYLOAD_LENGTH = "payload_length"
DEFAULT_NAME = "KNX Select"
ENTITY_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
vol.Optional(CONF_RESPOND_TO_READ, default=False): cv.boolean,
vol.Required(CONF_PAYLOAD_LENGTH): vol.All(
vol.Coerce(int), vol.Range(min=0, max=14)
),
vol.Required(CONF_OPTIONS): [
{
vol.Required(CONF_OPTION): vol.Coerce(str),
vol.Required(CONF_PAYLOAD): cv.positive_int,
}
],
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
),
select_options_sub_validator,
)
class SensorSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX sensors."""
PLATFORM_NAME = SupportedPlatforms.SENSOR.value
CONF_ALWAYS_CALLBACK = "always_callback"
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
DEFAULT_NAME = "KNX Sensor"
ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
vol.Optional(CONF_ALWAYS_CALLBACK, default=False): cv.boolean,
vol.Optional(CONF_STATE_CLASS): STATE_CLASSES_SCHEMA,
vol.Required(CONF_TYPE): sensor_type_validator,
vol.Required(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
)
class SwitchSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX switches."""
PLATFORM_NAME = SupportedPlatforms.SWITCH.value
CONF_INVERT = CONF_INVERT
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
DEFAULT_NAME = "KNX Switch"
ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_INVERT, default=False): cv.boolean,
vol.Optional(CONF_RESPOND_TO_READ, default=False): cv.boolean,
vol.Required(KNX_ADDRESS): ga_list_validator,
vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
)
class WeatherSchema(KNXPlatformSchema):
"""Voluptuous schema for KNX weather station."""
PLATFORM_NAME = SupportedPlatforms.WEATHER.value
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_KNX_TEMPERATURE_ADDRESS = "address_temperature"
CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS = "address_brightness_south"
CONF_KNX_BRIGHTNESS_EAST_ADDRESS = "address_brightness_east"
CONF_KNX_BRIGHTNESS_WEST_ADDRESS = "address_brightness_west"
CONF_KNX_BRIGHTNESS_NORTH_ADDRESS = "address_brightness_north"
CONF_KNX_WIND_SPEED_ADDRESS = "address_wind_speed"
CONF_KNX_WIND_BEARING_ADDRESS = "address_wind_bearing"
CONF_KNX_RAIN_ALARM_ADDRESS = "address_rain_alarm"
CONF_KNX_FROST_ALARM_ADDRESS = "address_frost_alarm"
CONF_KNX_WIND_ALARM_ADDRESS = "address_wind_alarm"
CONF_KNX_DAY_NIGHT_ADDRESS = "address_day_night"
CONF_KNX_AIR_PRESSURE_ADDRESS = "address_air_pressure"
CONF_KNX_HUMIDITY_ADDRESS = "address_humidity"
DEFAULT_NAME = "KNX Weather Station"
ENTITY_SCHEMA = vol.All(
# deprecated since 2021.6
cv.deprecated("create_sensors"),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
vol.Required(CONF_KNX_TEMPERATURE_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_EAST_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_WEST_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_BRIGHTNESS_NORTH_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_WIND_SPEED_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_WIND_BEARING_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_RAIN_ALARM_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_FROST_ALARM_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_WIND_ALARM_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_DAY_NIGHT_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_AIR_PRESSURE_ADDRESS): ga_list_validator,
vol.Optional(CONF_KNX_HUMIDITY_ADDRESS): ga_list_validator,
vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
}
),
)
|
aronsky/home-assistant
|
homeassistant/components/knx/schema.py
|
Python
|
apache-2.0
| 33,511
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
app = Flask(__name__)
from client import ThriftClient
import struct
def _encode_int(n):
"""
This encodes an integer as a string of bytes in a 4-byte
big-endian format for efficient network transfer and HBase storage
:param n: the integer to encode
:return: A string of 4 bytes representing the integer
"""
return struct.pack(">i", n)
def _decode_int(s):
"""
Decodes a 4-byte string representing a big-endian integer
into an int.
:param s: A 4 byte string representing an integer
:return: The integer the string passed represents
"""
return struct.unpack('>i', s)[0]
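# Round-trip example for the helpers above (illustrative): _encode_int(1025)
# packs to the four bytes 00 00 04 01 (big-endian), and
# _decode_int(_encode_int(1025)) == 1025.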
@app.route('/<table>/<key>/<column>/<type_>', methods=['GET', 'POST', 'DELETE'])
def get(table, key, column, type_):
"""
Handle our incoming REST requests to interface with Bigtable.
For POST or DELETE, we dispatch to other private methods.
For GET, we handle right in this method.
:param table: The table name we would like to interface with
:param key: The row key of the row we are getting or mutating
:param column: The fully qualified column name, including the column family
prefix in the standard cf:column_name format
:param type_: 'str' to store the byte string directly, or 'int' to
parse the string as an integer and store it as an integer
:return: A string indicating the result of the call.
"""
if request.method == 'POST':
_put(table, key, column, request.get_data(), type_)
return "Updated."
if request.method == 'DELETE':
_delete_column(table, key, column)
return "Deleted."
with ThriftClient() as client:
value = client.get_row(table, key)
if not value:
return "Not found"
value = value[0].columns[column].value
if type_ == 'int':
value = _decode_int(value)
return str(value)
def _put(table, row_key, column, value, type_):
""" Puts a cell in an Hbase row
:param table: The name of the table
:param row_key: The key of the row we want to put a value in
:param column: The column name including the column family with
the colon format, such as 'cf:count'
:param value: The array of bytes (using Python's string type)
to insert as the value for this cell
:param type_: 'str' or 'int'. If 'int', the value will be serialized as a
4-byte stream.
:return: None
"""
with ThriftClient() as client:
if type_ == 'int':
value = _encode_int(int(value))
client.put_row(table, row_key, column, value)
def _delete_column(table, row_key, column):
"""
Deletes a column from a row, and the whole row if it's the only column
:param table: The name of the table
:param row_key: The key of the row we want to put a value in
:param column: The column name including the column family with the colon
format, such as 'cf:count'
:return: None
"""
with ThriftClient() as client:
client.delete_column(table, row_key, column)
# set debug=True in the run() call to see better debugging messages
if __name__ == '__main__':
app.run()
|
nlpraveennl/cloud-bigtable-examples
|
python/thrift/flask_thrift.py
|
Python
|
apache-2.0
| 3,774
|
# ~*~ coding: utf-8 ~*~
__author__ = 'Kamo Petrosyan'
"""
Django settings for virtenviro project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# Application definition
# Change SITE_ID in your sample_settings.py
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
INSTALLED_APPS = (
'mptt',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'filebrowser',
'datetimewidget',
'virtenviro',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.cache.FetchFromCacheMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
#'django.middleware.cache.CacheMiddleware',
)
#CACHE_BACKEND = 'file:///tmp/django_cache'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'ru'
LANGUAGES = (
('ru', 'Russian'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_ORDER_STATUS = u'Не подтвержденная'  # Russian for "Not confirmed"
|
Haikson/virtenviro
|
virtenviro/required_settings.py
|
Python
|
apache-2.0
| 2,252
|
raise AssertionError(
'should not be executed as name starts with an underscore'
)
|
DramaFever/sst
|
src/sst/selftests/_ignored.py
|
Python
|
apache-2.0
| 87
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"FieldErrorEnum",},
)
class FieldErrorEnum(proto.Message):
r"""Container for enum describing possible field errors.
"""
class FieldError(proto.Enum):
r"""Enum describing possible field errors."""
UNSPECIFIED = 0
UNKNOWN = 1
REQUIRED = 2
IMMUTABLE_FIELD = 3
INVALID_VALUE = 4
VALUE_MUST_BE_UNSET = 5
REQUIRED_NONEMPTY_LIST = 6
FIELD_CANNOT_BE_CLEARED = 7
BLOCKED_VALUE = 9
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleads/google-ads-python
|
google/ads/googleads/v9/errors/types/field_error.py
|
Python
|
apache-2.0
| 1,275
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetConversation
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Conversations_GetConversation_async]
from google.cloud import dialogflow_v2
async def sample_get_conversation():
# Create a client
client = dialogflow_v2.ConversationsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.GetConversationRequest(
name="name_value",
)
# Make the request
response = await client.get_conversation(request=request)
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_Conversations_GetConversation_async]
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_conversations_get_conversation_async.py
|
Python
|
apache-2.0
| 1,511
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
from setuptools import find_packages, setup # pytype: disable=import-error
# Package meta-data.
NAME = 'importlab'
DESCRIPTION = 'A library to calculate python dependency graphs.'
URL = 'https://github.com/google/importlab'
EMAIL = 'pytype-dev@google.com'
AUTHOR = 'Google Inc.'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.7'
REQUIRED = [
'networkx>=2',
]
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
PACKAGES = find_packages(exclude=('tests',))
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
maintainer=AUTHOR,
maintainer_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=PACKAGES,
scripts=['bin/importlab'],
install_requires=REQUIRED,
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development',
],
)
|
google/importlab
|
setup.py
|
Python
|
apache-2.0
| 1,658
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.test import services
LOG = logging.getLogger(__name__)
class TestSwiftBasicOps(manager.OfficialClientTest):
"""
Test swift with the following operations:
* get swift stat.
* create container.
* upload a file to the created container.
* list container's objects and assure that the uploaded file is present.
* delete object from container.
* list container's objects and assure that the deleted file is gone.
* delete a container.
* list containers and assure that the deleted container is gone.
"""
@classmethod
def setUpClass(cls):
cls.set_network_resources()
super(TestSwiftBasicOps, cls).setUpClass()
if not cls.config.service_available.swift:
skip_msg = ("%s skipped as swift is not available" %
cls.__name__)
raise cls.skipException(skip_msg)
def _get_swift_stat(self):
"""get swift status for our user account."""
self.object_storage_client.get_account()
LOG.debug('Swift status information obtained successfully')
def _create_container(self, container_name=None):
name = container_name or rand_name('swift-scenario-container')
self.object_storage_client.put_container(name)
# look for the container to assure it is created
self._list_and_check_container_objects(name)
LOG.debug('Container %s created' % (name))
return name
def _delete_container(self, container_name):
self.object_storage_client.delete_container(container_name)
LOG.debug('Container %s deleted' % (container_name))
def _upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or rand_name('swift-scenario-object')
self.object_storage_client.put_object(container_name, obj_name,
rand_name('obj_data'),
content_type='text/plain')
return obj_name
def _delete_object(self, container_name, filename):
self.object_storage_client.delete_object(container_name, filename)
self._list_and_check_container_objects(container_name,
not_present_obj=[filename])
def _list_and_check_container_objects(self, container_name, present_obj=[],
not_present_obj=[]):
"""
List objects for a given container and assert which are present and
which are not.
"""
meta, response = self.object_storage_client.get_container(
container_name)
# create a list with file name only
object_list = [obj['name'] for obj in response]
if present_obj:
for obj in present_obj:
self.assertIn(obj, object_list)
if not_present_obj:
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
@services('object')
def test_swift_basic_ops(self):
self._get_swift_stat()
container_name = self._create_container()
obj_name = self._upload_object_to_container(container_name)
self._list_and_check_container_objects(container_name, [obj_name])
self._delete_object(container_name, obj_name)
self._delete_container(container_name)
|
BeenzSyed/tempest
|
tempest/scenario/test_swift_basic_ops.py
|
Python
|
apache-2.0
| 4,130
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
policy_data = """
{
"admin_api": "is_admin:True",
"cells_scheduler_filter:TargetCellFilter": "is_admin:True",
"context_is_admin": "role:admin or role:administrator",
"compute:create": "",
"compute:create:attach_network": "",
"compute:create:attach_volume": "",
"compute:get": "",
"compute:get_all": "",
"compute:get_all_tenants": "",
"compute:update": "",
"compute:get_instance_metadata": "",
"compute:get_all_instance_metadata": "",
"compute:update_instance_metadata": "",
"compute:delete_instance_metadata": "",
"compute:get_instance_faults": "",
"compute:get_diagnostics": "",
"compute:get_lock": "",
"compute:lock": "",
"compute:unlock": "",
"compute:get_vnc_console": "",
"compute:get_spice_console": "",
"compute:get_console_output": "",
"compute:associate_floating_ip": "",
"compute:reset_network": "",
"compute:inject_network_info": "",
"compute:add_fixed_ip": "",
"compute:remove_fixed_ip": "",
"compute:attach_volume": "",
"compute:detach_volume": "",
"compute:inject_file": "",
"compute:set_admin_password": "",
"compute:rescue": "",
"compute:unrescue": "",
"compute:suspend": "",
"compute:resume": "",
"compute:pause": "",
"compute:unpause": "",
"compute:start": "",
"compute:stop": "",
"compute:resize": "",
"compute:confirm_resize": "",
"compute:revert_resize": "",
"compute:rebuild": "",
"compute:reboot": "",
"compute:snapshot": "",
"compute:backup": "",
"compute:shelve": "",
"compute:shelve_offload": "",
"compute:unshelve": "",
"compute:security_groups:add_to_instance": "",
"compute:security_groups:remove_from_instance": "",
"compute:delete": "",
"compute:soft_delete": "",
"compute:force_delete": "",
"compute:restore": "",
"compute_extension:accounts": "",
"compute_extension:admin_actions:pause": "",
"compute_extension:admin_actions:unpause": "",
"compute_extension:admin_actions:suspend": "",
"compute_extension:admin_actions:resume": "",
"compute_extension:admin_actions:lock": "",
"compute_extension:admin_actions:unlock": "",
"compute_extension:admin_actions:resetNetwork": "",
"compute_extension:admin_actions:injectNetworkInfo": "",
"compute_extension:admin_actions:createBackup": "",
"compute_extension:admin_actions:migrateLive": "",
"compute_extension:admin_actions:resetState": "",
"compute_extension:admin_actions:migrate": "",
"compute_extension:v3:os-admin-actions:pause": "",
"compute_extension:v3:os-admin-actions:unpause": "",
"compute_extension:v3:os-admin-actions:suspend": "",
"compute_extension:v3:os-admin-actions:resume": "",
"compute_extension:v3:os-admin-actions:lock": "",
"compute_extension:v3:os-admin-actions:unlock": "",
"compute_extension:v3:os-admin-actions:resetNetwork": "",
"compute_extension:v3:os-admin-actions:injectNetworkInfo": "",
"compute_extension:v3:os-admin-actions:createBackup": "",
"compute_extension:v3:os-admin-actions:migrateLive": "",
"compute_extension:v3:os-admin-actions:resetState": "",
"compute_extension:v3:os-admin-actions:migrate": "",
"compute_extension:aggregates": "",
"compute_extension:v3:os-aggregates": "",
"compute_extension:agents": "",
"compute_extension:v3:os-agents": "",
"compute_extension:attach_interfaces": "",
"compute_extension:v3:os-attach-interfaces": "",
"compute_extension:baremetal_nodes": "",
"compute_extension:cells": "",
"compute_extension:v3:os-cells": "",
"compute_extension:certificates": "",
"compute_extension:v3:os-certificates": "",
"compute_extension:cloudpipe": "",
"compute_extension:cloudpipe_update": "",
"compute_extension:config_drive": "",
"compute_extension:v3:os-config-drive": "",
"compute_extension:console_output": "",
"compute_extension:v3:os-console-output": "",
"compute_extension:consoles": "",
"compute_extension:v3:os-remote-consoles": "",
"compute_extension:coverage_ext": "is_admin:True",
"compute_extension:v3:os-coverage": "is_admin:True",
"compute_extension:createserverext": "",
"compute_extension:deferred_delete": "",
"compute_extension:v3:os-deferred-delete": "",
"compute_extension:disk_config": "",
"compute_extension:evacuate": "is_admin:True",
"compute_extension:v3:os-evacuate": "is_admin:True",
"compute_extension:extended_server_attributes": "",
"compute_extension:v3:os-extended-server-attributes": "",
"compute_extension:extended_status": "",
"compute_extension:v3:os-extended-status": "",
"compute_extension:extended_availability_zone": "",
"compute_extension:v3:os-extended-availability-zone": "",
"compute_extension:extended_ips": "",
"compute_extension:extended_ips_mac": "",
"compute_extension:extended_vif_net": "",
"compute_extension:extended_volumes": "",
"compute_extension:v3:os-extended-volumes": "",
"compute_extension:v3:os-extended-volumes:attach": "",
"compute_extension:v3:os-extended-volumes:detach": "",
"compute_extension:fixed_ips": "",
"compute_extension:v3:os-fixed-ips": "",
"compute_extension:flavor_access": "",
"compute_extension:v3:os-flavor-access": "",
"compute_extension:flavor_disabled": "",
"compute_extension:v3:os-flavor-disabled": "",
"compute_extension:flavor_rxtx": "",
"compute_extension:v3:os-flavor-rxtx": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
"compute_extension:flavorextraspecs:index": "",
"compute_extension:flavorextraspecs:show": "",
"compute_extension:flavorextraspecs:create": "is_admin:True",
"compute_extension:flavorextraspecs:update": "is_admin:True",
"compute_extension:flavorextraspecs:delete": "is_admin:True",
"compute_extension:flavormanage": "",
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
"compute_extension:floating_ips_bulk": "",
"compute_extension:fping": "",
"compute_extension:fping:all_tenants": "is_admin:True",
"compute_extension:v3:os-fping": "",
"compute_extension:v3:os-fping:all_tenants": "is_admin:True",
"compute_extension:hide_server_addresses": "",
"compute_extension:v3:os-hide-server-addresses": "",
"compute_extension:hosts": "",
"compute_extension:v3:os-hosts": "",
"compute_extension:hypervisors": "",
"compute_extension:v3:os-hypervisors": "rule:admin_api",
"compute_extension:image_size": "",
"compute_extension:v3:os-image-metadata": "",
"compute_extension:v3:os-images": "",
"compute_extension:instance_actions": "",
"compute_extension:v3:os-instance-actions": "",
"compute_extension:instance_actions:events": "is_admin:True",
"compute_extension:v3:os-instance-actions:events": "is_admin:True",
"compute_extension:instance_usage_audit_log": "",
"compute_extension:v3:os-instance-usage-audit-log": "",
"compute_extension:keypairs": "",
"compute_extension:keypairs:index": "",
"compute_extension:keypairs:show": "",
"compute_extension:keypairs:create": "",
"compute_extension:keypairs:delete": "",
"compute_extension:v3:os-keypairs": "",
"compute_extension:v3:os-keypairs:index": "",
"compute_extension:v3:os-keypairs:show": "",
"compute_extension:v3:os-keypairs:create": "",
"compute_extension:v3:os-keypairs:delete": "",
"compute_extension:multinic": "",
"compute_extension:v3:os-multinic": "",
"compute_extension:networks": "",
"compute_extension:networks:view": "",
"compute_extension:networks_associate": "",
"compute_extension:os-tenant-networks": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quotas:delete": "",
"compute_extension:v3:os-quota-sets:show": "",
"compute_extension:v3:os-quota-sets:update": "",
"compute_extension:v3:os-quota-sets:delete": "",
"compute_extension:quota_classes": "",
"compute_extension:v3:os-quota-class-sets": "",
"compute_extension:rescue": "",
"compute_extension:v3:os-rescue": "",
"compute_extension:security_group_default_rules": "",
"compute_extension:security_groups": "",
"compute_extension:v3:os-security-groups": "",
"compute_extension:server_diagnostics": "",
"compute_extension:v3:os-server-diagnostics": "",
"compute_extension:server_password": "",
"compute_extension:v3:os-server-password": "",
"compute_extension:server_usage": "",
"compute_extension:services": "",
"compute_extension:v3:os-services": "",
"compute_extension:shelve": "",
"compute_extension:shelveOffload": "",
"compute_extension:v3:os-shelve:shelve": "",
"compute_extension:v3:os-shelve:shelve_offload": "",
"compute_extension:simple_tenant_usage:show": "",
"compute_extension:v3:os-simple-tenant-usage:show": "",
"compute_extension:simple_tenant_usage:list": "",
"compute_extension:v3:os-simple-tenant-usage:list": "",
"compute_extension:unshelve": "",
"compute_extension:v3:os-shelve:unshelve": "",
"compute_extension:users": "",
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
"compute_extension:volume_attachments:index": "",
"compute_extension:volume_attachments:show": "",
"compute_extension:volume_attachments:create": "",
"compute_extension:volume_attachments:delete": "",
"compute_extension:volumetypes": "",
"compute_extension:zones": "",
"compute_extension:availability_zone:list": "",
"compute_extension:v3:os-availability-zone:list": "",
"compute_extension:availability_zone:detail": "is_admin:True",
"compute_extension:v3:os-availability-zone:detail": "is_admin:True",
"compute_extension:used_limits_for_admin": "is_admin:True",
"compute_extension:migrations:index": "is_admin:True",
"volume:create": "",
"volume:get": "",
"volume:get_all": "",
"volume:get_volume_metadata": "",
"volume:delete": "",
"volume:update": "",
"volume:delete_volume_metadata": "",
"volume:update_volume_metadata": "",
"volume:attach": "",
"volume:detach": "",
"volume:reserve_volume": "",
"volume:unreserve_volume": "",
"volume:begin_detaching": "",
"volume:roll_detaching": "",
"volume:check_attach": "",
"volume:check_detach": "",
"volume:initialize_connection": "",
"volume:terminate_connection": "",
"volume:create_snapshot": "",
"volume:delete_snapshot": "",
"volume:get_snapshot": "",
"volume:get_all_snapshots": "",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_actions:upload_image": "",
"volume_extension:types_manage": "",
"volume_extension:types_extra_specs": "",
"network:get_all": "",
"network:get": "",
"network:create": "",
"network:delete": "",
"network:associate": "",
"network:disassociate": "",
"network:get_vifs_by_instance": "",
"network:get_vif_by_mac_address": "",
"network:allocate_for_instance": "",
"network:deallocate_for_instance": "",
"network:validate_networks": "",
"network:get_instance_uuids_by_ip_filter": "",
"network:get_instance_id_by_floating_address": "",
"network:setup_networks_on_host": "",
"network:get_floating_ip": "",
"network:get_floating_ip_pools": "",
"network:get_floating_ip_by_address": "",
"network:get_floating_ips_by_project": "",
"network:get_floating_ips_by_fixed_address": "",
"network:allocate_floating_ip": "",
"network:deallocate_floating_ip": "",
"network:associate_floating_ip": "",
"network:disassociate_floating_ip": "",
"network:release_floating_ip": "",
"network:migrate_instance_start": "",
"network:migrate_instance_finish": "",
"network:get_fixed_ip": "",
"network:get_fixed_ip_by_address": "",
"network:add_fixed_ip_to_instance": "",
"network:remove_fixed_ip_from_instance": "",
"network:add_network_to_project": "",
"network:get_instance_nw_info": "",
"network:get_dns_domains": "",
"network:add_dns_entry": "",
"network:modify_dns_entry": "",
"network:delete_dns_entry": "",
"network:get_dns_entries_by_address": "",
"network:get_dns_entries_by_name": "",
"network:create_private_dns_domain": "",
"network:create_public_dns_domain": "",
"network:delete_dns_domain": ""
}
"""
|
Brocade-OpenSource/OpenStack-DNRM-Nova
|
nova/tests/fake_policy.py
|
Python
|
apache-2.0
| 13,474
|
from core import dates
from core.peers import AllowedPeers
from db.db_manager import db_sync_manager
from delegate.geni.v3 import exceptions
from core.config import ConfParser
from os.path import abspath, dirname, join
import ast
import core
import xmlrpclib
import sys
logger = core.log.getLogger("rmadaptor")
def format_uri(protocol, user, password, address, port, endpoint):
uri = "%s://" % str(protocol)
if user and password:
uri += "%s:%s@" % (str(user), str(password),)
uri += "%s:%s" % (str(address), str(port))
if endpoint and len(endpoint):
if endpoint[0] == "/":
endpoint = endpoint[1:]
uri += "/%s" % str(endpoint)
return uri
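# A minimal, hypothetical sketch of how format_uri() can be exercised; the
# credentials, address and endpoint below are placeholder values only.
def _example_format_uri():
    # Yields "https://user:secret@127.0.0.1:8440/xmlrpc/geni/3/"
    return format_uri("https", "user", "secret", "127.0.0.1", "8440",
                      "/xmlrpc/geni/3/")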
class SafeTransportWithCert(xmlrpclib.SafeTransport):
"""
Helper class to force the right certificate for the transport class.
"""
def __init__(self, key_path, cert_path):
if sys.version_info >= (2, 7, 9):
import ssl
xmlrpclib.SafeTransport.__init__(
self,
context=ssl._create_unverified_context())
else:
            # No super() call: xmlrpclib.SafeTransport is an old-style class
xmlrpclib.SafeTransport.__init__(self)
self._key_path = key_path
self._cert_path = cert_path
def make_connection(self, host):
"""
This method will automatically be called by the ServerProxy class
when a transport channel is needed.
"""
host_with_cert = (host,
{"key_file": self._key_path,
"cert_file": self._cert_path})
        # No super() call: xmlrpclib.SafeTransport is an old-style class
return xmlrpclib.SafeTransport.make_connection(self, host_with_cert)
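# Hypothetical sketch of how SafeTransportWithCert is typically plugged into
# xmlrpclib.ServerProxy; the URI and certificate paths are placeholders, not
# values taken from this project's configuration.
def _example_safe_transport(uri="https://127.0.0.1:8440/xmlrpc/geni/3/"):
    transport = SafeTransportWithCert("/path/to/server.key",
                                      "/path/to/server.crt")
    # Every call issued through this proxy presents the given client cert.
    return xmlrpclib.ServerProxy(uri, transport=transport)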
class AdaptorFactory(xmlrpclib.ServerProxy):
def __init__(self, uri):
try:
abs_path = dirname(dirname(dirname(abspath(__file__))))
abs_path = join(abs_path, "../../")
trusted_certs =\
abspath(join(abs_path, ast.literal_eval(
ConfParser("auth.conf").get("certificates").
get("cert_root"))))
root_cert = join(dirname(trusted_certs), "server.crt")
root_cert_key = join(dirname(trusted_certs), "server.key")
transport = SafeTransportWithCert(root_cert_key, root_cert)
except Exception as e:
logger.error("Failed to load server cert or key from " +
str(trusted_certs) + ". Details: " + str(e))
xmlrpclib.ServerProxy.__init__(self, uri, transport=transport)
@staticmethod
def get_am_info(uri, id):
client = SFAClient(uri)
(type, version) = client.get_version()
db_sync_manager.update_peer_info(id, type, version)
return (type, version)
@staticmethod
def create(type, protocol, user, password, address, port, endpoint,
id, am_type, am_version):
uri = format_uri(protocol, user, password, address, port, endpoint)
if am_type is None or am_version is None:
logger.debug("We need to update the AM info for this RM...")
(am_type, am_version) = AdaptorFactory.get_am_info(uri, id)
logger.debug("AM type: %s, version: %s" % (am_type, am_version,))
accepted_types = ["geni", "GENI", "geni_sfa", "GENI_SFA", "sfa", "SFA"]
allowed_peers = AllowedPeers.get_peers()
if am_type in accepted_types and int(am_version) <= 2:
if type == allowed_peers.get("PEER_CRM"):
return CRMGeniv2Adaptor(uri)
elif type == allowed_peers.get("PEER_SDNRM"):
return SDNRMGeniv2Adaptor(uri)
elif am_type in accepted_types and int(am_version) == 3:
if type == allowed_peers.get("PEER_CRM"):
return CRMGeniv3Adaptor(uri)
elif type == allowed_peers.get("PEER_SDNRM"):
return SDNRMGeniv3Adaptor(uri)
elif type == allowed_peers.get("PEER_SERM"):
return SERMGeniv3Adaptor(uri)
elif type == allowed_peers.get("PEER_TNRM"):
return TNRMGeniv3Adaptor(uri)
elif type == allowed_peers.get("PEER_RO"):
return ROGeniv3Adaptor(uri)
e = "Resource Manager type not implemented yet! "
e += "Details: type=%s,version=%s" % (str(am_type), str(am_version),)
raise exceptions.GeneralError(e)
@staticmethod
def create_from_db(peer_db):
"""
Create adaptor from information in DB.
@return adaptor_object
@return adaptor_url (useful for domain identification)
"""
adaptor_endpoint = peer_db.get("endpoint", "")
if adaptor_endpoint and len(adaptor_endpoint):
if adaptor_endpoint[0] == "/":
adaptor_endpoint = adaptor_endpoint[1:]
adaptor_uri = "%s://%s:%s" % (peer_db.get("protocol"),
peer_db.get("address"),
peer_db.get("port"))
if adaptor_endpoint:
adaptor_uri += "/%s" % str(adaptor_endpoint)
adaptor = AdaptorFactory.create(
peer_db.get("type"), peer_db.get("protocol"), peer_db.get("user"),
peer_db.get("password"), peer_db.get("address"),
peer_db.get("port"), peer_db.get("endpoint"), peer_db.get("_id"),
peer_db.get("am_type"), peer_db.get("am_version"))
return (adaptor, adaptor_uri)
@staticmethod
def geni_v3_credentials():
from core.utils import calls
try:
(text, ucredential) = calls.getusercred(geni_api=3)
return ucredential["geni_value"]
except Exception as e:
logger.error("Unable to get user-cred from CH: %s" % (e,))
raise e
class SFAClient(AdaptorFactory):
def __init__(self, uri, type=None, version=None):
AdaptorFactory.__init__(self, uri)
self.uri = uri
self.geni_type = type
self.geni_api_version = version
def get_version(self):
try:
logger.debug("Get the required information of the peer")
rspec_version = self.GetVersion()
logger.debug("Rspec version: %s" % (rspec_version,))
values = rspec_version.get("value")
# We need at least the type and the (supported) request version
self.geni_type = rspec_version.get("code").get("am_type")
self.geni_api_version = values.get("geni_api")
if not self.geni_type: # we assume GENI as default
self.geni_type = "geni"
return (self.geni_type, self.geni_api_version)
except Exception as e:
raise exceptions.RPCError("SFA GetVersion failure: %s" % str(e))
def __str__(self):
return "[%s, %s, %s]" %\
(self.uri, self.geni_type, self.geni_api_version)
def api_version(self):
return self.geni_api_version
def sfa_type(self):
return self.geni_type
class SFAv2Client(SFAClient):
def __init__(self, uri):
SFAClient.__init__(self, uri, type="sfa", version=2)
def format_options(self, available):
return {"geni_available": available,
"geni_compress": False,
"geni_rspec_version": {
"type": self.geni_type,
"version": self.geni_api_version, }}
class GENIv3Client(SFAClient):
def __init__(self, uri, typee):
SFAClient.__init__(self, uri, type="geni", version=3)
self.typee = typee
logger.info("GENIv3Client %s created." % (self.typee,))
def format_options(self, available=None, compress=None, end_time=None,
best_effort=None, users=[]):
options = {"geni_rspec_version": {"type": "geni",
"version": 3, }}
if available:
options["geni_available"] = available
if compress:
options["geni_compress"] = compress
if end_time:
# Convert to rfc3339 prior to sending
options["geni_end_time"] = dates.datetime_to_rfc3339(end_time)
if best_effort:
options["geni_best_effort"] = best_effort
if users:
options["geni_users"] = users
return options
def format_credentials(self, credentials):
# Credentials must be sent in the proper format
        formatted_creds = []
        if not isinstance(credentials, list):
            credentials = [credentials]
        for cred in credentials:
            if not isinstance(cred, dict):
                formatted_creds.append({"geni_value": cred,
                                        "geni_type": "geni_sfa",
                                        "geni_version": "3"})
            else:
                formatted_creds.append(cred)
        return formatted_creds
def list_resources(self, credentials, available, inner_call=True):
options = self.format_options(available=available, compress=False)
options["inner_call"] = inner_call
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
logger.debug("%s Options: %s" % (self.typee, options,))
try:
params = [credentials, options, ]
result = self.ListResources(*params)
logger.info("\n\n\n%s ListResources result=%s\n\n\n" %
(self.typee, result,))
return result
except Exception as e:
err = "%s ListResources failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
# -- Helpers
def __get_domain_urn_by_xmlrpcserver(self):
"""
Prepare filter parameters to get the domain.info from the xmlrpcserver
"""
domain_urn = ""
try:
# am_version = str(self.geni_api_version)
# am_type = str(self.geni_type)
            # Look for case-independent 'am_type' and numeric 'am_version'
# am_type_re = re.compile(am_type)
# filter_params = {"am_version": int(am_version),
# "am_type": am_type_re}
filter_params = {}
domain_urn = db_sync_manager.get_domain_urn_from_uri(
self.uri, filter_params)
except Exception as e:
logger.warning("get_domain_urn_from_uri failed: %s" % e)
return domain_urn
def __check_errors(self, result):
domain_urn = self.__get_domain_urn_by_xmlrpcserver()
if result.get("output") is not None:
return False, "Error detected in the server (%s @ %s): %s" %\
(self.typee, domain_urn, result.get("output"))
if "geni_slivers" in result.get("value"):
for s in result.get("value").get("geni_slivers"):
if s.get("geni_error"):
return False, "Error detected in a sliver (%s @ %s): %s" %\
(self.typee, domain_urn, s.get("geni_error"))
return True, ""
def allocate(self, slice_urn, credentials, rspec, end_time):
options = self.format_options(end_time=end_time)
logger.debug("%s Options: %s" % (self.typee, options,))
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
try:
params = [slice_urn, credentials, rspec, options, ]
result = self.Allocate(*params)
logger.info("\n\n\n%s Allocate result=%s\n\n\n" %
(self.typee, result,))
status, err = self.__check_errors(result)
if status is True:
return (result.get("value").get("geni_rspec"),
result.get("value").get("geni_slivers"))
except Exception as e:
err = "%s Allocate failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
def describe(self, urns, credentials):
options = self.format_options()
logger.debug("%s Options: %s" % (self.typee, options,))
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
try:
params = [urns, credentials, options, ]
result = self.Describe(*params)
logger.info("\n\n\n%s Describe result=%s\n\n\n" %
(self.typee, result,))
status, err = self.__check_errors(result)
if status is True:
return (result.get("value").get("geni_rspec"),
result.get("value").get("geni_urn"),
result.get("value").get("geni_slivers"))
except Exception as e:
err = "%s Describe failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
def renew(self, urns, credentials, expiration_time, best_effort):
options = self.format_options(best_effort=best_effort)
logger.debug("%s Options: %s" % (self.typee, options,))
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
try:
params = [urns, credentials, expiration_time, options, ]
result = self.Renew(*params)
logger.info("\n\n\n%s Renew result=%s\n\n\n" %
(self.typee, result,))
status, err = self.__check_errors(result)
if status is True:
return result.get("value")
except Exception as e:
err = "%s Renew failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
def status(self, urns, credentials):
options = self.format_options()
logger.debug("%s Options: %s" % (self.typee, options,))
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
try:
params = [urns, credentials, options, ]
result = self.Status(*params)
logger.info("\n\n\n%s Status result=%s\n\n\n" %
(self.typee, result,))
status, err = self.__check_errors(result)
if status is True:
return (result.get("value").get("geni_urn"),
result.get("value").get("geni_slivers"))
except Exception as e:
err = "%s Status failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
def perform_operational_action(self, urns, credentials, action,
best_effort):
options = self.format_options(best_effort=best_effort)
logger.debug("%s Options: %s" % (self.typee, options,))
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
try:
params = [urns, credentials, action, options, ]
result = self.PerformOperationalAction(*params)
logger.info("\n\n\n%s PerformOperationalAction result=%s\n\n\n" %
(self.typee, result,))
status, err = self.__check_errors(result)
if status is True:
return result.get("value")
except Exception as e:
err = "%s PerformOpAction failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
def delete(self, urns, credentials, best_effort):
options = self.format_options(best_effort=best_effort)
logger.debug("%s Options: %s" % (self.typee, options,))
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
try:
params = [urns, credentials, options, ]
result = self.Delete(*params)
logger.info("\n\n\n%s Delete result=%s\n\n\n" %
(self.typee, result,))
status, err = self.__check_errors(result)
if status is True:
return result.get("value")
except Exception as e:
err = "%s Delete failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
def provision(self, urns, credentials, best_effort, end_time, geni_users):
options = self.format_options(best_effort=best_effort,
end_time=end_time,
users=geni_users)
logger.debug("%s Options: %s" % (self.typee, options,))
# Credentials must be sent in the proper format
credentials = self.format_credentials(credentials)
try:
params = [urns, credentials, options, ]
result = self.Provision(*params)
logger.info("\n\n\n%s Provision result=%s\n\n\n" %
(self.typee, result,))
status, err = self.__check_errors(result)
if status is True:
return (result.get("value").get("geni_rspec"),
result.get("value").get("geni_slivers"))
except Exception as e:
err = "%s Provision failure: %s" % (self.typee, str(e))
raise exceptions.RPCError(err)
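# Sketch of the wire format built by format_credentials()/format_options()
# above, written out literally so the dictionaries are easy to see. No AM
# connection or client instance is assumed; the credential string is a
# placeholder.
def _example_geniv3_wire_format():
    formatted_credential = {"geni_value": "<signed-credential/>",
                            "geni_type": "geni_sfa",
                            "geni_version": "3"}
    options = {"geni_rspec_version": {"type": "geni", "version": 3},
               "geni_available": True}
    return [formatted_credential], options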
class CRMGeniv2Adaptor(SFAv2Client):
def __init__(self, uri):
SFAv2Client.__init__(self, uri)
raise exceptions.RPCError("CRMGeniv2Adaptor not supported!")
class SDNRMGeniv2Adaptor(SFAv2Client):
def __init__(self, uri):
SFAv2Client.__init__(self, uri)
raise exceptions.RPCError("SDNRMGeniv2Adaptor not supported!")
class CRMGeniv3Adaptor(GENIv3Client):
def __init__(self, uri):
GENIv3Client.__init__(self, uri, "CRMGeniv3")
class SDNRMGeniv3Adaptor(GENIv3Client):
def __init__(self, uri):
GENIv3Client.__init__(self, uri, "SDNRMGeniv3")
class SERMGeniv3Adaptor(GENIv3Client):
def __init__(self, uri):
GENIv3Client.__init__(self, uri, "SERMGeniv3")
class TNRMGeniv3Adaptor(GENIv3Client):
def __init__(self, uri):
GENIv3Client.__init__(self, uri, "TNRMGeniv3")
class ROGeniv3Adaptor(GENIv3Client):
def __init__(self, uri):
GENIv3Client.__init__(self, uri, "ROGeniv3")
|
ict-felix/stack
|
modules/resource/orchestrator/src/delegate/geni/v3/rm_adaptor.py
|
Python
|
apache-2.0
| 18,116
|
# -*- coding: utf-8 -*-
#
# tests/entidades/test_pis.py
#
# Copyright 2019 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from decimal import Decimal
import pytest
import cerberus
from satcfe.entidades import PISAliq
from satcfe.entidades import PISQtde
from satcfe.entidades import PISNT
from satcfe.entidades import PISSN
from satcfe.entidades import PISOutr
from satcfe.entidades import PISST
def test_PISAliq():
"""XML esperado:
.. sourcecode:: xml
<PISAliq>
<CST>01</CST>
<vBC>1.00</vBC>
<pPIS>0.0065</pPIS>
</PISAliq>
"""
pis = PISAliq(CST='01', vBC=Decimal('1.00'), pPIS=Decimal('0.0065'))
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISAliq'
assert el.find('CST').text == '01'
assert el.find('vBC').text == '1.00'
assert el.find('pPIS').text == '0.0065'
def test_PISQtde():
"""XML esperado:
.. sourcecode:: xml
<PISQtde>
<CST>03</CST>
<qBCProd>100.0000</qBCProd>
<vAliqProd>0.6500</vAliqProd>
</PISQtde>
"""
pis = PISQtde(
CST='03',
qBCProd=Decimal('100.0000'),
vAliqProd=Decimal('0.6500'))
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISQtde'
assert el.find('CST').text == '03'
assert el.find('qBCProd').text == '100.0000'
assert el.find('vAliqProd').text == '0.6500'
def test_PISNT():
"""XML esperado:
.. sourcecode:: xml
<PISNT>
<CST>04</CST>
</PISNT>
"""
pis = PISNT(CST='04')
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISNT'
assert el.find('CST').text == '04'
def test_PISSN():
"""XML esperado:
.. sourcecode:: xml
<PISSN>
<CST>49</CST>
</PISSN>
"""
pis = PISSN(CST='49')
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISSN'
assert el.find('CST').text == '49'
def test_PISOutr_simples_vBC():
"""XML esperado:
.. sourcecode:: xml
<PISOutr>
<CST>99</CST>
<vBC>1.00</vBC>
<pPIS>0.0065</pPIS>
</PISOutr>
"""
pis = PISOutr(CST='99', vBC=Decimal('1.00'), pPIS=Decimal('0.0065'))
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISOutr'
assert el.find('CST').text == '99'
assert el.find('vBC').text == '1.00'
assert el.find('pPIS').text == '0.0065'
def test_PISOutr_simples_qBCProd():
"""XML esperado:
.. sourcecode:: xml
<PISOutr>
<CST>99</CST>
<qBCProd>100.0000</qBCProd>
<vAliqProd>0.6500</vAliqProd>
</PISOutr>
"""
pis = PISOutr(
CST='99',
qBCProd=Decimal('100.0000'),
vAliqProd=Decimal('0.6500'))
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISOutr'
assert el.find('CST').text == '99'
assert el.find('qBCProd').text == '100.0000'
assert el.find('vAliqProd').text == '0.6500'
def test_PISOutr_vBC_sem_pPIS():
    # attribute vBC depends on pPIS, which was not provided
pis = PISOutr(CST='99', vBC=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISOutr_qBCProd_sem_vAliqProd():
    # attribute qBCProd depends on vAliqProd, which was not provided
pis = PISOutr(CST='99', qBCProd=Decimal('100.0000'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISOutr_sem_vBC_nem_qBCProd():
    # must fail because neither vBC nor qBCProd was provided
pis = PISOutr(CST='99')
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISOutr_vBC_e_qBCProd_sao_mutuamente_exclusivos():
    # must fail because only one of the two groups may be provided: either
    # vBC and pPIS, or qBCProd and vAliqProd
pis = PISOutr(
CST='99',
vBC=Decimal('1.00'),
pPIS=Decimal('1.00'),
qBCProd=Decimal('1.00'),
vAliqProd=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISOutr_pPIS_sem_vBC():
    # the pPIS attribute requires that vBC has been provided
pis = PISOutr(CST='99', pPIS=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISOutr_vAliqProd_sem_qBCProd():
    # the vAliqProd attribute requires that qBCProd has been provided
pis = PISOutr(CST='99', vAliqProd=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISST_simples_vBC():
"""XML esperado:
.. sourcecode:: xml
<PISST>
<vBC>1.00</vBC>
<pPIS>0.0065</pPIS>
</PISST>
"""
pis = PISST(vBC=Decimal('1.00'), pPIS=Decimal('0.0065'))
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISST'
assert el.find('vBC').text == '1.00'
assert el.find('pPIS').text == '0.0065'
def test_PISST_simples_qBCProd():
"""XML esperado:
.. sourcecode:: xml
<PISST>
<qBCProd>100.0000</qBCProd>
<vAliqProd>0.6500</vAliqProd>
</PISST>
"""
pis = PISST(
qBCProd=Decimal('100.0000'),
vAliqProd=Decimal('0.6500'))
el = pis._xml() # xml.etree.ElementTree.Element
assert el.tag == 'PISST'
assert el.find('qBCProd').text == '100.0000'
assert el.find('vAliqProd').text == '0.6500'
def test_PISST_vBC_sem_pPIS():
    # attribute vBC depends on pPIS, which was not provided
pis = PISST(vBC=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISST_qBCProd_sem_vAliqProd():
    # attribute qBCProd depends on vAliqProd, which was not provided
pis = PISST(qBCProd=Decimal('100.0000'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISST_sem_vBC_nem_qBCProd():
    # must fail because neither vBC nor qBCProd was provided
pis = PISST()
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISST_vBC_e_qBCProd_sao_mutuamente_exclusivos():
    # must fail because only one of the two groups may be provided: either
    # vBC and pPIS, or qBCProd and vAliqProd
pis = PISST(
vBC=Decimal('1.00'),
pPIS=Decimal('1.00'),
qBCProd=Decimal('1.00'),
vAliqProd=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISST_pPIS_sem_vBC():
    # the pPIS attribute requires that vBC has been provided
pis = PISST(pPIS=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
def test_PISST_vAliqProd_sem_qBCProd():
    # the vAliqProd attribute requires that qBCProd has been provided
pis = PISST(vAliqProd=Decimal('1.00'))
with pytest.raises(cerberus.DocumentError):
pis._xml()
|
base4sistemas/satcfe
|
tests/entidades/test_pis.py
|
Python
|
apache-2.0
| 7,584
|
"""Helps build config packages for installer-specific templates.
Takes in a bunch of configuration files, as well as functions to calculate the values/strings which
need to be put into the configuration.
Operates strictly:
- All parameters are strings. All things calculated / derived are strings.
- Every given parameter must map to some real config option.
- Every config option must be given only once.
- Defaults can be overridden. If no default is given, the parameter must be specified
- empty string is not the same as "not specified"
"""
import importlib.machinery
import json
import logging as log
import os
import os.path
import pprint
import textwrap
from copy import copy, deepcopy
from typing import List
import yaml
import gen.calc
import gen.internals
import gen.template
import gen.util
from gen.exceptions import ValidationError
from pkgpanda import PackageId
from pkgpanda.util import hash_checkout, json_prettyprint, load_string, split_by_token, write_json, write_yaml
# List of all roles all templates should have.
role_names = {"master", "slave", "slave_public"}
role_template = '/etc/mesosphere/roles/{}'
CLOUDCONFIG_KEYS = {'coreos', 'runcmd', 'apt_sources', 'root', 'mounts', 'disk_setup', 'fs_setup', 'bootcmd'}
PACKAGE_KEYS = {'package', 'root'}
def stringify_configuration(configuration: dict):
"""Create a stringified version of the complete installer configuration
to send to gen.generate()"""
gen_config = {}
for key, value in configuration.items():
if isinstance(value, list) or isinstance(value, dict):
log.debug("Caught %s for genconf configuration, transforming to JSON string: %s", type(value), value)
value = json.dumps(value)
elif isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
elif isinstance(value, int):
log.debug("Caught int for genconf configuration, transforming to string: %s", value)
value = str(value)
elif isinstance(value, str):
pass
else:
log.error("Invalid type for value of %s in config. Got %s, only can handle list, dict, "
"int, bool, and str", key, type(value))
raise Exception()
gen_config[key] = value
log.debug('Stringified configuration: \n{}'.format(gen_config))
return gen_config
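# Illustrative input/output for stringify_configuration() above; the keys are
# only examples of the type handling (list -> JSON string, bool/int -> str).
def _example_stringify_configuration():
    config = {
        'master_list': ['10.0.0.1', '10.0.0.2'],  # list -> JSON string
        'oauth_enabled': False,                   # bool -> 'false'
        'num_masters': 3,                         # int  -> '3'
        'cluster_name': 'demo',                   # str  -> unchanged
    }
    return stringify_configuration(config)
    # -> {'master_list': '["10.0.0.1", "10.0.0.2"]', 'oauth_enabled': 'false',
    #     'num_masters': '3', 'cluster_name': 'demo'}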
def add_roles(cloudconfig, roles):
for role in roles:
cloudconfig['write_files'].append({
"path": role_template.format(role),
"content": ""})
return cloudconfig
def add_units(cloudconfig, services, cloud_init_implementation='coreos'):
'''
Takes a services dict in the format of CoreOS cloud-init 'units' and
injects into cloudconfig a transformed version appropriate for the
cloud_init_implementation. See:
https://coreos.com/os/docs/latest/cloud-config.html for the CoreOS 'units'
specification. See: https://cloudinit.readthedocs.io/en/latest/index.html
for the Canonical implementation.
Parameters:
* cloudconfig is a dict
    * services is a list of dicts
* cloud_init_implementation is a string: 'coreos' or 'canonical'
'''
if cloud_init_implementation == 'canonical':
cloudconfig.setdefault('write_files', [])
cloudconfig.setdefault('runcmd', [])
for unit in services:
unit_name = unit['name']
if 'content' in unit:
write_files_entry = {'path': '/etc/systemd/system/{}'.format(unit_name),
'content': unit['content'],
'permissions': '0644'}
cloudconfig['write_files'].append(write_files_entry)
if 'enable' in unit and unit['enable']:
runcmd_entry = ['systemctl', 'enable', unit_name]
cloudconfig['runcmd'].append(runcmd_entry)
if 'command' in unit:
opts = []
if 'no_block' in unit and unit['no_block']:
opts.append('--no-block')
if unit['command'] in ['start', 'stop', 'reload', 'restart', 'try-restart', 'reload-or-restart',
'reload-or-try-restart']:
runcmd_entry = ['systemctl'] + opts + [unit['command'], unit_name]
else:
raise Exception("Unsupported unit command: {}".format(unit['command']))
cloudconfig['runcmd'].append(runcmd_entry)
elif cloud_init_implementation == 'coreos':
cloudconfig.setdefault('coreos', {}).setdefault('units', [])
cloudconfig['coreos']['units'] += services
else:
raise Exception("Parameter value '{}' is invalid for cloud_init_implementation".format(
cloud_init_implementation))
return cloudconfig
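# Illustrative walk-through of add_units() above for the 'canonical'
# implementation; the unit name and content are made-up placeholders.
def _example_add_units_canonical():
    services = [{
        'name': 'demo.service',
        'content': '[Service]\nExecStart=/bin/true\n',
        'enable': True,
        'command': 'start',
        'no_block': True,
    }]
    cc = add_units({}, services, cloud_init_implementation='canonical')
    # cc['write_files'] -> one entry for /etc/systemd/system/demo.service
    # cc['runcmd']      -> [['systemctl', 'enable', 'demo.service'],
    #                       ['systemctl', '--no-block', 'start', 'demo.service']]
    return cc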
# For converting util -> a namespace only.
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
def render_cloudconfig(data):
return "#cloud-config\n" + render_yaml(data)
utils = Bunch({
"role_template": role_template,
"add_roles": add_roles,
"role_names": role_names,
"add_services": None,
"add_units": add_units,
"render_cloudconfig": render_cloudconfig
})
def render_yaml(data):
return yaml.dump(data, default_style='|', default_flow_style=False)
# Recursively merge two Python dictionaries.
# If both base and addition contain the same key, that key's value will be
# merged if it is a dictionary.
# This is unlike the python dict.update() method which just overwrites matching
# keys.
def merge_dictionaries(base, additions):
base_copy = base.copy()
for k, v in additions.items():
try:
if k not in base:
base_copy[k] = v
continue
if isinstance(v, dict) and isinstance(base_copy[k], dict):
base_copy[k] = merge_dictionaries(base_copy.get(k, dict()), v)
continue
# Append arrays
if isinstance(v, list) and isinstance(base_copy[k], list):
base_copy[k].extend(v)
continue
# Merge sets
if isinstance(v, set) and isinstance(base_copy[k], set):
base_copy[k] |= v
continue
# Unknown types
raise ValueError("Can't merge type {} into type {}".format(type(v), type(base_copy[k])))
except ValueError as ex:
raise ValueError("{} inside key {}".format(ex, k)) from ex
return base_copy
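# Small example of merge_dictionaries() above: lists are appended, nested
# dicts are merged key-by-key, and keys only present in `additions` are
# copied over (colliding scalar values raise ValueError). Keys are made up.
def _example_merge_dictionaries():
    base = {'write_files': [{'path': '/a'}],
            'coreos': {'units': [{'name': 'x.service'}]}}
    additions = {'write_files': [{'path': '/b'}],
                 'coreos': {'update': {'reboot-strategy': 'off'}}}
    return merge_dictionaries(base, additions)
    # -> {'write_files': [{'path': '/a'}, {'path': '/b'}],
    #     'coreos': {'units': [{'name': 'x.service'}],
    #                'update': {'reboot-strategy': 'off'}}}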
def load_templates(template_dict):
result = dict()
for name, template_list in template_dict.items():
result_list = list()
for template_name in template_list:
result_list.append(gen.template.parse_resources(template_name))
extra_filename = "gen_extra/" + template_name
if os.path.exists(extra_filename):
result_list.append(gen.template.parse_str(
load_string(extra_filename)))
result[name] = result_list
return result
# Render the Jinja/YAML into YAML, then load the YAML and merge it to make the
# final configuration files.
def render_templates(template_dict, arguments):
rendered_templates = dict()
templates = load_templates(template_dict)
for name, templates in templates.items():
full_template = None
for template in templates:
rendered_template = template.render(arguments)
# If not yaml, just treat opaquely.
if not name.endswith('.yaml'):
# No merging support currently.
assert len(templates) == 1
full_template = rendered_template
continue
template_data = yaml.safe_load(rendered_template)
if full_template:
full_template = merge_dictionaries(full_template, template_data)
else:
full_template = template_data
rendered_templates[name] = full_template
return rendered_templates
# Collect the un-bound / un-set variables from all the given templates to build
# the schema / configuration target. The templates and their structure serve
# as the schema for what configuration a user must provide.
def target_from_templates(template_dict):
# NOTE: the individual yaml template targets are merged into one target
# since we never want to target just one template at a time for now (they
# all merge into one config package).
target = gen.internals.Target()
templates = load_templates(template_dict)
for template_list in templates.values():
for template in template_list:
target += template.target_from_ast()
return [target]
def write_to_non_taken(base_filename, json):
number = 0
filename = base_filename
while (os.path.exists(filename)):
number += 1
filename = base_filename + '.{}'.format(number)
write_json(filename, json)
return filename
def do_gen_package(config, package_filename):
# Generate the specific dcos-config package.
# Version will be setup-{sha1 of contents}
with gen.util.pkgpanda_package_tmpdir() as tmpdir:
# Only contains package, root
assert config.keys() == {"package"}
# Write out the individual files
for file_info in config["package"]:
assert file_info.keys() <= {"path", "content", "permissions"}
if file_info['path'].startswith('/'):
path = tmpdir + file_info['path']
else:
path = tmpdir + '/' + file_info['path']
try:
if os.path.dirname(path):
os.makedirs(os.path.dirname(path), mode=0o755)
except FileExistsError:
pass
with open(path, 'w') as f:
f.write(file_info['content'])
# the file has special mode defined, handle that.
if 'permissions' in file_info:
assert isinstance(file_info['permissions'], str)
os.chmod(path, int(file_info['permissions'], 8))
else:
os.chmod(path, 0o644)
gen.util.make_pkgpanda_package(tmpdir, package_filename)
def render_late_content(content, late_values):
def _dereference_placeholders(parts):
for part, is_placeholder in parts:
if is_placeholder:
if part not in late_values:
log.debug('Found placeholder for unknown value "{}" in late config: {}'.format(part, repr(content)))
raise Exception('Bad late config file: Found placeholder for unknown value "{}"'.format(part))
yield late_values[part]
else:
yield part
return ''.join(_dereference_placeholders(split_by_token(
gen.internals.LATE_BIND_PLACEHOLDER_START,
gen.internals.LATE_BIND_PLACEHOLDER_END,
content,
strip_token_decoration=True,
)))
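# Assumed usage of render_late_content() above: a placeholder appears to be
# the variable name wrapped in gen.internals.LATE_BIND_PLACEHOLDER_START/END
# (inferred from strip_token_decoration=True; the exact token values are not
# shown in this file). The variable name and value below are placeholders.
def _example_render_late_content():
    content = 'exhibitor_address: {}exhibitor_address{}\n'.format(
        gen.internals.LATE_BIND_PLACEHOLDER_START,
        gen.internals.LATE_BIND_PLACEHOLDER_END)
    return render_late_content(content, {'exhibitor_address': '10.0.0.5'})
    # -> 'exhibitor_address: 10.0.0.5\n'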
def _late_bind_placeholder_in(string_):
return gen.internals.LATE_BIND_PLACEHOLDER_START in string_ or gen.internals.LATE_BIND_PLACEHOLDER_END in string_
def resolve_late_package(config, late_values):
resolved_config = {
'package': [
{k: render_late_content(v, late_values) if k == 'content' else v for k, v in file_info.items()}
for file_info in config['package']
]
}
assert not any(
_late_bind_placeholder_in(v) for file_info in resolved_config['package'] for v in file_info.values()
), 'Resolved late package must not contain late value placeholder: {}'.format(resolved_config)
return resolved_config
def extract_files_containing_late_variables(start_files):
found_files = []
left_files = []
for file_info in deepcopy(start_files):
assert not any(_late_bind_placeholder_in(v) for k, v in file_info.items() if k != 'content'), (
'File info must not contain late config placeholder in fields other than content: {}'.format(file_info)
)
if _late_bind_placeholder_in(file_info['content']):
found_files.append(file_info)
else:
left_files.append(file_info)
# All files still belong somewhere
assert len(found_files) + len(left_files) == len(start_files)
return found_files, left_files
# Validate all arguments passed in actually correspond to parameters to
# prevent human typo errors.
# This includes all possible sub scopes (Including config for things you don't use is fine).
def flatten_parameters(scoped_parameters):
flat = copy(scoped_parameters.get('variables', set()))
for name, possible_values in scoped_parameters.get('sub_scopes', dict()).items():
flat.add(name)
for sub_scope in possible_values.values():
flat |= flatten_parameters(sub_scope)
return flat
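# Example of flatten_parameters() above on a tiny scoped-parameter structure;
# the variable and scope names are hypothetical.
def _example_flatten_parameters():
    scoped = {
        'variables': {'cluster_name'},
        'sub_scopes': {
            'provider': {
                'aws': {'variables': {'aws_region'}},
                'onprem': {'variables': {'master_list'}},
            },
        },
    }
    return flatten_parameters(scoped)
    # -> {'cluster_name', 'provider', 'aws_region', 'master_list'}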
def validate_all_arguments_match_parameters(parameters, setters, arguments):
errors = dict()
# Gather all possible parameters from templates as well as setter parameters.
all_parameters = flatten_parameters(parameters)
for setter_list in setters.values():
for setter in setter_list:
all_parameters |= setter.parameters
all_parameters.add(setter.name)
all_parameters |= {name for name, value in setter.conditions}
# Check every argument is in the set of parameters.
for argument in arguments:
if argument not in all_parameters:
errors[argument] = 'Argument {} given but not in possible parameters {}'.format(argument, all_parameters)
if len(errors):
raise ValidationError(errors, set())
def validate(
arguments,
extra_templates=list(),
extra_sources=list()):
sources, targets, _ = get_dcosconfig_source_target_and_templates(arguments, extra_templates, extra_sources)
return gen.internals.resolve_configuration(sources, targets).status_dict
def user_arguments_to_source(user_arguments) -> gen.internals.Source:
"""Convert all user arguments to be a gen.internals.Source"""
# Make sure all user provided arguments are strings.
# TODO(cmaloney): Loosen this restriction / allow arbitrary types as long
# as they all have a gen specific string form.
gen.internals.validate_arguments_strings(user_arguments)
user_source = gen.internals.Source(is_user=True)
for name, value in user_arguments.items():
user_source.add_must(name, value)
return user_source
# TODO(cmaloney): This function should dissolve away like the ssh one did and just become a big
# static dictionary, or pass in / construct on the fly at the various template call sites.
def get_dcosconfig_source_target_and_templates(
user_arguments: dict,
extra_templates: List[str],
extra_sources: List[gen.internals.Source]):
log.info("Generating configuration files...")
# TODO(cmaloney): Make these all just defined by the base calc.py
config_package_names = ['dcos-config', 'dcos-metadata']
template_filenames = ['dcos-config.yaml', 'cloud-config.yaml', 'dcos-metadata.yaml', 'dcos-services.yaml']
# TODO(cmaloney): Check there are no duplicates between templates and extra_template_files
template_filenames += extra_templates
# Re-arrange templates to be indexed by common name. Only allow multiple for one key if the key
# is yaml (ends in .yaml).
templates = dict()
for filename in template_filenames:
key = os.path.basename(filename)
templates.setdefault(key, list())
templates[key].append(filename)
if len(templates[key]) > 1 and not key.endswith('.yaml'):
raise Exception(
"Internal Error: Only know how to merge YAML templates at this point in time. "
"Can't merge template {} in template_list {}".format(filename, templates[key]))
targets = target_from_templates(templates)
base_source = gen.internals.Source(is_user=False)
base_source.add_entry(gen.calc.entry, replace_existing=False)
# Allow overriding calculators with a `gen_extra/calc.py` if it exists
if os.path.exists('gen_extra/calc.py'):
mod = importlib.machinery.SourceFileLoader('gen_extra.calc', 'gen_extra/calc.py').load_module()
base_source.add_entry(mod.entry, replace_existing=True)
def add_builtin(name, value):
base_source.add_must(name, json_prettyprint(value))
sources = [base_source, user_arguments_to_source(user_arguments)] + extra_sources
# TODO(cmaloney): Hash the contents of all the templates rather than using the list of filenames
# since the filenames might not live in this git repo, or may be locally modified.
add_builtin('template_filenames', template_filenames)
add_builtin('config_package_names', list(config_package_names))
# TODO(cmaloney): user_arguments needs to be a temporary_str since we need to only include used
# arguments inside of it.
add_builtin('user_arguments', user_arguments)
# Add a builtin for expanded_config, so that we won't get unset argument errors. The temporary
# value will get replaced with the set of all arguments once calculation is complete
temporary_str = 'DO NOT USE THIS AS AN ARGUMENT TO OTHER ARGUMENTS. IT IS TEMPORARY'
add_builtin('expanded_config', temporary_str)
    # Note: must come last so the hash of the "base_source" this is being added to contains all the
# variables but this.
add_builtin('sources_id', hash_checkout([hash_checkout(source.make_id()) for source in sources]))
return sources, targets, templates
def build_late_package(late_files, config_id, provider):
if not late_files:
return None
    # Add an empty pkginfo.json to the late package after validating there
# isn't already one.
for file_info in late_files:
assert file_info['path'] != '/pkginfo.json'
assert file_info['path'].startswith('/')
late_files.append({
"path": "/pkginfo.json",
"content": "{}"})
return {
'package': late_files,
'name': 'dcos-provider-{}-{}--setup'.format(config_id, provider)
}
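# Small, hypothetical illustration of build_late_package() above; the file
# content, config_id and provider strings are placeholders.
def _example_build_late_package():
    late_files = [{'path': '/etc/mesosphere/setup-flags/late-config.yaml',
                   'content': 'not-resolved-yet'}]
    pkg = build_late_package(late_files, config_id='abc123', provider='aws')
    # pkg['name']    -> 'dcos-provider-abc123-aws--setup'
    # pkg['package'] ends with the injected empty /pkginfo.json entry
    return pkg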
def validate_and_raise(sources, targets):
# TODO(cmaloney): Make it so we only get out the dcosconfig target arguments not all the config target arguments.
resolver = gen.internals.resolve_configuration(sources, targets)
status = resolver.status_dict
if status['status'] == 'errors':
raise ValidationError(errors=status['errors'], unset=status['unset'])
return resolver
def get_late_variables(resolver, sources):
    # Gather the late variables. The presence of late variables changes
# whether or not a late package is created
late_variables = dict()
# TODO(branden): Get the late vars and expressions from resolver.late
for source in sources:
for setter_list in source.setters.values():
for setter in setter_list:
if not setter.is_late:
continue
if setter.name not in resolver.late:
continue
# Skip late vars that aren't referenced by config.
if not resolver.arguments[setter.name].is_finalized:
continue
# Validate a late variable should only have one source.
assert setter.name not in late_variables
late_variables[setter.name] = setter.late_expression
log.debug('Late variables:\n{}'.format(pprint.pformat(late_variables)))
return late_variables
def get_final_arguments(resolver):
return {k: v.value for k, v in resolver.arguments.items() if v.is_finalized}
def generate(
arguments,
extra_templates=list(),
extra_sources=list(),
extra_targets=list()):
# To maintain the old API where we passed arguments rather than the new name.
user_arguments = arguments
arguments = None
sources, targets, templates = get_dcosconfig_source_target_and_templates(
user_arguments, extra_templates, extra_sources)
resolver = validate_and_raise(sources, targets + extra_targets)
argument_dict = get_final_arguments(resolver)
late_variables = get_late_variables(resolver, sources)
# expanded_config is a special result which contains all other arguments. It has to come after
# the calculation of all the other arguments so it can be filled with everything which was
# calculated. Can't be calculated because that would have an infinite recursion problem (the set
# of all arguments would want to include itself).
    # Explicitly / manually set up so that it'll fit where we want it.
# TODO(cmaloney): Make this late-bound by gen.internals
argument_dict['expanded_config'] = textwrap.indent(
json_prettyprint(
{k: v for k, v in argument_dict.items() if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)}
),
prefix=' ' * 3,
)
log.debug("Final arguments:" + json_prettyprint(argument_dict))
# Fill in the template parameters
# TODO(cmaloney): render_templates should ideally take the template targets.
rendered_templates = render_templates(templates, argument_dict)
# Validate there aren't any unexpected top level directives in any of the files
# (likely indicates a misspelling)
for name, template in rendered_templates.items():
if name == 'dcos-services.yaml': # yaml list of the service files
assert isinstance(template, list)
elif name == 'cloud-config.yaml':
assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
elif isinstance(template, str): # Not a yaml template
pass
else: # yaml template file
log.debug("validating template file %s", name)
assert template.keys() <= PACKAGE_KEYS, template.keys()
# Find all files which contain late bind variables and turn them into a "late bind package"
# TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
late_files, regular_files = extract_files_containing_late_variables(
rendered_templates['dcos-config.yaml']['package'])
# put the regular files right back
rendered_templates['dcos-config.yaml'] = {'package': regular_files}
def make_package_filename(package_id, extension):
return 'packages/{0}/{1}{2}'.format(
package_id.name,
repr(package_id),
extension)
# Render all the cluster packages
cluster_package_info = {}
# Prepare late binding config, if any.
late_package = build_late_package(late_files, argument_dict['config_id'], argument_dict['provider'])
if late_variables:
# Render the late binding package. This package will be downloaded onto
# each cluster node during bootstrap and rendered into the final config
# using the values from the late config file.
late_package_id = PackageId(late_package['name'])
late_package_filename = make_package_filename(late_package_id, '.dcos_config')
os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
write_yaml(late_package_filename, {'package': late_package['package']}, default_flow_style=False)
log.info('Package filename: {}'.format(late_package_filename))
# Add the late config file to cloud config. The expressions in
# late_variables will be resolved by the service handling the cloud
# config (e.g. Amazon CloudFormation). The rendered late config file
# on a cluster node's filesystem will contain the final values.
rendered_templates['cloud-config.yaml']['root'].append({
'path': '/etc/mesosphere/setup-flags/late-config.yaml',
'permissions': '0644',
'owner': 'root',
# TODO(cmaloney): don't prettyprint to save bytes.
# NOTE: Use yaml here simply to make avoiding painful escaping and
# unescaping easier.
'content': render_yaml({
'late_bound_package_id': late_package['name'],
'bound_values': late_variables
})})
# Collect metadata for cluster packages.
for package_id_str in json.loads(argument_dict['cluster_packages']):
package_id = PackageId(package_id_str)
package_filename = make_package_filename(package_id, '.tar.xz')
cluster_package_info[package_id.name] = {
'id': package_id_str,
'filename': package_filename
}
# Render config packages.
config_package_ids = json.loads(argument_dict['config_package_ids'])
for package_id_str in config_package_ids:
package_id = PackageId(package_id_str)
do_gen_package(rendered_templates[package_id.name + '.yaml'], cluster_package_info[package_id.name]['filename'])
# Convert cloud-config to just contain write_files rather than root
cc = rendered_templates['cloud-config.yaml']
# Shouldn't contain any packages. Providers should pull what they need to
# late bind out of other packages via cc_package_file.
assert 'package' not in cc
cc_root = cc.pop('root', [])
# Make sure write_files exists.
assert 'write_files' not in cc
cc['write_files'] = []
# Do the transform
for item in cc_root:
assert item['path'].startswith('/')
cc['write_files'].append(item)
rendered_templates['cloud-config.yaml'] = cc
# Add in the add_services util. Done here instead of the initial
# map since we need to bind in parameters
def add_services(cloudconfig, cloud_init_implementation):
return add_units(cloudconfig, rendered_templates['dcos-services.yaml'], cloud_init_implementation)
utils.add_services = add_services
return Bunch({
'arguments': argument_dict,
'cluster_packages': cluster_package_info,
'config_package_ids': config_package_ids,
'late_package_id': late_package['name'] if late_package else None,
'templates': rendered_templates,
'utils': utils
})
|
BenWhitehead/dcos
|
gen/__init__.py
|
Python
|
apache-2.0
| 26,189
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list
import topi.testing
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_conv2d_infer_type():
# symbolic in batch dimension
n, c, h, w = tvm.var("n"), 10, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv2d(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(2, 10, 3, 3), "float32")
# infer by shape of w, mixed precision
n, c, h, w = tvm.var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222), "int32")
# infer shape in case of different dtypes for input and weight.
n, c, h, w = tvm.var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222), "int32")
# Infer with a different layout
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n//4, c//4, h, w, 4, 4), "int8"))
wt = relay.var("w")
y = relay.nn.conv2d(x, wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NCHW4n4c",
kernel_layout="OIHW4o4i",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(1, 4, 224, 224, 4, 4), "int32")
assert yy.args[1].checked_type == relay.TensorType(
(4, 8, 3, 3, 4, 4), "int8")
# Infer with NHWC
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv2d(x, wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NHWC",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, h, w, 16), "int32")
def test_conv2d_run():
def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
fref=None,
groups=1,
dilation=(1, 1),
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv2d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = topi.testing.conv2d_nchw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
groups=groups)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
# depthwise conv2d
dshape = (1, 32, 18, 18)
kshape = (32, 1, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=32, groups=32, kernel_size=(3, 3),
fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
x, w, (1, 1), "SAME"))
# CUDA is disabled for 'direct' schedule:
# https://github.com/dmlc/tvm/pull/3070#issuecomment-486597553
# group conv2d
dshape = (1, 32, 18, 18)
kshape = (32, 4, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=32, groups=8, kernel_size=(3, 3),
except_targets=['cuda'])
# also group conv2d
dshape = (1, 32, 18, 18)
kshape = (64, 1, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=64, groups=32, kernel_size=(3, 3),
except_targets=['cuda'])
# normal conv2d
dshape = (1, 3, 224, 224)
kshape = (10, 3, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3))
# mixed precision
run_test_conv2d("int8", "int32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3))
kshape = (10, 3, 1, 3)
# mixed precision.
run_test_conv2d("int8", "int32", 1, dshape, kshape,
                    padding=(0, 1), channels=10, kernel_size=(1, 3))
# dilated conv2d
dshape = (1, 3, 18, 18)
kshape = (10, 3, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
                    padding=(1, 1), channels=10, kernel_size=(3, 3), dilation=(3, 3))
def test_conv2d_transpose_infer_type():
# symbolic in batch dimension
n, c, h, w = tvm.var("n"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.conv2d_transpose(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=15)
assert "channels=15" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 15, 10, 12), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(10, 15, 3, 3), "float32")
# infer by shape of w, mixed precision
n, c, h, w = tvm.var("n"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
y = relay.nn.conv2d_transpose(x, w,
output_padding=(1, 1),
channels=11,
data_layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 15, 15, 11), "float32")
def test_conv2d_transpose_run():
dshape = (1, 3, 18, 18)
kshape = (3, 10, 3, 3)
oshape = (1, 10, 37, 37)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv2d_transpose(x, w,
channels=10, kernel_size=(3,3), strides=(2,2),
padding=(1,1), output_padding=(2, 2))
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
c_np = topi.testing.conv2d_transpose_nchw_python(
data, kernel, 2, 1)
d_np = np.zeros(shape=oshape)
d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np
ref_res = d_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_upsampling_infer_type():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.upsampling(x, scale=2, layout="NCHW", method="BILINEAR")
"method=\"BINLINEAR\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h*2, w*2), "float32")
n, c = tvm.var("n"), tvm.var("c")
x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32"))
y = relay.nn.upsampling(x, scale=2, layout="NCHW", method="BILINEAR")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32")
def _test_pool2d(opfunc, reffunc):
n, c, h, w = tvm.var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
# test execution
dtype = "float32"
dshape = (1, 3, 28, 28)
x = relay.var("x", shape=dshape)
y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5))
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def _test_global_pool2d(opfunc, reffunc):
n, c, h, w = tvm.var("n"), tvm.var("c"), 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
y = opfunc(x, layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32")
n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32")
# test execution
dtype = "float32"
dshape = (1, 1024, 7, 7)
x = relay.var("x", shape=dshape)
y = opfunc(x)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data, axis=(2,3), keepdims=True)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_pool2d():
_test_pool2d(relay.nn.max_pool2d, np.max)
_test_pool2d(relay.nn.avg_pool2d, np.mean)
_test_global_pool2d(relay.nn.global_max_pool2d, np.max)
_test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)
def test_avg_pool2d_no_count_pad():
kh, kw = (4, 4)
sh, sw = (2, 2)
ph, pw = (2, 2)
n = 1
(ic, ih, iw) = (3, 28, 28)
(oc, oh, ow) = (3, 15, 15)
dshape = (n, ic, ih, iw)
x = relay.var("x", shape=dshape)
y = relay.nn.avg_pool2d(x,
pool_size=(kh, kw),
                            strides=(sh, sw),
padding=(ph, pw),
count_include_pad=False)
func = relay.Function([x], y)
dtype = "float32"
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)
no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))
pad_np[np.ix_(*no_zero)] = a_np
b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
for i in range(oh):
for j in range(ow):
pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))
b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],
axis=(2,3)) / np.maximum(pad_count, 1)
ref_res = np.maximum(b_np, 0.0)
data = a_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_flatten_infer_type():
d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")
x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 24), "float32")
x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")
shape = (1, 5, 10, 10)
o_shape = (1, 500)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.batch_flatten(x)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(o_shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = x_data.flatten().reshape(o_shape)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_pad_infer_type():
# entirely concrete case
n, c, h, w = 1, 2, 3, 4
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
"pad_width=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")
# some symbolic values
n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w")
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
def test_pad_run():
def _test_run(dtype):
dshape = (4, 10, 7, 7)
x = relay.var("x", shape=dshape)
y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_run('float32')
_test_run('int32')
def test_lrn():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
    assert "alpha=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
size=5
axis=1
bias=0.5
alpha=.00001
beta=0.75
z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_l2_normalize():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
    assert "axis=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
eps=0.001
axis=1
z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis])
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def batch_flatten(data):
shape = data.shape
target_dim = 1
for i in range(len(shape) - 1):
target_dim = target_dim * shape[i + 1]
return np.reshape(data, (shape[0], target_dim))
def test_batch_flatten():
t1 = relay.TensorType((5, 10, 5))
x = relay.Var("x", t1)
func = relay.Function([x], relay.nn.batch_flatten(x))
data = np.random.rand(5, 10, 5).astype(t1.dtype)
ref_res = batch_flatten(data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def _test_upsampling(layout, method):
n, c, h, w = tvm.var("n"), 16, 32, 32
scale = 2
dtype = "float32"
def get_shape():
if layout == "NCHW":
return (c, h, w), (c, h*scale, w*scale)
else:
return (h, w, c), (h*scale, w*scale, c)
ishape, oshape = get_shape()
x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
dshape = (1,) + ishape
x = relay.var("x", shape=dshape)
y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
if method == "NEAREST_NEIGHBOR":
ref = topi.testing.upsampling_python(data, (scale, scale), layout)
else:
ref = topi.testing.bilinear_resize_python(data, (h*scale, w*scale), layout)
for target, ctx in ctx_list():
executor = relay.create_executor("graph", ctx=ctx, target=target)
out = executor.evaluate(func)(data)
tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
def test_upsampling():
_test_upsampling("NCHW", "NEAREST_NEIGHBOR")
_test_upsampling("NCHW", "BILINEAR")
_test_upsampling("NHWC", "NEAREST_NEIGHBOR")
_test_upsampling("NHWC", "BILINEAR")
if __name__ == "__main__":
test_pool2d()
test_avg_pool2d_no_count_pad()
test_lrn()
test_l2_normalize()
test_conv2d_infer_type()
test_upsampling_infer_type()
test_flatten_infer_type()
test_pad_infer_type()
test_pad_run()
test_conv2d_transpose_infer_type()
test_conv2d_transpose_run()
test_conv2d_run()
test_batch_flatten()
test_upsampling()
|
mlperf/training_results_v0.7
|
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/tests/python/relay/test_op_level2.py
|
Python
|
apache-2.0
| 21,337
|
class Cell:
value = False
    neighbourListe = [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2)] # for (1,1)
def __init__(self, coordinate):
self.coordinate = coordinate
        self.neighbourListe = [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2)] # for (1,1)
def exists(self):
return self.value
def setValue(self, value):
self.value = value
return self.value
def getNeighbours(self):
# calculate neighbours, generate neighbourListe
return self.neighbourListe
if __name__ == '__main__':
pass
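    # Hedged usage sketch (added for illustration, not part of the original
    # coderetreat kata): exercise the Cell API defined above.
    cell = Cell((1, 1))
    print(cell.exists())        # False until a value is set
    cell.setValue(True)
    print(cell.exists())        # now True
    print(cell.getNeighbours()) # the hard-coded neighbour list for (1, 1)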
|
hemmerling/codingdojo
|
src/game_of_life/python_coderetreat_socramob/cr_socramob05/cell.py
|
Python
|
apache-2.0
| 607
|
"""Trophies-related data model.
"""
from google.appengine.ext import ndb
from heroes.models import Base, BaseExpando
class Trophy(BaseExpando):
"""Trophy, like Worlds Best Football Team. That trophie can change hands at any point
depending on how the Football administration is set up.
"""
"""
BT: a team can also be 2nd or 3rd. The 'owner' would be the last team (country) to place 1st
"""
name = ndb.StringProperty(required=True)
description = ndb.TextProperty()
owner = ndb.KeyProperty()
@property
def link(self):
return '/trophy/update/{}/'.format(self.uid)
|
bentilly/heroes
|
heroes/trophies/models.py
|
Python
|
apache-2.0
| 622
|
from django.utils import html
import CommonMark
from HTMLParser import HTMLParser
def markdown_unescape(markdown):
    # puts proper markdown <code> tags back in place of the escaped ones,
    # allowing markdown to be applied; translates and returns the markdown
    # text
temp_markdown=markdown
#parse to decode markdown
parse_markdown=HTMLParser()
#split commonmark tags
temp_markdown=temp_markdown.split('<')
    # set up beginning and ending counters so that you can tell where to
    # append the code tags for "unescaping"
begin=0
end=0
unescape_content=[]
for text in temp_markdown:
        # deal with the beginning and ending code tags, and their decoding
if text.replace(' ','').startswith('code>'):
begin+=1
if text.replace(' ','').startswith('/code>'):
end+=1
        # the code below takes into account whether you are inside a code
        # tag or not
        if begin>end:
            unescape_content.append(parse_markdown.unescape(text))
else:
unescape_content.append(text)
#return all the stuff joined together
return '<'.join(unescape_content)
def markdown_stuff(contentType, markdown):
    # escapes the content and hands the block content to another function
    # before applying and returning the markdowned content.
    # If markdown is not requested, just replace each newline with a
    # line break
clean=html.conditional_escape(contentType)
if markdown:
#markdown block quotes
        new_markdown=clean.replace('&gt;', '>')
new_markdown=CommonMark.commonmark(new_markdown)
markdown_text=markdown_unescape(new_markdown)
return markdown_text.replace('\n', '<br/>')
return clean.replace('\n','<br/>')
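if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original
    # module): render a small, purely illustrative snippet both with and
    # without markdown enabled.
    sample = "# Title\n> a quoted line\nplain `code` text"
    print(markdown_stuff(sample, markdown=True))
    print(markdown_stuff(sample, markdown=False))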
|
DaynaLacoursiere/cmput404-project
|
squirespace/blog/functions.py
|
Python
|
apache-2.0
| 1,556
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The default implementation of the usb_hub capability.
The usb_hub capability is intended to be used by primary devices that require
the ability to
get or change the USB power mode for a configured port on a USB hub.
The configured USB hub must support the switch_power capability.
"""
from gazoo_device import decorators
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device.capabilities.interfaces import usb_hub_base
logger = gdm_logger.get_logger()
class UsbHubDefault(usb_hub_base.UsbHubBase):
"""Base class for usb_hub."""
def __init__(self,
device_name,
get_manager,
hub_name,
device_port,
get_switchboard_if_initialized,
change_triggers_reboot=False,
wait_for_bootup_complete_fn=None,
settable=False):
"""Create an instance of the usb_hub capability.
Args:
device_name (str): name of the device this capability is attached
to.
get_manager (method): A method which returns the Manager instance.
hub_name (str): name of the hub this device is attached to.
device_port (int): usb hub port number used by the device.
get_switchboard_if_initialized (callable): function which returns
a Switchboard instance or None if Switchboard hasn't been initialized.
change_triggers_reboot (bool): Set change_triggers_reboot to TRUE if
changing the USB power mode for the device causes a reboot.
wait_for_bootup_complete_fn (func): A method that the capability can
call to wait for a reboot to complete if triggered by a change.
settable (bool): whether or not the properties are settable.
"""
super(UsbHubDefault, self).__init__(device_name=device_name)
self._hub_name = hub_name
self._device_port = device_port
self._get_switchboard_if_initialized = get_switchboard_if_initialized
self._change_triggers_reboot = change_triggers_reboot
self._wait_for_bootup_complete_fn = wait_for_bootup_complete_fn
self._usb_hub = None
self._settable = settable
self._get_manager = get_manager
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def health_check(self):
"""Checks that the capability is ready to use.
Raises:
CapabilityNotReadyError: if unable to create auxiliary device for
power switching.
"""
unset_props = []
if self.name is None:
unset_props.append("device_usb_hub_name")
if self.device_port is None:
unset_props.append("device_usb_port")
if unset_props:
if self._settable:
msg_format = ("If device is connected to Cambrionix, "
"set them via 'gdm set-prop {} <property> <value>'")
else:
msg_format = ("If device is connected to Cambrionix, "
"set them via 'gdm redetect {}")
msg = msg_format.format(self._device_name)
error_msg = "properties {} are unset. ".format(
" and ".join(unset_props)) + msg
raise errors.CapabilityNotReadyError(
msg=error_msg, device_name=self._device_name)
try:
self._usb_hub = self._get_manager().create_device(self.name)
except (errors.DeviceError, RuntimeError) as err:
raise errors.CapabilityNotReadyError(
msg=repr(err), device_name=self._device_name)
self._healthy = True
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def close(self):
"""Closes the USB hub device instance."""
if self._usb_hub:
self._usb_hub.close()
super().close()
@decorators.PersistentProperty
def name(self):
"""The name of the usb hub.
Returns:
str: usb hub name.
Raises:
DeviceError: usb hub name retrieval failed
"""
return self._hub_name
@decorators.DynamicProperty
def supported_modes(self):
"""Get the USB power modes supported by the USB hub."""
if not self.healthy:
self.health_check()
return self._usb_hub.switch_power.supported_modes
@decorators.PersistentProperty
def device_port(self):
"""The usb hub port number used by device.
Returns:
int: port number on usb hub.
Raises:
DeviceError: usb hub port number retrieval failed
"""
return self._device_port
@decorators.CapabilityLogDecorator(logger)
def check_device_ready(self):
self.health_check()
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def get_device_power(self):
"""Gets usb port mode if set.
Returns:
str: 'sync', 'charge', or 'off'
Raises:
DeviceError: if key 'mode' doesn't exist
"""
if not self.healthy:
self.health_check()
return self._usb_hub.switch_power.get_mode(self._device_port)
@decorators.CapabilityLogDecorator(logger)
def set_device_power(self, mode, no_wait=False):
"""Turns associated powered usb hub port, if available, power state to sync, off, charge.
Args:
mode (str): power mode to set USB hub port to ("sync", "off",
"charge")
no_wait (bool): return before boot up is complete. Default: False.
Raises:
DeviceError: if invalid mode provided
Notes:
'sync' is data and power on, 'charge' is power only on, 'off' is
both off.
"""
self._set_port_mode(mode, self.device_port, no_wait)
@decorators.CapabilityLogDecorator(logger)
def power_off(self, port, no_wait=False):
"""This command powers off the port specified or all ports if port is None.
Args:
port (int): identifies which hub port to power off
no_wait (bool): Return before boot up is complete. Default: False.
Raises:
DeviceError: port number invalid.
"""
self._set_port_mode("off", port, no_wait)
@decorators.CapabilityLogDecorator(logger)
def power_on(self, port, data_sync=True, no_wait=False):
"""This command powers on the port specified or all ports if port is None.
Args:
port (int): identifying which hub port to power on
data_sync (bool): True if data should be enabled, false for power
only
no_wait (bool): Return before boot up is complete. Default: False.
Raises:
DeviceError: port number invalid.
"""
mode = "sync"
if not data_sync:
mode = "charge"
self._set_port_mode(mode, port, no_wait)
def _set_port_mode(self, mode, port, no_wait):
"""Set the USB power mode fort the specified port.
Args:
mode (str): power mode to set USB hub port to
port (int): identifies which hub port to set the mode on.
no_wait (bool): Return before boot up is complete. Default: False.
Raises:
DeviceError: mode or port is invalid.
"""
if not self.healthy:
self.health_check()
if self._verify_power_change_needed(mode, self.device_port):
logger.debug(
"{} setting device USB power to '{}' for hub: {} and port: {}".format(
self._device_name, mode, self._hub_name, port))
switchboard = self._get_switchboard_if_initialized()
if switchboard:
switchboard.add_log_note(
"Setting device USB power to '{}' for hub {} and port {}".format(
mode, self._hub_name, port))
if self._change_triggers_reboot:
switchboard.add_log_note(
"GDM triggered reboot via USB power change.")
self._usb_hub.switch_power.set_mode(mode, port)
if self._change_triggers_reboot and not no_wait:
self._wait_for_bootup_complete_fn()
def _verify_power_change_needed(self, mode, port):
"""Returns whether or not port power change needed.
Args:
mode (str): power mode to set USB hub port to.
port (int): Identifies which port to check the power mode.
Returns:
bool: if current mode is not the same as expected mode.
Raises:
DeviceError: if mode provided or usb_hub management is not a valid
option
"""
mode = mode.lower()
if mode not in list(self.supported_modes):
raise errors.DeviceError("{} USB power mode must be in "
"supported_modes".format(self._device_name))
if not self.healthy:
self.health_check()
current_mode = self._usb_hub.switch_power.get_mode(port)
return current_mode != mode
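if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of gazoo_device):
    # wire the capability to mock objects so the call flow can be exercised
    # without real hardware. The device and hub names below are hypothetical.
    from unittest import mock
    hub_device = mock.MagicMock()
    hub_device.switch_power.supported_modes = ["sync", "charge", "off"]
    hub_device.switch_power.get_mode.return_value = "sync"
    manager = mock.MagicMock()
    manager.create_device.return_value = hub_device
    usb_hub = UsbHubDefault(
        device_name="device-1234",
        get_manager=lambda: manager,
        hub_name="cambrionix-5678",
        device_port=3,
        get_switchboard_if_initialized=lambda: None)
    print(usb_hub.get_device_power())   # "sync", straight from the mock
    usb_hub.set_device_power("charge")  # flips the mocked port to charge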
|
google/gazoo-device
|
gazoo_device/capabilities/usb_hub_default.py
|
Python
|
apache-2.0
| 8,992
|
from model.model_contact import Contact
from model.model_group import Group
import random
from fixture.orm import *
def test_delete_contact_to_group(app2, db, orm):
    # check for an empty group list
if len(orm.get_group_list()) == 0:
app2.group.create(Group(name="Created in_test_delete_contact_to_group", footer="Created footer_in_test_delete_contact_to_group", header="Created header_in_test_delete_contact_to_group"))
    # pick any group and navigate to it
app2.navigation.open_home_page()
active_groups = orm.get_group_list()
target_group = random.choice(active_groups)
app2.group.select_some_group_to_view(target_group)
    # see how many contacts are in the group
    old_contacts = orm.get_contacts_in_group(target_group)
    # check for an empty contact list
if len(old_contacts) == 0:
app2.contact.create(
Contact(firstname2="firstname-created_in_test_delete_contact_to_group", middlename="middlename-created_in_test_delete_contact_to_group", lastname="lastname-created_in_test_delete_contact_to_group",
nickname="nickname-created", title="title-created",
company="company-created", address="address-created", home='home-created', mobile='mobile-created',
work='work-created', fax='fax-created',
email='email-created@mfsa.ru', email2='email2-created@mfsa.ru', email3='email3-created@mfsa.ru',
address2='Another address-created', phone2='home_secondary-created', notes='Some text-created'))
app2.contact.select_contact_by_index(0)
app2.group.add_selected_contact_to_selected_group_by_id(target_group)
app2.navigation.go_to_target_group_by_id(target_group.id)
old_contacts = orm.get_contacts_in_group(target_group)
# old_contacts.append(created_contact1)
# return list(old_contacts)
contact = random.choice(old_contacts)
app2.contact.select_contact_by_id(contact.id)
    # delete the selected contact
app2.contact.delete_selected_contact_in_group_page()
    app2.navigation.go_to_target_group_by_id(target_group.id)  # just to give the contact time to be deleted
# app2.navigation.go_to_target_group_by_id_v2(target_group.id, text="Users removed.")
# app2.navigation.wait(10)
    # collect the new contact list of this group
new_contacts = orm.get_contacts_in_group(target_group)
old_contacts.remove(contact)
    # check that the list after deleting the contact matches the old list with the removed contact taken out
assert sorted(new_contacts, key=Group.id_or_max) == sorted(old_contacts, key=Group.id_or_max)
|
anjel-ershova/python_training
|
test/test_delete_contact_from_group.py
|
Python
|
apache-2.0
| 2,890
|
from .to_binary import to_binary
from .add_two_binary import add_two_binary
from .to_decimal import to_decimal
from .substract_binary import substract_binary
|
vtemian/university_projects
|
arhitecure/hmw2/utils/__init__.py
|
Python
|
apache-2.0
| 158
|
"""VcfNormalize module.
This module is used to normalize a file in the VCF format
(see https://samtools.github.io/hts-specs/VCFv4.2.pdf).
Normalizing a file is basically used to make different VCFs
comparable, and it is highly recommended when you are
benchmarking your call set using a reference call set.
Normalization is especially important for INDELs, as the
same INDEL could be represented in different ways depending
on the caller.
"""
import os
from collections import namedtuple
from Utils.RunProgram import RunProgram
class VcfNormalize:
"""
Normalize variants in a VCF file
"""
def __init__(self, vcf, vt_folder=None, vcflib_folder=None, bgzip_folder=None,
gatk_folder=None, bcftools_folder=None):
"""
Constructor
Parameters
----------
vcf : str
Path to gzipped vcf file.
vt_folder : str, optional
Path to folder containing the vt binary.
vcflib_folder : str, optional
Path to folder containing the different vcflib binaries.
bgzip_folder : str, optional
Path to folder containing the bgzip binary.
gatk_folder : str, optional
Path to folder containing the gatk wrapper script.
bcftools_folder : str, optional
Path to folder containing the bcftools binary.
"""
if os.path.isfile(vcf) is False:
raise Exception("File does not exist")
self.vcf = vcf
self.vt_folder = vt_folder
self.vcflib_folder = vcflib_folder
self.bgzip_folder = bgzip_folder
self.gatk_folder = gatk_folder
self.bcftools_folder = bcftools_folder
def run_vtnormalize(self, outprefix, reference, compress=False,
verbose=False, outdir=None, n=False):
"""
Run vt normalize on a vcf file
Parameters
----------
outprefix : str
Prefix for outputfile.
reference : str
Path to Fasta file with reference.
compress : boolean, optional
bgzip compress the normalized VCF.
outdir : str, optional
If provided, then put output files in this folder.
n : bool, default=False
warns but does not exit when REF is inconsistent
with reference sequence for non SNPs.
verbose : bool, optional
if true, then increase verbosity.
Returns
-------
outprefix : str
A string with path to normalized file.
"""
if self.vt_folder is None:
raise Exception("Provide a vt_folder containing the vt binary")
Arg = namedtuple('Argument', 'option value')
if outdir:
outprefix = "{0}/{1}".format(outdir, outprefix)
outprefix = outprefix+".norm.vcf"
args = [Arg('-r', reference)]
parameters = [self.vcf]
if n is True:
parameters.append('-n')
runner = None
pipelist = None
if compress is True:
outprefix += ".gz"
compressRunner = RunProgram(path=self.bgzip_folder,
program='bgzip',
parameters=['-c', '>',
outprefix])
pipelist = [compressRunner]
elif compress is None or compress is False:
args.append(Arg('-o', outprefix))
runner = RunProgram(path=self.vt_folder,
program='vt normalize',
args=args,
parameters=parameters,
downpipe=pipelist)
if verbose is True:
print("Command line for running vt normalize is: {0}".format(runner.cmd_line))
runner.run_checkoutput()
return outprefix
def run_bcftoolsnorm(self, outprefix, reference, multiallelics=None,
type=None, outdir=None, verbose=False):
"""
Run bcftools norm on a vcf file
Parameters
----------
outprefix : str
Prefix for outputfile.
reference : str
Path to Fasta file with reference.
        multiallelics : {'split','merge'}, optional
            Operate on multiallelic variants and either split or merge them.
        type : {'snps', 'indels', 'both', 'any'}, optional
            If 'multiallelics' is defined then operate on this type of variant.
outdir : str, optional
If provided, then put output files in this folder.
verbose : bool, optional
            If true, then increase verbosity.
Returns
-------
outprefix : str
A string with path to normalized file.
"""
if outdir:
outprefix = "{0}/{1}".format(outdir, outprefix)
outprefix = outprefix+".norm.vcf.gz"
Arg = namedtuple('Argument', 'option value')
args = [Arg('-f', reference), Arg('-o', outprefix)]
if multiallelics == "split":
if type is None:
raise Exception("'multiallelics' option is defined, "
"so please provide a 'type' value")
args.append(Arg('-m', "\'-{0}\'".format(type)))
elif multiallelics == "merge":
if type is None:
raise Exception("'multiallelics' option is defined,"
" so please provide a 'type' value")
args.append(Arg('-m', "\'+{0}\'".format(type)))
else:
if multiallelics is not None:
raise Exception("'multiallelics' value is not "
"recognized: {0}".format(multiallelics))
parameters = [self.vcf, '-Oz']
runner = RunProgram(path=self.bcftools_folder,
program='bcftools norm',
args=args,
parameters=parameters)
if verbose is True:
print("Command line for running bcftools norm is: {0}".format(runner.cmd_line))
runner.run_checkoutput()
return outprefix
def run_vcfallelicprimitives(self, outprefix, compress=True, outdir=None,
keepinfo=True, keepgeno=True, downstream_pipe=None, verbose=None):
"""
Run vcfallelicprimitives on a vcf file
This program is used to decompose complex variants into a canonical SNP and
        indel representation, generating phased genotypes for available samples.
Parameters
----------
outprefix : str
Prefix for outputfiles.
compress : bool, optional
Bgzip compress the normalized VCF.
outdir : str, optional
If provided, then put output files in this folder.
keepinfo : bool, default=True
Maintain site and allele-level annotations when decomposing.
Note that in many cases, such as multisample VCFs, these won't
be valid post-decomposition. For biallelic loci in single-sample
VCFs, they should be usable with caution.
keepgeno : bool, default=True
Maintain genotype-level annotations when decomposing. Similar
caution should be used for this as for keep-info.
downstream_pipe : str, optional
If defined, then pipe the output VCF to other tools.
i.e. "~/bin/vt/vt sort - | ~/bin/vt/vt uniq -".
verbose : bool, optional
if true, then increase verbosity.
Returns
-------
outprefix: str
A string with path to decomposed file.
"""
if outdir:
outprefix = "{0}/{1}".format(outdir, outprefix)
outprefix = outprefix+".aprimitives.vcf"
params = [self.vcf]
if keepinfo is True:
params.append('--keep-info')
if keepgeno is True:
params.append('--keep-geno')
if downstream_pipe is not None:
params.append("| {0}".format(downstream_pipe))
runner = None
pipelist = None
if compress is True:
outprefix += ".gz"
compressRunner = RunProgram(path=self.bgzip_folder,
program='bgzip',
parameters=['-c', '>', outprefix])
pipelist = [compressRunner]
elif compress is None or compress is False:
params.extend(['>', outprefix])
runner = RunProgram(path=self.vcflib_folder,
program='vcfallelicprimitives',
parameters=params,
downpipe=pipelist)
if verbose is True:
print("Command line for running vcfallelicprimitives is: {0}".format(runner.cmd_line))
runner.run_checkoutput()
return outprefix
def run_gatk_VariantsToAllelicPrimitives(self, outprefix, reference,
outdir=None, compress=None, verbose=None):
"""
Run GATK VariantsToAllelicPrimitives in order to decompose MNPs
into more basic/primitive alleles
Parameters
----------
outprefix : str
Prefix for outputfiles.
reference : str
Path to fasta file containing the reference.
outdir : str, optional
If provided, then put output files in this folder.
compress : boolean, optional
Bgzip compress the normalized VCF.
verbose : bool, optional
            If true, then increase verbosity.
Returns
-------
outprefix : str
A string with path to decomposed file.
"""
if self.gatk_folder is None:
raise Exception("Error. I need that the folder containing the GATK "
"wrapper script!")
if outdir:
outprefix = "{0}/{1}".format(outdir, outprefix)
outprefix = outprefix+".aprimitives.vcf"
Arg = namedtuple('Argument', 'option value')
args = [Arg('-T', 'VariantsToAllelicPrimitives'), Arg('-R', reference),
Arg('-V', self.vcf), Arg('-o', outprefix)]
runner = RunProgram(program="{0}/gatk3".format(self.gatk_folder), args=args)
if verbose is True:
print("Command line is: {0}".format(runner.cmd_line))
stdout, stderr, is_error = runner.run_popen()
if compress is True:
compressRunner = RunProgram(path=self.bgzip_folder, program='bgzip',
parameters=['-c', outprefix, '>', outprefix+".gz"])
compressRunner.run_checkoutput()
#delete tmp files
os.remove(outprefix)
os.remove(outprefix+".idx")
outprefix += ".gz"
elif compress is False:
return outprefix
else:
raise Exception("'compress' parameter is not valid")
return outprefix
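if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original
    # module): the VCF, reference and binary folder paths below are
    # hypothetical placeholders and must be replaced before running.
    norm = VcfNormalize(vcf="/path/to/sample.vcf.gz",
                        vt_folder="/path/to/vt",
                        bgzip_folder="/path/to/bgzip",
                        bcftools_folder="/path/to/bcftools")
    normalized = norm.run_bcftoolsnorm(outprefix="sample",
                                       reference="/path/to/reference.fa",
                                       outdir="/tmp",
                                       verbose=True)
    print("Normalized VCF written to {0}".format(normalized))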
|
igsr/igsr_analysis
|
VCF/VcfNormalize.py
|
Python
|
apache-2.0
| 11,345
|
# Copyright 2016 Matthias Gazzari
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''the library version'''
__version__ = '0.38.0'
|
qtux/instmatcher
|
instmatcher/version.py
|
Python
|
apache-2.0
| 631
|
"""
Copyright (c) 2017 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV, Paderborn University
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import uuid
class InstanceFlavor:
def __init__(self, name, cpu=None, memory=None, memory_unit=None, storage=None, storage_unit=None):
self.id = str(uuid.uuid4())
self.name = name
self.cpu = cpu
self.memory = memory
self.memory_unit = memory_unit
self.storage = storage
self.storage_unit = storage_unit
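if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of son-emu): the
    # flavor values below are illustrative placeholders only.
    small = InstanceFlavor("m1.small", cpu=1, memory=2048, memory_unit="MB",
                           storage=20, storage_unit="GB")
    print(small.id, small.name, small.cpu, small.memory, small.memory_unit)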
|
stevenvanrossem/son-emu
|
src/emuvim/api/openstack/resources/instance_flavor.py
|
Python
|
apache-2.0
| 1,486
|
from TestStack.White.UIItems.TreeItems import Tree
from WhiteLibrary.keywords.librarycomponent import LibraryComponent
from WhiteLibrary.keywords.robotlibcore import keyword
class TreeKeywords(LibraryComponent):
@keyword
def select_tree_node(self, locator, *node_path):
"""Selects a tree node.
``locator`` is the locator of the tree or Tree item object.
Locator syntax is explained in `Item locators`.
        ``node_path`` is the path to the node to select.
Example tree (tree locator is tree_id):
| root
| |
| |---parent_node
| | |
| | |---child_node
| | |
| | |---sibling_node
| |
| |---other parent
Example usage to select ``sibling node``:
| Select Tree Node | tree_id | root | parent_node | sibling_node |
"""
tree = self.state._get_typed_item_by_locator(Tree, locator)
tree.Nodes.GetItem(node_path).Select()
@keyword
def expand_tree_node(self, locator, *node_path):
"""Expands a tree node.
``locator`` is the locator of the tree or Tree item object.
Locator syntax is explained in `Item locators`.
``node_path`` is the path the to node to expand.
See examples of the node path in `Select Tree Node` documentation.
"""
tree = self.state._get_typed_item_by_locator(Tree, locator)
tree.Nodes.GetItem(node_path).Expand()
@keyword
def double_click_tree_node(self, locator, *node_path):
"""Double-clicks a tree node.
``locator`` is the locator of the tree or Tree item object.
Locator syntax is explained in `Item locators`.
        ``node_path`` is the path to the node to double-click.
See examples of the node path in `Select Tree Node` documentation.
"""
tree = self.state._get_typed_item_by_locator(Tree, locator)
tree.Nodes.GetItem(node_path).DoubleClick()
@keyword
def right_click_tree_node(self, locator, *node_path):
"""Right-clicks a tree node.
``locator`` is the locator of the tree or Tree item object.
Locator syntax is explained in `Item locators`.
        ``node_path`` is the path to the node to right-click.
See examples of the node path in `Select Tree Node` documentation.
"""
tree = self.state._get_typed_item_by_locator(Tree, locator)
tree.Nodes.GetItem(node_path).RightClick()
|
Omenia/robotframework-whitelibrary
|
src/WhiteLibrary/keywords/items/tree.py
|
Python
|
apache-2.0
| 2,478
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Generator/Sheet4
"""
import os
import glob
def Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV, Unit_front):
"""
    This function creates the monthly and yearly sheet 4 in PDF format, based on the CSV files.
Parameters
----------
Dir_Basin : str
Path to all the output data of the Basin
Basin : str
Name of the basin
Simulation : int
Defines the simulation
Dir_Basin_CSV : str
Data path pointing to the CSV output files
Unit_front : str
Defines the scaling of the CSV file
"""
# import wa module
from wa.Sheets import create_sheet4
# Create output folder for CSV files
Dir_Basin_PDF = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation, "PDF")
if not os.path.exists(Dir_Basin_PDF):
os.mkdir(Dir_Basin_PDF)
# find all the CSV's
os.chdir(Dir_Basin_CSV)
files = glob.glob('Sheet4_Sim%d*.csv' %Simulation)
# loop over CSV's files
for File in files:
# split the name
FileName_Splitted = File.split('_')
        # If the name splits into 4 parts then it is a yearly sheet
if len(FileName_Splitted)==4:
# Define the output names
units = '%skm3/year' %(Unit_front)
Year = str(FileName_Splitted[-1].split('.')[0])
outFile1 = 'Sheet4a_Sim%s_%s_%s.pdf' %(Simulation, Basin, Year)
outFile2 = 'Sheet4b_Sim%s_%s_%s.pdf' %(Simulation, Basin, Year)
# create the sheet
create_sheet4(basin=Basin, period = Year, units = [units,units], data = [os.path.join(Dir_Basin_CSV,File),os.path.join(Dir_Basin_CSV,File)] , output = [os.path.join(Dir_Basin_PDF, outFile1),os.path.join(Dir_Basin_PDF, outFile2)], template=False, tolerance=1000)
        # If the name splits into 5 parts then it is a monthly sheet
elif len(FileName_Splitted)==5:
# Define the output names
MonthInLetters = {1:'January',2:'February',3:'March',4:'April',5:'May',6:'June',7:'July',8:'August',9:'September',10:'October',11:'November',12:'December'}
units = '%skm3/month' %(Unit_front)
Year = str(FileName_Splitted[3])
Month = str(FileName_Splitted[-1].split('.')[0])
NameTime = '%s_%02s' %(Year, Month)
NameTimeSpace = '%s %s' %(Year, MonthInLetters[int(Month)])
outFile1 = 'Sheet4a_Sim%s_%s_%s.pdf' %(Simulation, Basin, NameTime)
outFile2 = 'Sheet4b_Sim%s_%s_%s.pdf' %(Simulation, Basin, NameTime)
# create the sheet
create_sheet4(basin=Basin, period = NameTimeSpace, units = [units,units] , data = [os.path.join(Dir_Basin_CSV,File),os.path.join(Dir_Basin_CSV,File)] , output = [os.path.join(Dir_Basin_PDF, outFile1),os.path.join(Dir_Basin_PDF, outFile2)],template=False, tolerance=0.2)
return()
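if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the wa package):
    # the basin name and directories below are hypothetical placeholders.
    Create(Dir_Basin=r"/data/basins/Example",
           Basin="Example",
           Simulation=1,
           Dir_Basin_CSV=r"/data/basins/Example/Simulations/Simulation_1/CSV",
           Unit_front="")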
|
wateraccounting/wa
|
Generator/Sheet4/PDF.py
|
Python
|
apache-2.0
| 3,012
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for downloading media from Google APIs."""
import urllib3.response # type: ignore
from google.resumable_media import _download
from google.resumable_media import common
from google.resumable_media import _helpers
from google.resumable_media.requests import _request_helpers
_CHECKSUM_MISMATCH = """\
Checksum mismatch while downloading:
{}
The X-Goog-Hash header indicated an {checksum_type} checksum of:
{}
but the actual {checksum_type} checksum of the downloaded contents was:
{}
"""
class Download(_request_helpers.RequestsMixin, _download.Download):
"""Helper to manage downloading a resource from a Google API.
"Slices" of the resource can be retrieved by specifying a range
with ``start`` and / or ``end``. However, in typical usage, neither
``start`` nor ``end`` is expected to be provided.
Args:
media_url (str): The URL containing the media to be downloaded.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
the downloaded resource can be written to.
start (int): The first byte in a range to be downloaded. If not
provided, but ``end`` is provided, will download from the
beginning to ``end`` of the media.
end (int): The last byte in a range to be downloaded. If not
provided, but ``start`` is provided, will download from the
``start`` to the end of the media.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
        checksum (Optional[str]): The type of checksum to compute to verify
the integrity of the object. The response headers must contain
a checksum of the requested type. If the headers lack an
appropriate checksum (for instance in the case of transcoded or
ranged downloads where the remote service does not know the
correct checksum) an INFO-level log will be emitted. Supported
values are "md5", "crc32c" and None. The default is "md5".
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
"""
def _write_to_stream(self, response):
"""Write response body to a write-able stream.
.. note:
This method assumes that the ``_stream`` attribute is set on the
current download.
Args:
response (~requests.Response): The HTTP response object.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
"""
# Retrieve the expected checksum only once for the download request,
# then compute and validate the checksum when the full download completes.
# Retried requests are range requests, and there's no way to detect
# data corruption for that byte range alone.
if self._expected_checksum is None and self._checksum_object is None:
# `_get_expected_checksum()` may return None even if a checksum was
# requested, in which case it will emit an info log _MISSING_CHECKSUM.
# If an invalid checksum type is specified, this will raise ValueError.
expected_checksum, checksum_object = _helpers._get_expected_checksum(
response, self._get_headers, self.media_url, checksum_type=self.checksum
)
self._expected_checksum = expected_checksum
self._checksum_object = checksum_object
else:
expected_checksum = self._expected_checksum
checksum_object = self._checksum_object
with response:
# NOTE: In order to handle compressed streams gracefully, we try
# to insert our checksum object into the decompression stream. If
# the stream is indeed compressed, this will delegate the checksum
# object to the decoder and return a _DoNothingHash here.
local_checksum_object = _add_decoder(response.raw, checksum_object)
body_iter = response.iter_content(
chunk_size=_request_helpers._SINGLE_GET_CHUNK_SIZE, decode_unicode=False
)
for chunk in body_iter:
self._stream.write(chunk)
self._bytes_downloaded += len(chunk)
local_checksum_object.update(chunk)
if expected_checksum is not None:
actual_checksum = _helpers.prepare_checksum_digest(checksum_object.digest())
if actual_checksum != expected_checksum:
msg = _CHECKSUM_MISMATCH.format(
self.media_url,
expected_checksum,
actual_checksum,
checksum_type=self.checksum.upper(),
)
raise common.DataCorruption(response, msg)
def consume(
self,
transport,
timeout=(
_request_helpers._DEFAULT_CONNECT_TIMEOUT,
_request_helpers._DEFAULT_READ_TIMEOUT,
),
):
"""Consume the resource to be downloaded.
If a ``stream`` is attached to this download, then the downloaded
resource will be written to the stream.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, Tuple[float, float]]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
ValueError: If the current :class:`Download` has already
finished.
"""
method, _, payload, headers = self._prepare_request()
# NOTE: We assume "payload is None" but pass it along anyway.
request_kwargs = {
"data": payload,
"headers": headers,
"timeout": timeout,
}
if self._stream is not None:
request_kwargs["stream"] = True
# Assign object generation if generation is specified in the media url.
if self._object_generation is None:
self._object_generation = _helpers._get_generation_from_url(self.media_url)
# Wrap the request business logic in a function to be retried.
def retriable_request():
url = self.media_url
# To restart an interrupted download, read from the offset of last byte
# received using a range request, and set object generation query param.
if self._bytes_downloaded > 0:
_download.add_bytes_range(
self._bytes_downloaded, self.end, self._headers
)
request_kwargs["headers"] = self._headers
# Set object generation query param to ensure the same object content is requested.
if (
self._object_generation is not None
and _helpers._get_generation_from_url(self.media_url) is None
):
query_param = {"generation": self._object_generation}
url = _helpers.add_query_parameters(self.media_url, query_param)
result = transport.request(method, url, **request_kwargs)
# If a generation hasn't been specified, and this is the first response we get, let's record the
# generation. In future requests we'll specify the generation query param to avoid data races.
if self._object_generation is None:
self._object_generation = _helpers._parse_generation_header(
result, self._get_headers
)
self._process_response(result)
if self._stream is not None:
self._write_to_stream(result)
return result
return _request_helpers.wait_and_retry(
retriable_request, self._get_status_code, self._retry_strategy
)
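# Hedged usage sketch (added for illustration, not taken from
# google-resumable-media's documented examples): download a Cloud Storage
# object into a local file with an authorized requests session. The bucket,
# object and file names below are hypothetical placeholders.
#
#   import google.auth
#   import google.auth.transport.requests as tr_requests
#   ro_scope = "https://www.googleapis.com/auth/devstorage.read_only"
#   credentials, _ = google.auth.default(scopes=[ro_scope])
#   transport = tr_requests.AuthorizedSession(credentials)
#   media_url = ("https://storage.googleapis.com/download/storage/v1"
#                "/b/BUCKET/o/OBJECT?alt=media")
#   with open("object.bin", "wb") as stream:
#       download = Download(media_url, stream=stream, checksum="md5")
#       response = download.consume(transport)
#       print(response.status_code)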
class RawDownload(_request_helpers.RawRequestsMixin, _download.Download):
"""Helper to manage downloading a raw resource from a Google API.
"Slices" of the resource can be retrieved by specifying a range
with ``start`` and / or ``end``. However, in typical usage, neither
``start`` nor ``end`` is expected to be provided.
Args:
media_url (str): The URL containing the media to be downloaded.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
the downloaded resource can be written to.
start (int): The first byte in a range to be downloaded. If not
provided, but ``end`` is provided, will download from the
beginning to ``end`` of the media.
end (int): The last byte in a range to be downloaded. If not
provided, but ``start`` is provided, will download from the
``start`` to the end of the media.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
        checksum (Optional[str]): The type of checksum to compute to verify
the integrity of the object. The response headers must contain
a checksum of the requested type. If the headers lack an
appropriate checksum (for instance in the case of transcoded or
ranged downloads where the remote service does not know the
correct checksum) an INFO-level log will be emitted. Supported
values are "md5", "crc32c" and None. The default is "md5".
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
"""
def _write_to_stream(self, response):
"""Write response body to a write-able stream.
.. note:
This method assumes that the ``_stream`` attribute is set on the
current download.
Args:
response (~requests.Response): The HTTP response object.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
"""
# Retrieve the expected checksum only once for the download request,
# then compute and validate the checksum when the full download completes.
# Retried requests are range requests, and there's no way to detect
# data corruption for that byte range alone.
if self._expected_checksum is None and self._checksum_object is None:
# `_get_expected_checksum()` may return None even if a checksum was
# requested, in which case it will emit an info log _MISSING_CHECKSUM.
# If an invalid checksum type is specified, this will raise ValueError.
expected_checksum, checksum_object = _helpers._get_expected_checksum(
response, self._get_headers, self.media_url, checksum_type=self.checksum
)
self._expected_checksum = expected_checksum
self._checksum_object = checksum_object
else:
expected_checksum = self._expected_checksum
checksum_object = self._checksum_object
with response:
body_iter = response.raw.stream(
_request_helpers._SINGLE_GET_CHUNK_SIZE, decode_content=False
)
for chunk in body_iter:
self._stream.write(chunk)
self._bytes_downloaded += len(chunk)
checksum_object.update(chunk)
response._content_consumed = True
if expected_checksum is not None:
actual_checksum = _helpers.prepare_checksum_digest(checksum_object.digest())
if actual_checksum != expected_checksum:
msg = _CHECKSUM_MISMATCH.format(
self.media_url,
expected_checksum,
actual_checksum,
checksum_type=self.checksum.upper(),
)
raise common.DataCorruption(response, msg)
def consume(
self,
transport,
timeout=(
_request_helpers._DEFAULT_CONNECT_TIMEOUT,
_request_helpers._DEFAULT_READ_TIMEOUT,
),
):
"""Consume the resource to be downloaded.
If a ``stream`` is attached to this download, then the downloaded
resource will be written to the stream.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, Tuple[float, float]]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
ValueError: If the current :class:`Download` has already
finished.
"""
method, _, payload, headers = self._prepare_request()
# NOTE: We assume "payload is None" but pass it along anyway.
request_kwargs = {
"data": payload,
"headers": headers,
"timeout": timeout,
"stream": True,
}
# Assign object generation if generation is specified in the media url.
if self._object_generation is None:
self._object_generation = _helpers._get_generation_from_url(self.media_url)
# Wrap the request business logic in a function to be retried.
def retriable_request():
url = self.media_url
# To restart an interrupted download, read from the offset of last byte
# received using a range request, and set object generation query param.
if self._bytes_downloaded > 0:
_download.add_bytes_range(
self._bytes_downloaded, self.end, self._headers
)
request_kwargs["headers"] = self._headers
# Set object generation query param to ensure the same object content is requested.
if (
self._object_generation is not None
and _helpers._get_generation_from_url(self.media_url) is None
):
query_param = {"generation": self._object_generation}
url = _helpers.add_query_parameters(self.media_url, query_param)
result = transport.request(method, url, **request_kwargs)
# If a generation hasn't been specified, and this is the first response we get, let's record the
# generation. In future requests we'll specify the generation query param to avoid data races.
if self._object_generation is None:
self._object_generation = _helpers._parse_generation_header(
result, self._get_headers
)
self._process_response(result)
if self._stream is not None:
self._write_to_stream(result)
return result
return _request_helpers.wait_and_retry(
retriable_request, self._get_status_code, self._retry_strategy
)
class ChunkedDownload(_request_helpers.RequestsMixin, _download.ChunkedDownload):
"""Download a resource in chunks from a Google API.
Args:
media_url (str): The URL containing the media to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each
request.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
will be used to concatenate chunks of the resource as they are
downloaded.
start (int): The first byte in a range to be downloaded. If not
provided, defaults to ``0``.
end (int): The last byte in a range to be downloaded. If not
provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. customer-supplied encryption
            key headers.
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each request.
Raises:
ValueError: If ``start`` is negative.
"""
def consume_next_chunk(
self,
transport,
timeout=(
_request_helpers._DEFAULT_CONNECT_TIMEOUT,
_request_helpers._DEFAULT_READ_TIMEOUT,
),
):
"""Consume the next chunk of the resource to be downloaded.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, Tuple[float, float]]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
ValueError: If the current download has finished.
"""
method, url, payload, headers = self._prepare_request()
# Wrap the request business logic in a function to be retried.
def retriable_request():
# NOTE: We assume "payload is None" but pass it along anyway.
result = transport.request(
method,
url,
data=payload,
headers=headers,
timeout=timeout,
)
self._process_response(result)
return result
return _request_helpers.wait_and_retry(
retriable_request, self._get_status_code, self._retry_strategy
)
class RawChunkedDownload(_request_helpers.RawRequestsMixin, _download.ChunkedDownload):
"""Download a raw resource in chunks from a Google API.
Args:
media_url (str): The URL containing the media to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each
request.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
will be used to concatenate chunks of the resource as they are
downloaded.
start (int): The first byte in a range to be downloaded. If not
provided, defaults to ``0``.
end (int): The last byte in a range to be downloaded. If not
provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. customer-supplied encryption
            key headers.
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each request.
Raises:
ValueError: If ``start`` is negative.
"""
def consume_next_chunk(
self,
transport,
timeout=(
_request_helpers._DEFAULT_CONNECT_TIMEOUT,
_request_helpers._DEFAULT_READ_TIMEOUT,
),
):
"""Consume the next chunk of the resource to be downloaded.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, Tuple[float, float]]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
ValueError: If the current download has finished.
"""
method, url, payload, headers = self._prepare_request()
# Wrap the request business logic in a function to be retried.
def retriable_request():
# NOTE: We assume "payload is None" but pass it along anyway.
result = transport.request(
method,
url,
data=payload,
headers=headers,
stream=True,
timeout=timeout,
)
self._process_response(result)
return result
return _request_helpers.wait_and_retry(
retriable_request, self._get_status_code, self._retry_strategy
)
def _add_decoder(response_raw, checksum):
"""Patch the ``_decoder`` on a ``urllib3`` response.
This is so that we can intercept the compressed bytes before they are
decoded.
Only patches if the content encoding is ``gzip``.
Args:
response_raw (urllib3.response.HTTPResponse): The raw response for
an HTTP request.
checksum (object):
A checksum which will be updated with compressed bytes.
Returns:
object: Either the original ``checksum`` if ``_decoder`` is not
patched, or a ``_DoNothingHash`` if the decoder is patched, since the
        caller will no longer need to hash the decoded bytes.
"""
encoding = response_raw.headers.get("content-encoding", "").lower()
if encoding != "gzip":
return checksum
response_raw._decoder = _GzipDecoder(checksum)
return _helpers._DoNothingHash()
class _GzipDecoder(urllib3.response.GzipDecoder):
"""Custom subclass of ``urllib3`` decoder for ``gzip``-ed bytes.
Allows a checksum function to see the compressed bytes before they are
decoded. This way the checksum of the compressed value can be computed.
Args:
checksum (object):
A checksum which will be updated with compressed bytes.
"""
def __init__(self, checksum):
super(_GzipDecoder, self).__init__()
self._checksum = checksum
def decompress(self, data):
"""Decompress the bytes.
Args:
data (bytes): The compressed bytes to be decompressed.
Returns:
bytes: The decompressed bytes from ``data``.
"""
self._checksum.update(data)
return super(_GzipDecoder, self).decompress(data)
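# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the library). It
# shows one plausible way to drive the ``ChunkedDownload`` class defined above
# with a plain ``requests.Session``. The media URL and credentials are the
# caller's responsibility; real Google Cloud Storage downloads would normally
# use an authorized session such as
# ``google.auth.transport.requests.AuthorizedSession``.
def _example_chunked_download(media_url, transport, chunk_size=1024 * 1024):
    """Download ``media_url`` into memory, ``chunk_size`` bytes at a time."""
    import io

    stream = io.BytesIO()
    download = ChunkedDownload(media_url, chunk_size, stream)
    # ``finished`` comes from the shared ChunkedDownload base class and flips
    # to True once the final byte range has been consumed.
    while not download.finished:
        download.consume_next_chunk(transport)
    return stream.getvalue()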
|
googleapis/google-resumable-media-python
|
google/resumable_media/requests/download.py
|
Python
|
apache-2.0
| 24,705
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20151012_1611'),
]
operations = [
migrations.AlterField(
model_name='draft',
name='banner2',
field=image_cropping.fields.ImageRatioField('image_banner2', '1920x1200', help_text=None, adapt_rotation=False, hide_image_field=False, allow_fullsize=False, free_crop=False, verbose_name='banner2', size_warning=False),
),
migrations.AlterField(
model_name='post',
name='banner2',
field=image_cropping.fields.ImageRatioField('image_banner2', '1920x1200', help_text=None, adapt_rotation=False, hide_image_field=False, allow_fullsize=False, free_crop=False, verbose_name='banner2', size_warning=False),
),
]
|
rogerhil/flaviabernardes
|
flaviabernardes/flaviabernardes/blog/migrations/0006_auto_20151027_1121.py
|
Python
|
apache-2.0
| 942
|
import multiprocessing, sys, random
import model
pool = None
def get_pool():
global pool
if pool is None:
pool = multiprocessing.Pool()
return pool
def search_d1_process_opponent(mxm, gm, maxpi, minpi, init):
maxp, minp = gm.plebeians[maxpi], gm.plebeians[minpi]
acts = gm.NumActions()
maxmv = gm.ActionToMove(maxp, mxm)
if maxmv.dstpair not in gm.board or (maxmv.dstpair == maxmv.srcpair):
return None
best = None
for mnm in range(acts):
minmv = gm.ActionToMove(minp, mnm)
if minmv.dstpair not in gm.board or (minmv.dstpair == minmv.srcpair):
continue
gm.board = init.Copy()
gm.locked.clear()
gm.pending_moves.clear()
gm.PoseAgentMove(maxp, mxm)
gm.PoseAgentMove(minp, mnm)
rwx, rwn = gm.RewardScalar(maxp), gm.RewardScalar(minp)
loss = rwn - rwx
minp.Events()
maxp.Events()
gm.GlobalEvents()
if best is None or loss > best[0]:
best = (loss, mxm, mnm)
if best is not None:
minmv = gm.ActionToMove(minp, best[2])
score = best[0]
else:
minmv = model.Move(minp, (-1, -1), (-1, -1))
score = '\x1b[1;31mNO BEST'
print(f'\x1b[G{mxm:04d} / {acts:04d}: \x1b[1;36m{score} \x1b[32mme: {maxmv.srcpair} -> {maxmv.dstpair} \x1b[31madv: {minmv.srcpair} -> {minmv.dstpair}\x1b[m', end='')
sys.stdout.flush()
return best
def search_d1(gm, maxp):
maxpi = 1 if maxp is gm.plebeians[0] else 0
minpi = 0 if maxpi == 1 else 1
init = gm.board.Copy()
acts = gm.NumActions()
best = None
maxs = list(get_pool().starmap(search_d1_process_opponent, ((i, gm, maxpi, minpi, init) for i in range(acts))))
for cand in maxs:
if cand is None:
continue
if best is None or cand[0] < best[0]:
best = cand
best = random.choice([i for i in maxs if i is not None and i[0] == best[0]])
gm.board = init.Copy()
gm.locked.clear()
return best
def search_d0(gm, maxp, rf=None):
if rf is None:
rf = model.Game.RewardScalar
minp = gm.plebeians[0] if maxp is gm.plebeians[1] else gm.plebeians[1]
init = gm.board.Copy()
acts = gm.NumActions()
best = None
for act in range(acts):
gm.board = init.Copy()
gm.locked.clear()
gm.pending_moves = {minp: ((-1, -1), (-1, -1))}
gm.PoseAgentMove(maxp, act)
rw = rf(gm, act, init)
minp.Events()
maxp.Events()
gm.GlobalEvents()
if best is None or rw > best[0]:
best = (rw, act)
return best
|
cmr/automatafl
|
old_python_prototype/search.py
|
Python
|
apache-2.0
| 2,634
|
import os, logging
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
defineOutputDirProperty('OUTPUT_DIR', None)
from xpybuild.targets.copy import Copy
for i in range(0, 3500):
Copy('${OUTPUT_DIR}/output%s/'%i, FindPaths('${OUTPUT_DIR}/../input-files/', includes='**/*.txt'))
|
xpybuild/xpybuild
|
tests/performance/Dependency_FindPaths_NonTarget_Performance/Input/test.xpybuild.py
|
Python
|
apache-2.0
| 337
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Displays the permissions that a user role or subnetwork may be granted.
To get a subnetwork ID, run get_subnetworks.py.
A user role may not be set with more permissions than the subnetwork it
belongs to. You may enter a subnetwork ID to see the maximum permissions a
user role belonging to it can have, or enter '0' as the subnetwork ID to see
all possible permissions.
Tags: userrole.getAvailablePermissions
"""
__author__ = 'Joseph DiLallo'
# Import appropriate classes from the client library.
from googleads import dfa
SUBNETWORK_ID = 'INSERT_SUBNETWORK_ID_HERE'
def main(client, subnetwork_id):
# Initialize appropriate service.
user_role_service = client.GetService(
'userrole', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Get available permissions.
results = user_role_service.getAvailablePermissions(subnetwork_id)
# Display permission name and its ID.
if results:
for permission in results:
print ('Permission with name \'%s\' and ID \'%s\' was found.'
% (permission['name'], permission['id']))
else:
print 'No permissions found.'
if __name__ == '__main__':
# Initialize client object.
dfa_client = dfa.DfaClient.LoadFromStorage()
main(dfa_client, SUBNETWORK_ID)
|
jdilallo/jdilallo-test
|
examples/dfa/v1_20/get_available_permissions.py
|
Python
|
apache-2.0
| 1,872
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to be used by user code when invoking Transform."""
import enum
# Enum used in stats_options_updater_fn to specify which stats are being
# updated.
class StatsType(enum.Enum):
UNKNOWN = 0
PRE_TRANSFORM = 1
POST_TRANSFORM = 2
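# Illustrative sketch (editor's addition, not TFX code): one plausible shape
# for a ``stats_options_updater_fn`` that branches on ``StatsType``. The
# assumed signature -- (stats_type, stats_options) -> stats_options -- and the
# ``sample_rate`` attribute are illustrative assumptions; consult the
# Transform component documentation for the actual contract.
def example_stats_options_updater_fn(stats_type, stats_options):
  if stats_type is StatsType.PRE_TRANSFORM:
    # Example: sample aggressively before the Transform graph is applied.
    stats_options.sample_rate = 0.1
  elif stats_type is StatsType.POST_TRANSFORM:
    # Example: compute full statistics on the transformed data.
    stats_options.sample_rate = 1.0
  return stats_options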
|
tensorflow/tfx
|
tfx/components/transform/stats_options_util.py
|
Python
|
apache-2.0
| 843
|
import unittest
from data_cube_utilities import dc_fractional_coverage_classifier
class TestFractionalCover(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_frac_coverage_classify(self):
pass
|
ceos-seo/data_cube_utilities
|
test/test_dc_fractional_coverage_classifier.py
|
Python
|
apache-2.0
| 261
|
# Copyright 2015 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
from datetime import datetime
from unittest import TestCase
from unittest.mock import Mock, patch
from ona_service.service import Service
class AwesomeAndTotallySweetService(Service):
def __init__(self, **kwargs):
kwargs.setdefault('data_type', 'datum')
kwargs.setdefault('poll_seconds', 0)
super().__init__(**kwargs)
self.called = False
def execute(self, now=None):
self.called = True
class ServiceTestCase(TestCase):
def test_thread(self):
service = AwesomeAndTotallySweetService()
def killer(signum, frame):
service.stop()
signal.signal(signal.SIGALRM, killer)
signal.alarm(1)
service.run()
self.assertTrue(service.called)
@patch('ona_service.service.utcnow', autospec=True)
@patch('ona_service.service.sleep', autospec=True)
def test_sleep(self, mock_sleep, mock_utcnow):
t1 = datetime(2015, 10, 1, 1, 30)
t2 = datetime(2015, 10, 1, 1, 30)
mock_utcnow.side_effect = [t1, t2]
service = AwesomeAndTotallySweetService(poll_seconds=30)
service.stop_event = Mock()
service.stop_event.is_set.side_effect = [False, False, True]
service.run()
self.assertTrue(service.called)
mock_sleep.assert_called_once_with(30)
@patch('ona_service.service.utcnow', autospec=True)
@patch('ona_service.service.sleep', autospec=True)
def test_sleep__short(self, mock_sleep, mock_utcnow):
t1 = datetime(2015, 10, 1, 1, 30)
t2 = datetime(2015, 10, 1, 1, 30, 3)
mock_utcnow.side_effect = [t1, t2]
service = AwesomeAndTotallySweetService(poll_seconds=30)
service.stop_event = Mock()
service.stop_event.is_set.side_effect = [False, False, True]
service.run()
self.assertTrue(service.called)
mock_sleep.assert_called_once_with(27) # 30 - 3
@patch('ona_service.service.utcnow', autospec=True)
@patch('ona_service.service.sleep', autospec=True)
def test_sleep__long_execute(self, mock_sleep, mock_utcnow):
t1 = datetime(2015, 10, 1, 1, 30)
t2 = datetime(2015, 10, 1, 1, 30, 31)
mock_utcnow.side_effect = [t1, t2]
service = AwesomeAndTotallySweetService(poll_seconds=30)
service.stop_event = Mock()
service.stop_event.is_set.side_effect = [False, False, True]
service.run()
self.assertTrue(service.called)
# if we take more than poll_seconds to run the job, sleep(0) and jump
# right back in.
mock_sleep.assert_called_once_with(0)
|
obsrvbl/ona
|
src/scripts/tests/test_service.py
|
Python
|
apache-2.0
| 3,205
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing floating IPs.
"""
import json
import netaddr
from django.core.urlresolvers import reverse_lazy # noqa
from django.http import HttpResponse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from django.views.generic import View # noqa
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips import forms as project_forms
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips import workflows as project_workflows
class AssociateView(workflows.WorkflowView):
workflow_class = project_workflows.IPAssociationWorkflow
class AllocateView(forms.ModalFormView):
form_class = project_forms.FloatingIpAllocate
template_name = 'project/access_and_security/floating_ips/allocate.html'
success_url = reverse_lazy('horizon:project:access_and_security:index')
def get_object_display(self, obj):
return obj.ip
def get_context_data(self, **kwargs):
context = super(AllocateView, self).get_context_data(**kwargs)
try:
context['usages'] = quotas.tenant_quota_usages(self.request)
except Exception:
exceptions.handle(self.request)
return context
def get_initial(self):
try:
pools = api.network.floating_ip_pools_list(self.request)
except Exception:
pools = []
exceptions.handle(self.request,
_("Unable to retrieve floating IP pools."))
pool_list = [(pool.id, pool.name) for pool in pools]
if not pool_list:
pool_list = [(None, _("No floating IP pools available."))]
return {'pool_list': pool_list}
def serializer(value, level=0, max_level=6):
if level > max_level:
return "?"
if isinstance(value, (dict, api.base.APIDictWrapper)):
rtn = {}
for k, v in value.items():
rtn[k] = serializer(v, level=level + 1, max_level=max_level)
return rtn
if isinstance(value, (list, tuple)):
rtn = []
for o in value:
rtn.append(serializer(o, level=level + 1, max_level=max_level))
return rtn
return value
class JSONView(View):
def get(self, request, *args, **kwargs):
search_opts = {}
for key in request.GET.keys():
search_opts[key] = request.GET.get(key)
try:
floating_ips = api.network.tenant_floating_ip_list(request,
**search_opts)
except Exception:
            msg = _('Unable to retrieve floating IP list.')
exceptions.handle(self.request, msg)
resp = json.dumps(floating_ips, default=serializer, check_circular=False)
return HttpResponse(resp, mimetype='application/json')
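# Illustrative sketch (editor's addition): how the ``serializer`` helper above
# flattens nested API objects, capping recursion at ``max_level``. The sample
# dict is made up for the example.
def _example_serializer():
    sample = {'ip': '10.0.0.5', 'meta': {'pool': {'name': 'public'}}}
    # Values nested deeper than max_level are replaced by "?", so 'pool'
    # resolves to "?" here while the top-level values pass through unchanged.
    return serializer(sample, max_level=1)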
|
kaiweifan/horizon
|
openstack_dashboard/dashboards/project/access_and_security/floating_ips/views.py
|
Python
|
apache-2.0
| 3,863
|
from neptune.base_classes import Error404
class NRouter(object):
"""
The URL route handling class
Rule is defined as:
[
{ 'route': '/', 'class': 'cname' },
{ 'route': '/b/:id', 'class': 'cname' },
{ 'route': '/home', 'class': 'cname' },
]
"""
# TODO Add Validator for rules
# TODO Decide whether regex or not, route priority etc.
    def __init__(self, rules=None):
        # Avoid a shared mutable default; each router gets its own rule list.
        self.rules = rules if rules is not None else []
def add_rule(self, route, cls):
self.rules.append(
{'route': route, 'class': cls})
def get_cls(self, route):
# TODO Fix it
for a in self.rules:
if a['route'] == route:
return a['class']()
return Error404()
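# Illustrative usage sketch (editor's addition). ``_DemoHome`` is a
# hypothetical handler class; a real application would register its own
# neptune controller classes instead.
class _DemoHome(object):
    pass


def _example_router():
    router = NRouter()
    router.add_rule('/', _DemoHome)
    # Exact-match lookup: '/' resolves to a _DemoHome instance, while any
    # other route falls back to Error404 as implemented in get_cls above.
    return router.get_cls('/')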
|
NeptuneFramework/neptune
|
neptune/router.py
|
Python
|
apache-2.0
| 760
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# See LICENSE file for details.
from flocker.node.agents.blockdevice import (
VolumeException, AlreadyAttachedVolume,
UnknownVolume, UnattachedVolume,
IBlockDeviceAPI, _blockdevicevolume_from_dataset_id,
BlockDeviceVolume
)
from uuid import uuid4, UUID
from zope.interface import implementer
from twisted.python.filepath import FilePath
from huawei_oceanstor_flocker_plugin import rest_client
from huawei_oceanstor_flocker_plugin import huawei_utils
from huawei_oceanstor_flocker_plugin.log import LOG
import json
@implementer(IBlockDeviceAPI)
class HuaweiBlockDeviceAPI(object):
"""
Huawei driver implemented ``IBlockDeviceAPI``.
"""
def __init__(self, cluster_id, huawei_conf_file,
compute_instance_id=None,
allocation_unit=None):
"""
:param cluster_id: An ID that include in the
names of Huawei volumes to identify cluster.
:param huawei_conf_file: The path of huawei config file.
:param compute_instance_id: An ID that used to create
host on the array to identify node.
:param allocation_unit: Allocation unit on array.
:returns: A ``BlockDeviceVolume``.
"""
LOG.info("Huawei block device init")
self._host_id = None
self._hostgroup_id = None
self.xml_file_path = huawei_conf_file
self.configuration = huawei_utils.get_login_info(
self.xml_file_path)
self.restclient = rest_client.RestClient(self.configuration)
self.restclient.login()
if compute_instance_id is None:
compute_instance_id = huawei_utils.get_instance_id(
self.xml_file_path)
self._compute_instance_id = compute_instance_id
self._cluster_id = cluster_id
if allocation_unit is None:
allocation_unit = 512
self._allocation_unit = allocation_unit
LOG.info("Finish huawei block device init")
def allocation_unit(self):
"""
The size, in bytes up to which ``IDeployer`` will round volume
sizes before calling ``IBlockDeviceAPI.create_volume``.
:returns: ``int``
"""
LOG.info("Call allocation_unit")
return self._allocation_unit
def compute_instance_id(self):
"""
Get an identifier for this node.
This will be compared against ``BlockDeviceVolume.attached_to``
to determine which volumes are locally attached and it will be used
with ``attach_volume`` to locally attach volumes.
:returns: A ``unicode`` object giving a provider-specific node
identifier which identifies the node where the method is run.
"""
LOG.info("Call compute_instance_id = %s" % self._compute_instance_id)
return unicode(self._compute_instance_id)
def create_volume(self, dataset_id, size):
"""
Create a new volume.
When called by ``IDeployer``, the supplied size will be
rounded up to the nearest ``IBlockDeviceAPI.allocation_unit()``
:param UUID dataset_id: The Flocker dataset ID of the dataset on this
volume.
:param int size: The size of the new volume in bytes.
:returns: A ``BlockDeviceVolume``.
"""
LOG.info("Call create_volume, dataset_id=%s, size=%d"
% (dataset_id, size))
name = huawei_utils.encode_name(dataset_id, self._cluster_id)
parameters = huawei_utils.get_lun_conf_params(self.xml_file_path)
if parameters is None:
raise VolumeException
pool_name = huawei_utils.get_pools(self.xml_file_path)
if pool_name is None:
raise VolumeException
pools = self.restclient.find_all_pools()
pool_info = self.restclient.find_pool_info(pool_name,
pools)
lun_param = {"TYPE": '11',
"NAME": name,
"PARENTTYPE": '216',
"PARENTID": pool_info['ID'],
"ALLOCTYPE": parameters['LUNType'],
"CAPACITY": str((size/512)),
"WRITEPOLICY": parameters['WriteType'],
"MIRRORPOLICY": parameters['MirrorSwitch'],
"PREFETCHPOLICY": parameters['PrefetchType'],
"PREFETCHVALUE": parameters['PrefetchValue'],
"DATATRANSFERPOLICY": parameters['policy'],
"READCACHEPOLICY": parameters['readcachepolicy'],
"WRITECACHEPOLICY": parameters['writecachepolicy']}
url = "/lun"
data = json.dumps(lun_param)
result = self.restclient.call(url, data)
lun_info = result['data']
volume = BlockDeviceVolume(
size=int(lun_info['CAPACITY'])*512,
attached_to=None,
dataset_id=huawei_utils.decode_name(lun_info['NAME'],
self._cluster_id),
blockdevice_id=unicode(lun_info['ID'])
)
return volume
def destroy_volume(self, blockdevice_id):
"""
Destroy an existing volume.
:param unicode blockdevice_id: The unique identifier for the volume to
destroy.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:return: ``None``
"""
LOG.info("Call destroy_volume blockdevice_id=%s" % blockdevice_id)
try:
self.restclient.delete_lun(blockdevice_id)
except Exception:
raise UnknownVolume(blockdevice_id)
def initialize_connection_iscsi(self):
"""
TODO: Initialize iscsi connection.
"""
initiator_name = huawei_utils.iscsi_get_initiator()
if initiator_name is None:
raise VolumeException
# Create hostgroup if not exist.
host_id = self.restclient.add_host_with_check(
self._compute_instance_id)
# Add initiator to the host.
self.restclient.ensure_initiator_added(self.xml_file_path,
initiator_name,
host_id)
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
self._host_id = host_id
self._hostgroup_id = hostgroup_id
def initialize_connection_fc(self):
"""
TODO: Initialize fc connection.
"""
wwns = huawei_utils.get_fc_wwpns()
if not wwns:
raise VolumeException
# Create hostgroup if not exist.
host_id = self.restclient.add_host_with_check(
self._compute_instance_id)
online_wwns_in_host = (
self.restclient.get_host_online_fc_initiators(host_id))
online_free_wwns = self.restclient.get_online_free_wwns()
for wwn in wwns:
if (wwn not in online_wwns_in_host and
wwn not in online_free_wwns):
wwns_in_host = (
self.restclient.get_host_initiators("fc", host_id))
iqns_in_host = (
self.restclient.get_host_initiators("iscsi", host_id))
if not wwns_in_host and not iqns_in_host:
self.restclient.remove_host(host_id)
LOG.error('Can not add FC initiator to host.')
raise VolumeException
for wwn in wwns:
if wwn in online_free_wwns:
self.restclient.add_fc_port_to_host(host_id, wwn)
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
self._host_id = host_id
self._hostgroup_id = hostgroup_id
def initialize_connection(self):
protocol = huawei_utils.get_protocol_info(self.xml_file_path)
if protocol is None:
raise VolumeException
if protocol == 'iSCSI':
self.initialize_connection_iscsi()
else:
self.initialize_connection_fc()
def attach_volume(self, blockdevice_id, attach_to):
"""
Attach ``blockdevice_id`` to the node indicated by ``attach_to``.
:param unicode blockdevice_id: The unique identifier for the block
device being attached.
:param unicode attach_to: An identifier like the one returned by the
``compute_instance_id`` method indicating the node to which to
attach the volume.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
already attached.
:returns: A ``BlockDeviceVolume`` with a ``attached_to`` attribute set
to ``attach_to``.
"""
LOG.info("Call attach_volume blockdevice_id=%s, attach_to=%s"
% (blockdevice_id, attach_to))
try:
lun_info = self.restclient.get_lun_info(blockdevice_id)
except Exception:
raise UnknownVolume(blockdevice_id)
if lun_info['EXPOSEDTOINITIATOR'].lower() == 'true':
raise AlreadyAttachedVolume(blockdevice_id)
self.initialize_connection()
self.restclient.do_mapping(blockdevice_id, self._hostgroup_id,
self._host_id)
huawei_utils.rescan_scsi()
lun_info = self.restclient.get_lun_info(blockdevice_id)
attached_volume = BlockDeviceVolume(
size=int(lun_info['CAPACITY'])*512,
attached_to=unicode(attach_to),
dataset_id=huawei_utils.decode_name(
lun_info['NAME'], self._cluster_id),
blockdevice_id=blockdevice_id)
return attached_volume
def detach_volume(self, blockdevice_id):
"""
Detach ``blockdevice_id`` from whatever host it is attached to.
:param unicode blockdevice_id: The unique identifier for the block
device being detached.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to anything.
:returns: ``None``
"""
LOG.info("Call detach_volume blockdevice_id=%s" % blockdevice_id)
device = self.get_device_path(blockdevice_id)
if device is not None:
huawei_utils.remove_scsi_device(device)
lun_info = self.restclient.get_lun_info(blockdevice_id)
if self.get_attached_to(lun_info) is not None:
self.restclient.delete_mapping(
blockdevice_id, self._compute_instance_id)
else:
LOG.error("Volume %s not attached." % blockdevice_id)
raise UnattachedVolume(blockdevice_id)
def get_attached_to(self, item):
"""
TODO: Find a way to save the attach_to information.
"""
LOG.info("Call get_attached_to")
if item['ISADD2LUNGROUP'] == 'true':
result = self.restclient.get_host_of_lun_map(item['ID'])
if 'data' in result:
return result['data'][0]['NAME']
return None
def list_volumes(self):
"""
List all the block devices available via the back end API.
:returns: A ``list`` of ``BlockDeviceVolume``s.
"""
LOG.info("Call list_volumes")
volumes = []
url = "/lun?range=[0-65535]"
result = self.restclient.call(url, None, "GET")
if 'data' in result:
for item in result['data']:
if huawei_utils.is_cluster_volume(
item['NAME'], self._cluster_id):
volume = BlockDeviceVolume(
size=int(item['CAPACITY'])*512,
attached_to=self.get_attached_to(item),
dataset_id=huawei_utils.decode_name(
item['NAME'], self._cluster_id),
blockdevice_id=unicode(item['ID'])
)
volumes.append(volume)
return volumes
def get_device_path(self, blockdevice_id):
"""
Return the device path that has been allocated to the block device on
the host to which it is currently attached.
:param unicode blockdevice_id: The unique identifier for the block
device.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to a host.
:returns: A ``FilePath`` for the device.
"""
LOG.info("Call get_device_path")
        # no multipath
try:
lun_info = self.restclient.get_lun_info(blockdevice_id)
except Exception:
raise UnknownVolume(blockdevice_id)
if lun_info['EXPOSEDTOINITIATOR'].lower() == 'false':
raise UnattachedVolume(blockdevice_id)
lun_wwn = lun_info['WWN']
for bd in huawei_utils.get_all_block_device():
bd_wwn = huawei_utils.get_wwn_of_deviceblock(bd)
if bd_wwn is not None and lun_wwn in bd_wwn:
LOG.info("device_path finded: %s" % bd)
return FilePath("/dev/"+bd)
return None
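# Illustrative usage sketch (editor's addition, not part of the driver). The
# config path and cluster id below are placeholders; in practice Flocker
# constructs the driver from its agent configuration, and building it here
# performs a real REST login against the array.
def _example_list_cluster_volumes():
    api = HuaweiBlockDeviceAPI(
        cluster_id=uuid4(),
        huawei_conf_file='/etc/flocker/huawei_config.xml')
    for volume in api.list_volumes():
        LOG.info("volume %s attached to %s"
                 % (volume.blockdevice_id, volume.attached_to))
    return api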
|
huaweistorage/huawei-oceanstor-flocker-plugin
|
huawei_oceanstor_flocker_plugin/huawei_oceanstor_blockdevice.py
|
Python
|
apache-2.0
| 13,417
|
import unittest, random, sys, time, getpass
sys.path.extend(['.','..','py'])
# FIX! add cases with shuffled data!
import h2o, h2o_cmd, h2o_hosts, h2o_gbm
import h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e, h2o_jobs as h2j
DO_PLOT_IF_KEVIN = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost, tryHeap
tryHeap = 28
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1, enable_benchmark_log=True, java_heap_GB=tryHeap)
else:
h2o_hosts.build_cloud_with_hosts(enable_benchmark_log=True)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GBM_poker_1m(self):
for trial in range(2):
# PARSE train****************************************
h2o.beta_features = False #turn off beta_features
start = time.time()
xList = []
eList = []
fList = []
modelKey = 'GBMModelKey'
timeoutSecs = 900
# Parse (train)****************************************
if h2o.beta_features:
print "Parsing to fvec directly! Have to noPoll=true!, and doSummary=False!"
csvPathname = 'poker/poker-hand-testing.data'
hex_key = 'poker-hand-testing.data.hex'
parseTrainResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put',
hex_key=hex_key, timeoutSecs=timeoutSecs, noPoll=h2o.beta_features, doSummary=False)
# hack
if h2o.beta_features:
h2j.pollWaitJobs(timeoutSecs=timeoutSecs, pollTimeoutSecs=timeoutSecs)
print "Filling in the parseTrainResult['destination_key'] for h2o"
                parseTrainResult['destination_key'] = hex_key
elapsed = time.time() - start
print "train parse end on ", csvPathname, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "train parse result:", parseTrainResult['destination_key']
# Logging to a benchmark file
algo = "Parse"
l = '{:d} jvms, {:d}GB heap, {:s} {:s} {:6.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, algo, csvPathname, elapsed)
print l
h2o.cloudPerfH2O.message(l)
# if you set beta_features here, the fvec translate will happen with the Inspect not the GBM
# h2o.beta_features = True
inspect = h2o_cmd.runInspect(key=parseTrainResult['destination_key'])
print "\n" + csvPathname, \
" num_rows:", "{:,}".format(inspect['num_rows']), \
" num_cols:", "{:,}".format(inspect['num_cols'])
num_rows = inspect['num_rows']
num_cols = inspect['num_cols']
            ### h2o_cmd.runSummary(key=parseTrainResult['destination_key'])
# GBM(train iterate)****************************************
h2o.beta_features = True
ntrees = 2
for max_depth in [5,10,20]:
params = {
'learn_rate': .1,
'nbins': 10,
'ntrees': ntrees,
'max_depth': max_depth,
'min_rows': 10,
'response': num_cols-1,
'ignored_cols_by_name': None,
}
print "Using these parameters for GBM: ", params
kwargs = params.copy()
h2o.beta_features = True
trainStart = time.time()
gbmTrainResult = h2o_cmd.runGBM(parseResult=parseTrainResult,
noPoll=h2o.beta_features, timeoutSecs=timeoutSecs, destination_key=modelKey, **kwargs)
# hack
if h2o.beta_features:
h2j.pollWaitJobs(timeoutSecs=timeoutSecs, pollTimeoutSecs=timeoutSecs)
trainElapsed = time.time() - trainStart
print "GBM training completed in", trainElapsed, "seconds. On dataset: ", csvPathname
# Logging to a benchmark file
algo = "GBM " + " ntrees=" + str(ntrees) + " max_depth=" + str(max_depth)
l = '{:d} jvms, {:d}GB heap, {:s} {:s} {:6.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, algo, csvPathname, trainElapsed)
print l
h2o.cloudPerfH2O.message(l)
gbmTrainView = h2o_cmd.runGBMView(model_key=modelKey)
                # errs from end of list? is that the last tree?
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "GBM 'errsLast'", errsLast
cm = gbmTrainView['gbm_model']['cms'][-1]['_arr'] # use the last one
pctWrongTrain = h2o_gbm.pp_cm_summary(cm);
print "Last line of this cm might be NAs, not CM"
print "\nTrain\n==========\n"
print h2o_gbm.pp_cm(cm)
# xList.append(ntrees)
xList.append(max_depth)
eList.append(pctWrongTrain)
fList.append(trainElapsed)
h2o.beta_features = False
# just plot the last one
if DO_PLOT_IF_KEVIN:
xLabel = 'max_depth'
eLabel = 'pctWrong'
fLabel = 'trainElapsed'
eListTitle = ""
fListTitle = ""
h2o_gbm.plotLists(xList, xLabel, eListTitle, eList, eLabel, fListTitle, fList, fLabel)
if __name__ == '__main__':
h2o.unit_main()
|
janezhango/BigDataMachineLearning
|
py/testdir_multi_jvm/test_GBM_poker_1m.py
|
Python
|
apache-2.0
| 5,772
|
# data.world-py
# Copyright 2017 data.world, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This product includes software developed at
# data.world, Inc.(http://data.world/).
from __future__ import absolute_import
import configparser
import os
import tempfile
from os import path
import pytest
from doublex import assert_that
from hamcrest import equal_to, is_not, is_, calling, raises, has_length, none
from six import StringIO
from datadotworld.config import FileConfig, EnvConfig, DefaultConfig, \
ChainedConfig, InlineConfig
# Shared fixtures
@pytest.fixture()
def config_directory(tmpdir):
return os.makedirs(str(tmpdir.join('.dw')))
@pytest.fixture()
def config_file_path(tmpdir):
return str(tmpdir.join('.dw/config'))
@pytest.fixture()
def default_config_file(config_file_path):
config_parser = configparser.ConfigParser()
config_parser.set(configparser.DEFAULTSECT, 'auth_token', 'file_token')
config_parser.write(open(config_file_path, 'w'))
# Tests
class TestDefaultConfig:
def test_auth_token(self):
assert_that(DefaultConfig().auth_token, none())
def test_cache_dir(self):
assert_that(DefaultConfig().cache_dir,
equal_to(path.expanduser('~/.dw/cache')))
def test_tmp_dir(self):
assert_that(DefaultConfig().tmp_dir,
equal_to(path.expanduser(tempfile.gettempdir())))
class TestInlineConfig:
def test_auth_token(self):
config = InlineConfig('inline_token')
assert_that(config.auth_token, equal_to('inline_token'))
def test_cache_dir(self):
config = InlineConfig('inline_token')
assert_that(config.cache_dir,
equal_to(path.expanduser('~/.dw/cache')))
def test_tmp_dir(self):
config = InlineConfig('inline_token')
assert_that(config.tmp_dir,
equal_to(path.expanduser(tempfile.gettempdir())))
class TestEnvConfig:
def test_auth_token(self, monkeypatch):
monkeypatch.setattr(os, 'environ', {'DW_AUTH_TOKEN': 'env_token'})
assert_that(EnvConfig().auth_token, equal_to('env_token'))
def test_cache_dir(self, monkeypatch):
monkeypatch.setattr(os, 'environ', {'DW_CACHE_DIR': 'env_cache_dir'})
assert_that(EnvConfig().cache_dir, equal_to('env_cache_dir'))
def test_tmp_dir(self, monkeypatch):
monkeypatch.setattr(os, 'environ', {'DW_TMP_DIR': 'env_tmp_dir'})
assert_that(EnvConfig().tmp_dir, equal_to('env_tmp_dir'))
class TestFileConfig:
# Fixtures
@pytest.fixture()
def legacy_file_path(self, tmpdir):
return str(tmpdir.join('.data.world'))
@pytest.fixture()
def default_invalid_config_file(self, config_file_path):
config_parser = configparser.ConfigParser()
config_parser.read_file(StringIO('[default]'))
config_parser.set('default', 'auth_token', 'lower_case_default')
config_parser.write(open(config_file_path, 'w'))
@pytest.fixture()
def alternative_config_file(self, config_file_path):
config_parser = configparser.ConfigParser()
config_parser.add_section('alternative')
config_parser.set('alternative', 'auth_token', 'alternativeabcd')
config_parser.write(open(config_file_path, 'w'))
@pytest.fixture()
def legacy_config_file(self, legacy_file_path):
with open(legacy_file_path, 'w') as legacy_file:
legacy_file.write('token=legacyabcd')
@pytest.fixture()
def unsuitable_legacy_config_file(self, legacy_file_path):
with open(legacy_file_path, 'w') as legacy_file:
legacy_file.write('fdasfsadfasda\nhlihfas=hilfa\ntoken')
# Tests
@pytest.mark.usefixtures('config_directory', 'default_config_file')
def test_auth_token(self, config_file_path):
config = FileConfig(config_file_path=config_file_path)
assert_that(config.auth_token, equal_to('file_token'))
@pytest.mark.usefixtures('config_directory', 'alternative_config_file')
def test_alternative_token(self, config_file_path):
config = FileConfig(profile='alternative',
config_file_path=config_file_path)
assert_that(config.auth_token, equal_to('alternativeabcd'))
@pytest.mark.usefixtures('legacy_config_file')
def test_legacy_token(self, legacy_file_path, config_file_path):
assert_that(path.isfile(config_file_path), is_(False))
config = FileConfig(legacy_file_path=legacy_file_path,
config_file_path=config_file_path)
assert_that(config.auth_token, equal_to('legacyabcd'))
assert_that(path.isfile(config_file_path), is_(True))
@pytest.mark.usefixtures('config_directory', 'default_invalid_config_file')
def test_invalid_config_section(self, config_file_path):
config = FileConfig(config_file_path=config_file_path)
assert_that(config.auth_token, equal_to('lower_case_default'))
assert_that(config._config_parser.sections(), has_length(0))
def test_missing_file(self, config_file_path):
assert_that(path.isfile(config_file_path), is_(False))
config = FileConfig(config_file_path=config_file_path)
assert_that(calling(lambda: config.auth_token), raises(RuntimeError))
@pytest.mark.usefixtures('unsuitable_legacy_config_file')
def test_missing_file_unsuitable_legacy_file(self, config_file_path):
assert_that(path.isfile(config_file_path), is_(False))
config = FileConfig(config_file_path=config_file_path)
assert_that(calling(lambda: config.auth_token), raises(RuntimeError))
@pytest.mark.usefixtures('config_directory', 'default_config_file')
def test_missing_token(self, config_file_path):
assert_that(path.isfile(config_file_path), is_(True))
config = FileConfig(profile='missingprofile',
config_file_path=config_file_path)
assert_that(calling(lambda: config.auth_token), raises(RuntimeError))
def test_save(self, config_file_path):
assert_that(path.isfile(config_file_path), is_(False))
config = FileConfig(config_file_path=config_file_path)
config.auth_token = 'brandnewtoken'
config.save()
config_reload = FileConfig(config_file_path=config_file_path)
assert_that(path.isfile(config_file_path), is_(True))
assert_that(config_reload.auth_token, equal_to(config.auth_token))
@pytest.mark.usefixtures('config_directory', 'default_config_file')
def test_save_overwrite(self, config_file_path):
config = FileConfig(config_file_path=config_file_path)
assert_that(config_file_path, is_not(equal_to('newtoken')))
config.auth_token = 'newtoken'
config.save()
config_reloaded = FileConfig(config_file_path=config_file_path)
assert_that(config_reloaded.auth_token, equal_to('newtoken'))
class TestChainedConfig:
@pytest.fixture()
def config_chain(self, monkeypatch, config_file_path):
monkeypatch.setattr(os, 'environ', {'DW_CACHE_DIR': 'env_cache_dir'})
chain = [EnvConfig(), FileConfig(config_file_path=config_file_path)]
return ChainedConfig(config_chain=chain)
@pytest.mark.usefixtures('config_directory', 'default_config_file')
def test_auth_token(self, config_chain):
assert_that(config_chain.auth_token, equal_to('file_token'))
def test_cache_dir(self, config_chain):
assert_that(config_chain.cache_dir, equal_to('env_cache_dir'))
def test_tmp_dir(self, config_chain):
assert_that(config_chain.tmp_dir,
equal_to(path.expanduser(tempfile.gettempdir())))
|
datadotworld/data.world-py
|
tests/datadotworld/test_config.py
|
Python
|
apache-2.0
| 8,193
|
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
__all__ = ['distance_based_on_molecular_formula']
def distance_based_on_molecular_formula(metabolite1, metabolite2, normalize=True):
"""Calculate the distance of two metabolites bases on the molecular formula
Arguments
---------
metabolite1 : Metabolite
The first metabolite.
metabolite2 : Metabolite
The second metabolite.
normalize : bool, optional
If the distance should be normalized by the total number of elements in both metabolites (defaults to True).
Returns
-------
float
The distance between metabolite1 and metabolite2.
"""
if len(metabolite1.formula.elements) == 0 or len(metabolite2.formula.elements) == 0:
raise ValueError('Cannot calculate distance between metabolites %s and %s' % (metabolite1, metabolite2))
elements = set(list(metabolite1.formula.elements.keys()) + list(metabolite2.formula.elements.keys()))
distance = 0.
for element in elements:
distance += abs(metabolite1.formula.elements.get(element, 0) - metabolite2.formula.elements.get(element, 0))
if normalize:
return distance / sum(list(metabolite1.formula.elements.values()) + list(metabolite2.formula.elements.values()))
else:
return distance
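# Illustrative sketch (editor's addition): ``distance_based_on_molecular_formula``
# only relies on a ``formula.elements`` mapping, so minimal stand-in classes
# are enough to show the arithmetic. These classes are hypothetical and not
# part of the cameo API.
class _Formula(object):
    def __init__(self, elements):
        self.elements = elements


class _Metabolite(object):
    def __init__(self, elements):
        self.formula = _Formula(elements)


def _example_distance():
    water = _Metabolite({'H': 2, 'O': 1})
    peroxide = _Metabolite({'H': 2, 'O': 2})
    # Element-wise difference: |2-2| + |1-2| = 1, normalized by 3 + 4 = 7.
    return distance_based_on_molecular_formula(water, peroxide)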
|
KristianJensen/cameo
|
cameo/network_analysis/util.py
|
Python
|
apache-2.0
| 1,935
|
"""
File copy from https://github.com/moddevices/lilvlib
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------------------------------------
# Imports
import json
import lilv
import os
from math import fmod
# ------------------------------------------------------------------------------------------------------------
# Utilities
def LILV_FOREACH(collection, func):
itr = collection.begin()
while itr:
yield func(collection.get(itr))
itr = collection.next(itr)
class NS(object):
def __init__(self, world, base):
self.world = world
self.base = base
self._cache = {}
def __getattr__(self, attr):
if attr.endswith("_"):
attr = attr[:-1]
if attr not in self._cache:
self._cache[attr] = lilv.Node(self.world.new_uri(self.base+attr))
return self._cache[attr]
def is_integer(string):
return string.strip().lstrip("-+").isdigit()
def get_short_port_name(portName):
if len(portName) <= 16:
return portName
portName = portName.split("/",1)[0].split(" (",1)[0].split(" [",1)[0].strip()
# cut stuff if too big
if len(portName) > 16:
portName = portName[0] + portName[1:].replace("a","").replace("e","").replace("i","").replace("o","").replace("u","")
if len(portName) > 16:
portName = portName[:16]
return portName.strip()
# ------------------------------------------------------------------------------------------------------------
def get_category(nodes):
lv2_category_indexes = {
'DelayPlugin': ['Delay'],
'DistortionPlugin': ['Distortion'],
'WaveshaperPlugin': ['Distortion', 'Waveshaper'],
'DynamicsPlugin': ['Dynamics'],
'AmplifierPlugin': ['Dynamics', 'Amplifier'],
'CompressorPlugin': ['Dynamics', 'Compressor'],
'ExpanderPlugin': ['Dynamics', 'Expander'],
'GatePlugin': ['Dynamics', 'Gate'],
'LimiterPlugin': ['Dynamics', 'Limiter'],
'FilterPlugin': ['Filter'],
'AllpassPlugin': ['Filter', 'Allpass'],
'BandpassPlugin': ['Filter', 'Bandpass'],
'CombPlugin': ['Filter', 'Comb'],
'EQPlugin': ['Filter', 'Equaliser'],
'MultiEQPlugin': ['Filter', 'Equaliser', 'Multiband'],
'ParaEQPlugin': ['Filter', 'Equaliser', 'Parametric'],
'HighpassPlugin': ['Filter', 'Highpass'],
'LowpassPlugin': ['Filter', 'Lowpass'],
'GeneratorPlugin': ['Generator'],
'ConstantPlugin': ['Generator', 'Constant'],
'InstrumentPlugin': ['Generator', 'Instrument'],
'OscillatorPlugin': ['Generator', 'Oscillator'],
'ModulatorPlugin': ['Modulator'],
'ChorusPlugin': ['Modulator', 'Chorus'],
'FlangerPlugin': ['Modulator', 'Flanger'],
'PhaserPlugin': ['Modulator', 'Phaser'],
'ReverbPlugin': ['Reverb'],
'SimulatorPlugin': ['Simulator'],
'SpatialPlugin': ['Spatial'],
'SpectralPlugin': ['Spectral'],
'PitchPlugin': ['Spectral', 'Pitch Shifter'],
'UtilityPlugin': ['Utility'],
'AnalyserPlugin': ['Utility', 'Analyser'],
'ConverterPlugin': ['Utility', 'Converter'],
'FunctionPlugin': ['Utility', 'Function'],
'MixerPlugin': ['Utility', 'Mixer'],
#'MIDIPlugin': ['MIDI', 'Utility'],
}
mod_category_indexes = {
'DelayPlugin': ['Delay'],
'DistortionPlugin': ['Distortion'],
'DynamicsPlugin': ['Dynamics'],
'FilterPlugin': ['Filter'],
'GeneratorPlugin': ['Generator'],
'ModulatorPlugin': ['Modulator'],
'ReverbPlugin': ['Reverb'],
'SimulatorPlugin': ['Simulator'],
'SpatialPlugin': ['Spatial'],
'SpectralPlugin': ['Spectral'],
'UtilityPlugin': ['Utility'],
'MIDIPlugin': ['Utility', 'MIDI'],
}
def fill_in_lv2_category(node):
category = node.as_string().replace("http://lv2plug.in/ns/lv2core#","")
if category in lv2_category_indexes.keys():
return lv2_category_indexes[category]
return []
def fill_in_mod_category(node):
category = node.as_string().replace("http://moddevices.com/ns/mod#","")
if category in mod_category_indexes.keys():
return mod_category_indexes[category]
return []
categories = []
for cat in [cat for catlist in LILV_FOREACH(nodes, fill_in_mod_category) for cat in catlist]:
if cat not in categories:
categories.append(cat)
if len(categories) > 0:
return categories
for cat in [cat for catlist in LILV_FOREACH(nodes, fill_in_lv2_category) for cat in catlist]:
if cat not in categories:
categories.append(cat)
return categories
def get_port_data(port, subj):
nodes = port.get_value(subj.me)
data = []
it = lilv.lilv_nodes_begin(nodes)
while not lilv.lilv_nodes_is_end(nodes, it):
dat = lilv.lilv_nodes_get(nodes, it)
it = lilv.lilv_nodes_next(nodes, it)
if dat is None:
continue
data.append(lilv.lilv_node_as_string(dat))
return data
def get_port_unit(miniuri):
# using label, render, symbol
units = {
's': ["seconds", "%f s", "s"],
'ms': ["milliseconds", "%f ms", "ms"],
'min': ["minutes", "%f mins", "min"],
'bar': ["bars", "%f bars", "bars"],
'beat': ["beats", "%f beats", "beats"],
'frame': ["audio frames", "%f frames", "frames"],
'm': ["metres", "%f m", "m"],
'cm': ["centimetres", "%f cm", "cm"],
'mm': ["millimetres", "%f mm", "mm"],
'km': ["kilometres", "%f km", "km"],
'inch': ["inches", """%f\"""", "in"],
'mile': ["miles", "%f mi", "mi"],
'db': ["decibels", "%f dB", "dB"],
'pc': ["percent", "%f%%", "%"],
'coef': ["coefficient", "* %f", "*"],
'hz': ["hertz", "%f Hz", "Hz"],
'khz': ["kilohertz", "%f kHz", "kHz"],
'mhz': ["megahertz", "%f MHz", "MHz"],
'bpm': ["beats per minute", "%f BPM", "BPM"],
'oct': ["octaves", "%f octaves", "oct"],
'cent': ["cents", "%f ct", "ct"],
'semitone12TET': ["semitones", "%f semi", "semi"],
'degree': ["degrees", "%f deg", "deg"],
'midiNote': ["MIDI note", "MIDI note %d", "note"],
}
if miniuri in units.keys():
return units[miniuri]
return ("","","")
# ------------------------------------------------------------------------------------------------------------
# get_bundle_dirname
def get_bundle_dirname(bundleuri):
bundle = lilv.lilv_uri_to_path(bundleuri)
if not os.path.exists(bundle):
raise IOError(bundleuri)
if os.path.isfile(bundle):
bundle = os.path.dirname(bundle)
return bundle
# ------------------------------------------------------------------------------------------------------------
# get_pedalboard_info
# Get info from an lv2 bundle
# @a bundle is a string, consisting of a directory in the filesystem (absolute pathname).
def get_pedalboard_info(bundle):
# lilv wants the last character as the separator
bundle = os.path.abspath(bundle)
if not bundle.endswith(os.sep):
bundle += os.sep
# Create our own unique lilv world
# We'll load a single bundle and get all plugins from it
world = lilv.World()
# this is needed when loading specific bundles instead of load_all
# (these functions are not exposed via World yet)
lilv.lilv_world_load_specifications(world.me)
lilv.lilv_world_load_plugin_classes(world.me)
# convert bundle string into a lilv node
bundlenode = lilv.lilv_new_file_uri(world.me, None, bundle)
# load the bundle
world.load_bundle(bundlenode)
# free bundlenode, no longer needed
lilv.lilv_node_free(bundlenode)
# get all plugins in the bundle
plugins = world.get_all_plugins()
# make sure the bundle includes 1 and only 1 plugin (the pedalboard)
if plugins.size() != 1:
        raise Exception('get_pedalboard_info(%s) - bundle has 0 or > 1 plugin' % bundle)
# no indexing in python-lilv yet, just get the first item
plugin = None
for p in plugins:
plugin = p
break
if plugin is None:
        raise Exception('get_pedalboard_info(%s) - failed to get plugin, you are using an old lilv!' % bundle)
# define the needed stuff
ns_rdf = NS(world, lilv.LILV_NS_RDF)
ns_lv2core = NS(world, lilv.LILV_NS_LV2)
ns_ingen = NS(world, "http://drobilla.net/ns/ingen#")
ns_mod = NS(world, "http://moddevices.com/ns/mod#")
ns_modpedal = NS(world, "http://moddevices.com/ns/modpedal#")
# check if the plugin is a pedalboard
def fill_in_type(node):
return node.as_string()
plugin_types = [i for i in LILV_FOREACH(plugin.get_value(ns_rdf.type_), fill_in_type)]
if "http://moddevices.com/ns/modpedal#Pedalboard" not in plugin_types:
        raise Exception('get_pedalboard_info(%s) - plugin has no mod:Pedalboard type' % bundle)
# let's get all the info now
ingenarcs = []
ingenblocks = []
info = {
'name' : plugin.get_name().as_string(),
'uri' : plugin.get_uri().as_string(),
'author': plugin.get_author_name().as_string() or "", # Might be empty
'hardware': {
# we save this info later
'audio': {
'ins' : 0,
'outs': 0
},
'cv': {
'ins' : 0,
'outs': 0
},
'midi': {
'ins' : 0,
'outs': 0
}
},
'size': {
'width' : plugin.get_value(ns_modpedal.width).get_first().as_int(),
'height': plugin.get_value(ns_modpedal.height).get_first().as_int(),
},
'screenshot' : os.path.basename(plugin.get_value(ns_modpedal.screenshot).get_first().as_string() or ""),
'thumbnail' : os.path.basename(plugin.get_value(ns_modpedal.thumbnail).get_first().as_string() or ""),
'connections': [], # we save this info later
'plugins' : [] # we save this info later
}
# connections
arcs = plugin.get_value(ns_ingen.arc)
it = arcs.begin()
while not arcs.is_end(it):
arc = arcs.get(it)
it = arcs.next(it)
if arc.me is None:
continue
head = lilv.lilv_world_get(world.me, arc.me, ns_ingen.head.me, None)
tail = lilv.lilv_world_get(world.me, arc.me, ns_ingen.tail.me, None)
if head is None or tail is None:
continue
ingenarcs.append({
"source": lilv.lilv_uri_to_path(lilv.lilv_node_as_string(tail)).replace(bundle,"",1),
"target": lilv.lilv_uri_to_path(lilv.lilv_node_as_string(head)).replace(bundle,"",1)
})
# hardware ports
handled_port_uris = []
ports = plugin.get_value(ns_lv2core.port)
it = ports.begin()
while not ports.is_end(it):
port = ports.get(it)
it = ports.next(it)
if port.me is None:
continue
# check if we already handled this port
port_uri = port.as_uri()
if port_uri in handled_port_uris:
continue
if port_uri.endswith("/control_in") or port_uri.endswith("/control_out"):
continue
handled_port_uris.append(port_uri)
# get types
port_types = lilv.lilv_world_find_nodes(world.me, port.me, ns_rdf.type_.me, None)
if port_types is None:
continue
portDir = "" # input or output
portType = "" # atom, audio or cv
it2 = lilv.lilv_nodes_begin(port_types)
while not lilv.lilv_nodes_is_end(port_types, it2):
port_type = lilv.lilv_nodes_get(port_types, it2)
it2 = lilv.lilv_nodes_next(port_types, it2)
if port_type is None:
continue
port_type_uri = lilv.lilv_node_as_uri(port_type)
if port_type_uri == "http://lv2plug.in/ns/lv2core#InputPort":
portDir = "input"
elif port_type_uri == "http://lv2plug.in/ns/lv2core#OutputPort":
portDir = "output"
elif port_type_uri == "http://lv2plug.in/ns/lv2core#AudioPort":
portType = "audio"
elif port_type_uri == "http://lv2plug.in/ns/lv2core#CVPort":
portType = "cv"
elif port_type_uri == "http://lv2plug.in/ns/ext/atom#AtomPort":
portType = "atom"
if not (portDir or portType):
continue
if portType == "audio":
if portDir == "input":
info['hardware']['audio']['ins'] += 1
else:
info['hardware']['audio']['outs'] += 1
elif portType == "atom":
if portDir == "input":
info['hardware']['midi']['ins'] += 1
else:
info['hardware']['midi']['outs'] += 1
elif portType == "cv":
if portDir == "input":
info['hardware']['cv']['ins'] += 1
else:
info['hardware']['cv']['outs'] += 1
# plugins
blocks = plugin.get_value(ns_ingen.block)
it = blocks.begin()
while not blocks.is_end(it):
block = blocks.get(it)
it = blocks.next(it)
if block.me is None:
continue
protouri1 = lilv.lilv_world_get(world.me, block.me, ns_lv2core.prototype.me, None)
protouri2 = lilv.lilv_world_get(world.me, block.me, ns_ingen.prototype.me, None)
if protouri1 is not None:
proto = protouri1
elif protouri2 is not None:
proto = protouri2
else:
continue
instance = lilv.lilv_uri_to_path(lilv.lilv_node_as_string(block.me)).replace(bundle,"",1)
uri = lilv.lilv_node_as_uri(proto)
enabled = lilv.lilv_world_get(world.me, block.me, ns_ingen.enabled.me, None)
builder = lilv.lilv_world_get(world.me, block.me, ns_mod.builderVersion.me, None)
release = lilv.lilv_world_get(world.me, block.me, ns_mod.releaseNumber.me, None)
minorver = lilv.lilv_world_get(world.me, block.me, ns_lv2core.minorVersion.me, None)
microver = lilv.lilv_world_get(world.me, block.me, ns_lv2core.microVersion.me, None)
ingenblocks.append({
"instance": instance,
"uri" : uri,
"x" : lilv.lilv_node_as_float(lilv.lilv_world_get(world.me, block.me, ns_ingen.canvasX.me, None)),
"y" : lilv.lilv_node_as_float(lilv.lilv_world_get(world.me, block.me, ns_ingen.canvasY.me, None)),
"enabled" : lilv.lilv_node_as_bool(enabled) if enabled is not None else False,
"builder" : lilv.lilv_node_as_int(builder) if builder else 0,
"release" : lilv.lilv_node_as_int(release) if release else 0,
"minorVersion": lilv.lilv_node_as_int(minorver) if minorver else 0,
"microVersion": lilv.lilv_node_as_int(microver) if microver else 0,
})
info['connections'] = ingenarcs
info['plugins'] = ingenblocks
return info
# ------------------------------------------------------------------------------------------------------------
# get_pedalboard_name
# Faster version of get_pedalboard_info when we just need to know the pedalboard name
# @a bundle is a string, consisting of a directory in the filesystem (absolute pathname).
def get_pedalboard_name(bundle):
# lilv wants the last character as the separator
bundle = os.path.abspath(bundle)
if not bundle.endswith(os.sep):
bundle += os.sep
# Create our own unique lilv world
# We'll load a single bundle and get all plugins from it
world = lilv.World()
# this is needed when loading specific bundles instead of load_all
# (these functions are not exposed via World yet)
lilv.lilv_world_load_specifications(world.me)
lilv.lilv_world_load_plugin_classes(world.me)
# convert bundle string into a lilv node
bundlenode = lilv.lilv_new_file_uri(world.me, None, bundle)
# load the bundle
world.load_bundle(bundlenode)
# free bundlenode, no longer needed
lilv.lilv_node_free(bundlenode)
# get all plugins in the bundle
plugins = world.get_all_plugins()
# make sure the bundle includes 1 and only 1 plugin (the pedalboard)
if plugins.size() != 1:
        raise Exception('get_pedalboard_name(%s) - bundle has 0 or > 1 plugin' % bundle)
# no indexing in python-lilv yet, just get the first item
plugin = None
for p in plugins:
plugin = p
break
if plugin is None:
        raise Exception('get_pedalboard_name(%s) - failed to get plugin, you are using an old lilv!' % bundle)
# define the needed stuff
ns_rdf = NS(world, lilv.LILV_NS_RDF)
# check if the plugin is a pedalboard
def fill_in_type(node):
return node.as_string()
plugin_types = [i for i in LILV_FOREACH(plugin.get_value(ns_rdf.type_), fill_in_type)]
if "http://moddevices.com/ns/modpedal#Pedalboard" not in plugin_types:
        raise Exception('get_pedalboard_name(%s) - plugin has no mod:Pedalboard type' % bundle)
return plugin.get_name().as_string()
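# Minimal usage sketch (illustrative only; the bundle path below is a hypothetical example):
#
#   name = get_pedalboard_name("/home/user/.pedalboards/MyBoard.pedalboard")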
# ------------------------------------------------------------------------------------------------------------
# plugin_has_modgui
# Check if a plugin has modgui
def plugin_has_modgui(world, plugin):
# define the needed stuff
ns_modgui = NS(world, "http://moddevices.com/ns/modgui#")
# --------------------------------------------------------------------------------------------------------
# get the proper modgui
modguigui = None
nodes = plugin.get_value(ns_modgui.gui)
it = nodes.begin()
while not nodes.is_end(it):
mgui = nodes.get(it)
it = nodes.next(it)
if mgui.me is None:
continue
resdir = world.find_nodes(mgui.me, ns_modgui.resourcesDirectory.me, None).get_first()
if resdir.me is None:
continue
modguigui = mgui
if os.path.expanduser("~") in lilv.lilv_uri_to_path(resdir.as_string()):
# found a modgui in the home dir, stop here and use it
break
del nodes, it
# --------------------------------------------------------------------------------------------------------
# check selected modgui
if modguigui is None or modguigui.me is None:
return False
# resourcesDirectory *must* be present
modgui_resdir = world.find_nodes(modguigui.me, ns_modgui.resourcesDirectory.me, None).get_first()
if modgui_resdir.me is None:
return False
return os.path.exists(lilv.lilv_uri_to_path(modgui_resdir.as_string()))
# ------------------------------------------------------------------------------------------------------------
# get_plugin_info
# Get info from a lilv plugin
# This is used in get_plugins_info below and MOD-SDK
def get_plugin_info(world, plugin, useAbsolutePath = True):
# define the needed stuff
ns_doap = NS(world, lilv.LILV_NS_DOAP)
ns_foaf = NS(world, lilv.LILV_NS_FOAF)
ns_rdf = NS(world, lilv.LILV_NS_RDF)
ns_rdfs = NS(world, lilv.LILV_NS_RDFS)
ns_lv2core = NS(world, lilv.LILV_NS_LV2)
ns_atom = NS(world, "http://lv2plug.in/ns/ext/atom#")
ns_midi = NS(world, "http://lv2plug.in/ns/ext/midi#")
ns_morph = NS(world, "http://lv2plug.in/ns/ext/morph#")
ns_pprops = NS(world, "http://lv2plug.in/ns/ext/port-props#")
ns_pset = NS(world, "http://lv2plug.in/ns/ext/presets#")
ns_units = NS(world, "http://lv2plug.in/ns/extensions/units#")
ns_mod = NS(world, "http://moddevices.com/ns/mod#")
ns_modgui = NS(world, "http://moddevices.com/ns/modgui#")
bundleuri = plugin.get_bundle_uri().as_string()
bundle = lilv.lilv_uri_to_path(bundleuri)
errors = []
warnings = []
# --------------------------------------------------------------------------------------------------------
# uri
uri = plugin.get_uri().as_string() or ""
if not uri:
errors.append("plugin uri is missing or invalid")
elif uri.startswith("file:"):
errors.append("plugin uri is local, and thus not suitable for redistribution")
#elif not (uri.startswith("http:") or uri.startswith("https:")):
#warnings.append("plugin uri is not a real url")
# --------------------------------------------------------------------------------------------------------
# name
name = plugin.get_name().as_string() or ""
if not name:
errors.append("plugin name is missing")
# --------------------------------------------------------------------------------------------------------
# binary
binary = lilv.lilv_uri_to_path(plugin.get_library_uri().as_string() or "")
if not binary:
errors.append("plugin binary is missing")
elif not useAbsolutePath:
binary = binary.replace(bundle,"",1)
# --------------------------------------------------------------------------------------------------------
# license
license = plugin.get_value(ns_doap.license).get_first().as_string() or ""
if not license:
prj = plugin.get_value(ns_lv2core.project).get_first()
if prj.me is not None:
licsnode = lilv.lilv_world_get(world.me, prj.me, ns_doap.license.me, None)
if licsnode is not None:
license = lilv.lilv_node_as_string(licsnode)
del licsnode
del prj
if not license:
errors.append("plugin license is missing")
elif license.startswith(bundleuri):
license = license.replace(bundleuri,"",1)
warnings.append("plugin license entry is a local path instead of a string")
# --------------------------------------------------------------------------------------------------------
# comment
comment = (plugin.get_value(ns_rdfs.comment).get_first().as_string() or "").strip()
# sneaky empty comments!
if len(comment) > 0 and comment == len(comment) * comment[0]:
comment = ""
if not comment:
errors.append("plugin comment is missing")
# --------------------------------------------------------------------------------------------------------
# version
microver = plugin.get_value(ns_lv2core.microVersion).get_first()
minorver = plugin.get_value(ns_lv2core.minorVersion).get_first()
if microver.me is None and minorver.me is None:
errors.append("plugin is missing version information")
minorVersion = 0
microVersion = 0
else:
if minorver.me is None:
errors.append("plugin is missing minorVersion")
minorVersion = 0
else:
minorVersion = minorver.as_int()
if microver.me is None:
errors.append("plugin is missing microVersion")
microVersion = 0
else:
microVersion = microver.as_int()
del minorver
del microver
version = "%d.%d" % (minorVersion, microVersion)
# 0.x is experimental
if minorVersion == 0:
stability = "experimental"
# odd x.2 or 2.x is testing/development
elif minorVersion % 2 != 0 or microVersion % 2 != 0:
stability = "testing"
# otherwise it's stable
else:
stability = "stable"
# --------------------------------------------------------------------------------------------------------
# author
author = {
'name' : plugin.get_author_name().as_string() or "",
'homepage': plugin.get_author_homepage().as_string() or "",
'email' : plugin.get_author_email().as_string() or "",
}
if not author['name']:
errors.append("plugin author name is missing")
if not author['homepage']:
prj = plugin.get_value(ns_lv2core.project).get_first()
if prj.me is not None:
maintainer = lilv.lilv_world_get(world.me, prj.me, ns_doap.maintainer.me, None)
if maintainer is not None:
homepage = lilv.lilv_world_get(world.me, maintainer, ns_foaf.homepage.me, None)
if homepage is not None:
author['homepage'] = lilv.lilv_node_as_string(homepage)
del homepage
del maintainer
del prj
if not author['homepage']:
warnings.append("plugin author homepage is missing")
if not author['email']:
pass
elif author['email'].startswith(bundleuri):
author['email'] = author['email'].replace(bundleuri,"",1)
warnings.append("plugin author email entry is missing 'mailto:' prefix")
elif author['email'].startswith("mailto:"):
author['email'] = author['email'].replace("mailto:","",1)
# --------------------------------------------------------------------------------------------------------
# brand
brand = plugin.get_value(ns_mod.brand).get_first().as_string() or ""
if not brand:
brand = author['name'].split(" - ",1)[0].split(" ",1)[0]
brand = brand.rstrip(",").rstrip(";")
if len(brand) > 11:
brand = brand[:11]
warnings.append("plugin brand is missing")
elif len(brand) > 11:
brand = brand[:11]
errors.append("plugin brand has more than 11 characters")
# --------------------------------------------------------------------------------------------------------
# label
label = plugin.get_value(ns_mod.label).get_first().as_string() or ""
if not label:
if len(name) <= 16:
label = name
else:
labels = name.split(" - ",1)[0].split(" ")
if labels[0].lower() in bundle.lower() and len(labels) > 1 and not labels[1].startswith(("(","[")):
label = labels[1]
else:
label = labels[0]
if len(label) > 16:
label = label[:16]
warnings.append("plugin label is missing")
del labels
elif len(label) > 16:
label = label[:16]
errors.append("plugin label has more than 16 characters")
# --------------------------------------------------------------------------------------------------------
# bundles
bundles = []
if useAbsolutePath:
bnodes = lilv.lilv_plugin_get_data_uris(plugin.me)
it = lilv.lilv_nodes_begin(bnodes)
while not lilv.lilv_nodes_is_end(bnodes, it):
bnode = lilv.lilv_nodes_get(bnodes, it)
it = lilv.lilv_nodes_next(bnodes, it)
if bnode is None:
continue
if not lilv.lilv_node_is_uri(bnode):
continue
bpath = os.path.abspath(os.path.dirname(lilv.lilv_uri_to_path(lilv.lilv_node_as_uri(bnode))))
if not bpath.endswith(os.sep):
bpath += os.sep
if bpath not in bundles:
bundles.append(bpath)
if bundle not in bundles:
bundles.append(bundle)
del bnodes, it
# --------------------------------------------------------------------------------------------------------
# get the proper modgui
modguigui = None
nodes = plugin.get_value(ns_modgui.gui)
it = nodes.begin()
while not nodes.is_end(it):
mgui = nodes.get(it)
it = nodes.next(it)
if mgui.me is None:
continue
resdir = world.find_nodes(mgui.me, ns_modgui.resourcesDirectory.me, None).get_first()
if resdir.me is None:
continue
modguigui = mgui
if not useAbsolutePath:
# special build, use first modgui found
break
if os.path.expanduser("~") in lilv.lilv_uri_to_path(resdir.as_string()):
# found a modgui in the home dir, stop here and use it
break
del nodes, it
# --------------------------------------------------------------------------------------------------------
# gui
gui = {}
if modguigui is None or modguigui.me is None:
warnings.append("no modgui available")
else:
# resourcesDirectory *must* be present
modgui_resdir = world.find_nodes(modguigui.me, ns_modgui.resourcesDirectory.me, None).get_first()
if modgui_resdir.me is None:
errors.append("modgui has no resourcesDirectory data")
else:
if useAbsolutePath:
gui['resourcesDirectory'] = lilv.lilv_uri_to_path(modgui_resdir.as_string())
# check if modgui is defined in a separate file
gui['usingSeeAlso'] = os.path.exists(os.path.join(bundle, "modgui.ttl"))
# check if the modgui definition is on its own file and in the user dir
gui['modificableInPlace'] = bool((bundle not in gui['resourcesDirectory'] or gui['usingSeeAlso']) and
os.path.expanduser("~") in gui['resourcesDirectory'])
else:
gui['resourcesDirectory'] = modgui_resdir.as_string().replace(bundleuri,"",1)
# icon and settings templates
modgui_icon = world.find_nodes(modguigui.me, ns_modgui.iconTemplate .me, None).get_first()
modgui_setts = world.find_nodes(modguigui.me, ns_modgui.settingsTemplate.me, None).get_first()
if modgui_icon.me is None:
errors.append("modgui has no iconTemplate data")
else:
iconFile = lilv.lilv_uri_to_path(modgui_icon.as_string())
if os.path.exists(iconFile):
gui['iconTemplate'] = iconFile if useAbsolutePath else iconFile.replace(bundle,"",1)
else:
errors.append("modgui iconTemplate file is missing")
del iconFile
if modgui_setts.me is not None:
settingsFile = lilv.lilv_uri_to_path(modgui_setts.as_string())
if os.path.exists(settingsFile):
gui['settingsTemplate'] = settingsFile if useAbsolutePath else settingsFile.replace(bundle,"",1)
else:
errors.append("modgui settingsTemplate file is missing")
del settingsFile
# javascript and stylesheet files
modgui_script = world.find_nodes(modguigui.me, ns_modgui.javascript.me, None).get_first()
modgui_style = world.find_nodes(modguigui.me, ns_modgui.stylesheet.me, None).get_first()
if modgui_script.me is not None:
javascriptFile = lilv.lilv_uri_to_path(modgui_script.as_string())
if os.path.exists(javascriptFile):
gui['javascript'] = javascriptFile if useAbsolutePath else javascriptFile.replace(bundle,"",1)
else:
errors.append("modgui javascript file is missing")
del javascriptFile
if modgui_style.me is None:
errors.append("modgui has no stylesheet data")
else:
stylesheetFile = lilv.lilv_uri_to_path(modgui_style.as_string())
if os.path.exists(stylesheetFile):
gui['stylesheet'] = stylesheetFile if useAbsolutePath else stylesheetFile.replace(bundle,"",1)
else:
errors.append("modgui stylesheet file is missing")
del stylesheetFile
# template data for backwards compatibility
# FIXME remove later once we got rid of all templateData files
modgui_templ = world.find_nodes(modguigui.me, ns_modgui.templateData.me, None).get_first()
if modgui_templ.me is not None:
warnings.append("modgui is using old deprecated templateData")
templFile = lilv.lilv_uri_to_path(modgui_templ.as_string())
if os.path.exists(templFile):
with open(templFile, 'r') as fd:
try:
data = json.loads(fd.read())
except:
data = {}
keys = list(data.keys())
if 'author' in keys:
gui['brand'] = data['author']
if 'label' in keys:
gui['label'] = data['label']
if 'color' in keys:
gui['color'] = data['color']
if 'knob' in keys:
gui['knob'] = data['knob']
if 'controls' in keys:
index = 0
ports = []
for ctrl in data['controls']:
ports.append({
'index' : index,
'name' : ctrl['name'],
'symbol': ctrl['symbol'],
})
index += 1
gui['ports'] = ports
del templFile
# screenshot and thumbnail
modgui_scrn = world.find_nodes(modguigui.me, ns_modgui.screenshot.me, None).get_first()
modgui_thumb = world.find_nodes(modguigui.me, ns_modgui.thumbnail .me, None).get_first()
if modgui_scrn.me is not None:
gui['screenshot'] = lilv.lilv_uri_to_path(modgui_scrn.as_string())
if not os.path.exists(gui['screenshot']):
errors.append("modgui screenshot file is missing")
if not useAbsolutePath:
gui['screenshot'] = gui['screenshot'].replace(bundle,"",1)
else:
errors.append("modgui has no screnshot data")
if modgui_thumb.me is not None:
gui['thumbnail'] = lilv.lilv_uri_to_path(modgui_thumb.as_string())
if not os.path.exists(gui['thumbnail']):
errors.append("modgui thumbnail file is missing")
if not useAbsolutePath:
gui['thumbnail'] = gui['thumbnail'].replace(bundle,"",1)
else:
errors.append("modgui has no thumbnail data")
# extra stuff, all optional
modgui_brand = world.find_nodes(modguigui.me, ns_modgui.brand.me, None).get_first()
modgui_label = world.find_nodes(modguigui.me, ns_modgui.label.me, None).get_first()
modgui_model = world.find_nodes(modguigui.me, ns_modgui.model.me, None).get_first()
modgui_panel = world.find_nodes(modguigui.me, ns_modgui.panel.me, None).get_first()
modgui_color = world.find_nodes(modguigui.me, ns_modgui.color.me, None).get_first()
modgui_knob = world.find_nodes(modguigui.me, ns_modgui.knob .me, None).get_first()
if modgui_brand.me is not None:
gui['brand'] = modgui_brand.as_string()
if modgui_label.me is not None:
gui['label'] = modgui_label.as_string()
if modgui_model.me is not None:
gui['model'] = modgui_model.as_string()
if modgui_panel.me is not None:
gui['panel'] = modgui_panel.as_string()
if modgui_color.me is not None:
gui['color'] = modgui_color.as_string()
if modgui_knob.me is not None:
gui['knob'] = modgui_knob.as_string()
# ports
errpr = False
sybls = []
ports = []
nodes = world.find_nodes(modguigui.me, ns_modgui.port.me, None)
it = lilv.lilv_nodes_begin(nodes.me)
while not lilv.lilv_nodes_is_end(nodes.me, it):
port = lilv.lilv_nodes_get(nodes.me, it)
it = lilv.lilv_nodes_next(nodes.me, it)
if port is None:
break
port_indx = world.find_nodes(port, ns_lv2core.index .me, None).get_first()
port_symb = world.find_nodes(port, ns_lv2core.symbol.me, None).get_first()
port_name = world.find_nodes(port, ns_lv2core.name .me, None).get_first()
if None in (port_indx.me, port_name.me, port_symb.me):
if not errpr:
errors.append("modgui has some invalid port data")
errpr = True
continue
port_indx = port_indx.as_int()
port_symb = port_symb.as_string()
port_name = port_name.as_string()
ports.append({
'index' : port_indx,
'symbol': port_symb,
'name' : port_name,
})
if port_symb not in sybls:
sybls.append(port_symb)
elif not errpr:
errors.append("modgui has some duplicated port symbols")
errpr = True
# sort ports
if len(ports) > 0:
ports2 = {}
for port in ports:
ports2[port['index']] = port
gui['ports'] = [ports2[i] for i in ports2]
del ports2
# cleanup
del ports, nodes, it
# --------------------------------------------------------------------------------------------------------
# ports
index = 0
ports = {
'audio' : { 'input': [], 'output': [] },
'control': { 'input': [], 'output': [] },
'midi' : { 'input': [], 'output': [] }
}
portsymbols = []
portnames = []
# function for filling port info
def fill_port_info(port):
# base data
portname = lilv.lilv_node_as_string(port.get_name()) or ""
if not portname:
portname = "_%i" % index
errors.append("port with index %i has no name" % index)
portsymbol = lilv.lilv_node_as_string(port.get_symbol()) or ""
if not portsymbol:
portsymbol = "_%i" % index
errors.append("port with index %i has no symbol" % index)
# check for duplicate names
if portname in portsymbols:
warnings.append("port name '%s' is not unique" % portname)
else:
portnames.append(portname)
# check for duplicate symbols
if portsymbol in portsymbols:
errors.append("port symbol '%s' is not unique" % portsymbol)
else:
portsymbols.append(portsymbol)
# short name
psname = lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.shortName.me))
if psname is not None:
psname = lilv.lilv_node_as_string(psname) or ""
if not psname:
psname = get_short_port_name(portname)
if len(psname) > 16:
warnings.append("port '%s' name is too big, reduce the name size or provide a shortName" % portname)
elif len(psname) > 16:
psname = psname[:16]
errors.append("port '%s' short name has more than 16 characters" % portname)
# check for old style shortName
if port.get_value(ns_lv2core.shortname.me) is not None:
errors.append("port '%s' short name is using old style 'shortname' instead of 'shortName'" % portname)
# port types
types = [typ.rsplit("#",1)[-1].replace("Port","",1) for typ in get_port_data(port, ns_rdf.type_)]
if "Atom" in types \
and port.supports_event(ns_midi.MidiEvent.me) \
and lilv.Nodes(port.get_value(ns_atom.bufferType.me)).get_first() == ns_atom.Sequence:
types.append("MIDI")
#if "Morph" in types:
#morphtyp = lilv.lilv_nodes_get_first(port.get_value(ns_morph.supportsType.me))
#if morphtyp is not None:
#morphtyp = lilv.lilv_node_as_uri(morphtyp)
#if morphtyp:
#types.append(morphtyp.rsplit("#",1)[-1].replace("Port","",1))
# port comment
pcomment = (get_port_data(port, ns_rdfs.comment) or [""])[0]
# port designation
designation = (get_port_data(port, ns_lv2core.designation) or [""])[0]
# port rangeSteps
rangeSteps = (get_port_data(port, ns_mod.rangeSteps) or get_port_data(port, ns_pprops.rangeSteps) or [None])[0]
# port properties
properties = [typ.rsplit("#",1)[-1] for typ in get_port_data(port, ns_lv2core.portProperty)]
# data
ranges = {}
scalepoints = []
# unit block
ulabel = ""
urender = ""
usymbol = ""
# control and cv must contain ranges, might contain scale points
if "Control" in types or "CV" in types:
isInteger = "integer" in properties
if isInteger and "CV" in types:
errors.append("port '%s' has integer property and CV type" % portname)
xdefault = lilv.lilv_nodes_get_first(port.get_value(ns_mod.default.me)) or \
lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.default.me))
xminimum = lilv.lilv_nodes_get_first(port.get_value(ns_mod.minimum.me)) or \
lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.minimum.me))
xmaximum = lilv.lilv_nodes_get_first(port.get_value(ns_mod.maximum.me)) or \
lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.maximum.me))
if xminimum is not None and xmaximum is not None:
if isInteger:
if is_integer(lilv.lilv_node_as_string(xminimum)):
ranges['minimum'] = lilv.lilv_node_as_int(xminimum)
else:
ranges['minimum'] = lilv.lilv_node_as_float(xminimum)
if fmod(ranges['minimum'], 1.0) == 0.0:
warnings.append("port '%s' has integer property but minimum value is float" % portname)
else:
errors.append("port '%s' has integer property but minimum value has non-zero decimals" % portname)
ranges['minimum'] = int(ranges['minimum'])
if is_integer(lilv.lilv_node_as_string(xmaximum)):
ranges['maximum'] = lilv.lilv_node_as_int(xmaximum)
else:
ranges['maximum'] = lilv.lilv_node_as_float(xmaximum)
if fmod(ranges['maximum'], 1.0) == 0.0:
warnings.append("port '%s' has integer property but maximum value is float" % portname)
else:
errors.append("port '%s' has integer property but maximum value has non-zero decimals" % portname)
ranges['maximum'] = int(ranges['maximum'])
else:
ranges['minimum'] = lilv.lilv_node_as_float(xminimum)
ranges['maximum'] = lilv.lilv_node_as_float(xmaximum)
if is_integer(lilv.lilv_node_as_string(xminimum)):
warnings.append("port '%s' minimum value is an integer" % portname)
if is_integer(lilv.lilv_node_as_string(xmaximum)):
warnings.append("port '%s' maximum value is an integer" % portname)
if ranges['minimum'] >= ranges['maximum']:
ranges['maximum'] = ranges['minimum'] + (1 if isInteger else 0.1)
errors.append("port '%s' minimum value is equal or higher than its maximum" % portname)
if xdefault is not None:
if isInteger:
if is_integer(lilv.lilv_node_as_string(xdefault)):
ranges['default'] = lilv.lilv_node_as_int(xdefault)
else:
ranges['default'] = lilv.lilv_node_as_float(xdefault)
if fmod(ranges['default'], 1.0) == 0.0:
warnings.append("port '%s' has integer property but default value is float" % portname)
else:
errors.append("port '%s' has integer property but default value has non-zero decimals" % portname)
ranges['default'] = int(ranges['default'])
else:
ranges['default'] = lilv.lilv_node_as_float(xdefault)
if is_integer(lilv.lilv_node_as_string(xdefault)):
warnings.append("port '%s' default value is an integer" % portname)
testmin = ranges['minimum']
testmax = ranges['maximum']
if "sampleRate" in properties:
testmin *= 48000
testmax *= 48000
if not (testmin <= ranges['default'] <= testmax):
ranges['default'] = ranges['minimum']
errors.append("port '%s' default value is out of bounds" % portname)
else:
ranges['default'] = ranges['minimum']
if "Input" in types:
errors.append("port '%s' is missing default value" % portname)
else:
if isInteger:
ranges['minimum'] = 0
ranges['maximum'] = 1
ranges['default'] = 0
else:
ranges['minimum'] = -1.0 if "CV" in types else 0.0
ranges['maximum'] = 1.0
ranges['default'] = 0.0
if "CV" not in types and designation != "http://lv2plug.in/ns/lv2core#latency":
errors.append("port '%s' is missing value ranges" % portname)
nodes = port.get_scale_points()
if nodes is not None:
scalepoints_unsorted = []
it = lilv.lilv_scale_points_begin(nodes)
while not lilv.lilv_scale_points_is_end(nodes, it):
sp = lilv.lilv_scale_points_get(nodes, it)
it = lilv.lilv_scale_points_next(nodes, it)
if sp is None:
continue
label = lilv.lilv_scale_point_get_label(sp)
value = lilv.lilv_scale_point_get_value(sp)
if label is None:
errors.append("a port scalepoint is missing its label")
continue
label = lilv.lilv_node_as_string(label) or ""
if not label:
errors.append("a port scalepoint is missing its label")
continue
if value is None:
errors.append("port scalepoint '%s' is missing its value" % label)
continue
if isInteger:
if is_integer(lilv.lilv_node_as_string(value)):
value = lilv.lilv_node_as_int(value)
else:
value = lilv.lilv_node_as_float(value)
if fmod(value, 1.0) == 0.0:
warnings.append("port '%s' has integer property but scalepoint '%s' value is float" % (portname, label))
else:
errors.append("port '%s' has integer property but scalepoint '%s' value has non-zero decimals" % (portname, label))
value = int(value)
else:
if is_integer(lilv.lilv_node_as_string(value)):
warnings.append("port '%s' scalepoint '%s' value is an integer" % (portname, label))
value = lilv.lilv_node_as_float(value)
if ranges['minimum'] <= value <= ranges['maximum']:
scalepoints_unsorted.append((value, label))
else:
errors.append(("port scalepoint '%s' has an out-of-bounds value:\n" % label) +
("%d < %d < %d" if isInteger else "%f < %f < %f") % (ranges['minimum'], value, ranges['maximum']))
if len(scalepoints_unsorted) != 0:
unsorted = dict(s for s in scalepoints_unsorted)
values = list(v for v, l in scalepoints_unsorted)
values.sort()
scalepoints = list({ 'value': v, 'label': unsorted[v] } for v in values)
del unsorted, values
del scalepoints_unsorted
if "enumeration" in properties and len(scalepoints) <= 1:
errors.append("port '%s' wants to use enumeration but doesn't have enough values" % portname)
properties.remove("enumeration")
# control ports might contain unit
if "Control" in types:
# unit
uunit = lilv.lilv_nodes_get_first(port.get_value(ns_units.unit.me))
if uunit is not None:
uuri = lilv.lilv_node_as_uri(uunit)
# using pre-existing lv2 unit
if uuri is not None and uuri.startswith("http://lv2plug.in/ns/"):
uuri = uuri.replace("http://lv2plug.in/ns/extensions/units#","",1)
alnum = uuri.isalnum()
if not alnum:
errors.append("port '%s' has wrong lv2 unit uri" % portname)
uuri = uuri.rsplit("#",1)[-1].rsplit("/",1)[-1]
ulabel, urender, usymbol = get_port_unit(uuri)
if alnum and not (ulabel and urender and usymbol):
errors.append("port '%s' has unknown lv2 unit (our bug?, data is '%s', '%s', '%s')" % (portname,
ulabel,
urender,
usymbol))
# using custom unit
else:
xlabel = world.find_nodes(uunit, ns_rdfs .label.me, None).get_first()
xrender = world.find_nodes(uunit, ns_units.render.me, None).get_first()
xsymbol = world.find_nodes(uunit, ns_units.symbol.me, None).get_first()
if xlabel.me is not None:
ulabel = xlabel.as_string()
else:
errors.append("port '%s' has custom unit with no label" % portname)
if xrender.me is not None:
urender = xrender.as_string()
else:
errors.append("port '%s' has custom unit with no render" % portname)
if xsymbol.me is not None:
usymbol = xsymbol.as_string()
else:
errors.append("port '%s' has custom unit with no symbol" % portname)
return (types, {
'name' : portname,
'symbol' : portsymbol,
'ranges' : ranges,
'units' : {
'label' : ulabel,
'render': urender,
'symbol': usymbol,
} if "Control" in types and ulabel and urender and usymbol else {},
'comment' : pcomment,
'designation': designation,
'properties' : properties,
'rangeSteps' : rangeSteps,
'scalePoints': scalepoints,
'shortName' : psname,
})
for p in (plugin.get_port_by_index(i) for i in range(plugin.get_num_ports())):
types, info = fill_port_info(p)
info['index'] = index
index += 1
isInput = "Input" in types
types.remove("Input" if isInput else "Output")
for typ in [typl.lower() for typl in types]:
if typ not in ports.keys():
ports[typ] = { 'input': [], 'output': [] }
ports[typ]["input" if isInput else "output"].append(info)
# --------------------------------------------------------------------------------------------------------
# presets
def get_preset_data(preset):
world.load_resource(preset.me)
uri = preset.as_string() or ""
label = world.find_nodes(preset.me, ns_rdfs.label.me, None).get_first().as_string() or ""
if not uri:
errors.append("preset with label '%s' has no uri" % (label or "<unknown>"))
if not label:
errors.append("preset with uri '%s' has no label" % (uri or "<unknown>"))
return (uri, label)
presets = []
presets_related = plugin.get_related(ns_pset.Preset)
presets_data = list(LILV_FOREACH(presets_related, get_preset_data))
if len(presets_data) != 0:
unsorted = dict(p for p in presets_data)
uris = list(unsorted.keys())
uris.sort()
presets = list({ 'uri': p, 'label': unsorted[p] } for p in uris)
del unsorted, uris
del presets_related
# --------------------------------------------------------------------------------------------------------
# done
return {
'uri' : uri,
'name': name,
'binary' : binary,
'brand' : brand,
'label' : label,
'license': license,
'comment': comment,
'category' : get_category(plugin.get_value(ns_rdf.type_)),
'microVersion': microVersion,
'minorVersion': minorVersion,
'version' : version,
'stability': stability,
'author' : author,
'bundles': bundles,
'gui' : gui,
'ports' : ports,
'presets': presets,
'errors' : errors,
'warnings': warnings,
}
# ------------------------------------------------------------------------------------------------------------
# get_plugin_info_helper
# Get info from a simple URI, without the need of your own lilv world
# This is used by get_plugins_info in MOD-SDK
def get_plugin_info_helper(uri):
world = lilv.World()
world.load_all()
plugins = world.get_all_plugins()
return [get_plugin_info(world, p, False) for p in plugins]
# ------------------------------------------------------------------------------------------------------------
# get_plugins_info
# Get plugin-related info from a list of lv2 bundles
# @a bundles is a list of strings, consisting of directories in the filesystem (absolute pathnames).
def get_plugins_info(bundles):
# if empty, do nothing
if len(bundles) == 0:
raise Exception('get_plugins_info() - no bundles provided')
# Create our own unique lilv world
# We'll load the selected bundles and get all plugins from it
world = lilv.World()
# this is needed when loading specific bundles instead of load_all
# (these functions are not exposed via World yet)
lilv.lilv_world_load_specifications(world.me)
lilv.lilv_world_load_plugin_classes(world.me)
# load all bundles
for bundle in bundles:
# lilv wants the last character as the separator
bundle = os.path.abspath(bundle)
if not bundle.endswith(os.sep):
bundle += os.sep
# convert bundle string into a lilv node
bundlenode = lilv.lilv_new_file_uri(world.me, None, bundle)
# load the bundle
world.load_bundle(bundlenode)
# free bundlenode, no longer needed
lilv.lilv_node_free(bundlenode)
# get all plugins available in the selected bundles
plugins = world.get_all_plugins()
# make sure the bundles include something
if plugins.size() == 0:
raise Exception('get_plugins_info() - selected bundles have no plugins')
# return all the info
return [get_plugin_info(world, p, False) for p in plugins]
# ------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
from sys import argv, exit
from pprint import pprint
#get_plugins_info(argv[1:])
#for i in get_plugins_info(argv[1:]): pprint(i)
#exit(0)
for i in get_plugins_info(argv[1:]):
warnings = i['warnings'].copy()
if 'plugin brand is missing' in warnings:
i['warnings'].remove('plugin brand is missing')
if 'plugin label is missing' in warnings:
i['warnings'].remove('plugin label is missing')
if 'no modgui available' in warnings:
i['warnings'].remove('no modgui available')
for warn in warnings:
if "has no short name" in warn:
i['warnings'].remove(warn)
pprint({
'uri' : i['uri'],
'errors' : i['errors'],
'warnings': i['warnings']
}, width=200)
# ------------------------------------------------------------------------------------------------------------
|
PedalPi/PluginsManager
|
pluginsmanager/model/lv2/lilvlib.py
|
Python
|
apache-2.0
| 57,410
|
# -*- coding: utf8 -*-
__author__ = 'meng'
import os
import sys
import shutil
import re
from bs4 import BeautifulSoup
htmlfilename = sys.argv[1]
outfilename = re.sub(r'\.html', '-new.html', htmlfilename)
filedir = os.path.split(os.path.abspath(htmlfilename))[0]
with open(htmlfilename) as infile:
lines = infile.read()
soup = BeautifulSoup(lines)
imgs = soup.find_all('img')
# Change each img's absolute path to a relative one, and copy the image into the directory containing the html file
for img in imgs:
if os.path.isabs(img['src']):
imgfile = os.path.split(img['src'])[1]
if not os.path.exists(filedir+'/'+imgfile):
try:
shutil.copy(img['src'],filedir)
print "Copy %s to %s"%(img['src'],filedir)
except IOError as e:
print e
img['src'] = imgfile
# Write out the new html file
with open(outfilename,'w+') as outfile:
outstr = soup.prettify()
outfile.write(outstr.encode('utf8'))
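# Usage sketch (inferred from the code above; the file name is a hypothetical example):
#
#   python transhtml.py report.html
#
# writes report-new.html next to the input and copies any absolutely-referenced
# images into the same directory so the new page uses relative paths.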
|
menglingjie/transharoopadhtml
|
transhtml.py
|
Python
|
apache-2.0
| 1,033
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 João Pedro Rodrigues
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Deletes all atoms matching specific chains in the PDB file.
Usage:
python pdb_delchain.py -<option> <pdb file>
Example:
python pdb_delchain.py -A 1CTF.pdb # removes chain A from PDB file
python pdb_delchain.py -A,B 1CTF.pdb # removes chains A and B from PDB file
This program is part of the `pdb-tools` suite of utilities and should not be
distributed isolatedly. The `pdb-tools` were created to quickly manipulate PDB
files using the terminal, and can be used sequentially, with one tool streaming
data to another. They are based on old FORTRAN77 code that was taking too much
effort to maintain and compile. RIP.
"""
import os
import sys
__author__ = ["Joao Rodrigues", "Joao M.C. Teixeira"]
__email__ = ["j.p.g.l.m.rodrigues@gmail.com", "joaomcteixeira@gmail.com"]
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
option = ''
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe with default option
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
# One of two options: option & Pipe OR file & default option
if args[0].startswith('-'):
option = args[0][1:]
if sys.stdin.isatty(): # ensure the PDB data is streamed in
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif len(args) == 2:
# Two options: option & File
if not args[0].startswith('-'):
emsg = 'ERROR! First argument is not an option: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if not os.path.isfile(args[1]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else: # Whatever ...
sys.stderr.write(__doc__)
sys.exit(1)
# Validate option
option_set = set([o.upper().strip() for o in option.split(',') if o.strip()])
if not option_set:
emsg = 'ERROR!! You must provide at least one chain identifier\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
for chain_id in option_set:
if len(chain_id) > 1:
emsg = 'ERROR!! Chain identifier name is invalid: \'{}\'\n'
sys.stderr.write(emsg.format(chain_id))
sys.stderr.write(__doc__)
sys.exit(1)
return (option_set, fh)
def delete_chain(fhandle, chain_set):
"""Removes specific chains from the structure.
"""
records = ('ATOM', 'HETATM', 'TER', 'ANISOU')
for line in fhandle:
if line.startswith(records):
if line[21] in chain_set:
continue
yield line
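# Illustrative note (not part of the original tool): delete_chain is a lazy generator.
# It yields every line unchanged except ATOM/HETATM/TER/ANISOU records whose chain
# identifier (column 22 of the PDB line, i.e. line[21]) is in chain_set; those are
# skipped. A hypothetical standalone call:
#
#   with open("1CTF.pdb") as fh:
#       kept_lines = list(delete_chain(fh, {"A", "B"}))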
def main():
# Check Input
element, pdbfh = check_input(sys.argv[1:])
# Do the job
new_pdb = delete_chain(pdbfh, element)
try:
_buffer = []
_buffer_size = 5000 # write N lines at a time
for lineno, line in enumerate(new_pdb):
if not (lineno % _buffer_size):
sys.stdout.write(''.join(_buffer))
_buffer = []
_buffer.append(line)
sys.stdout.write(''.join(_buffer))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
if __name__ == '__main__':
main()
|
JoaoRodrigues/pdb-tools
|
pdbtools/pdb_delchain.py
|
Python
|
apache-2.0
| 4,832
|
"""
Write a script that will create a static webpage served out of Cloud Files.
The script must create a new container, cdn enable it, enable it to serve an index file, create an index file object,
upload the object to the container, and create a CNAME record pointing to the CDN URL of the container.
"""
__author__ = 'Bruce Stringer'
import pyrax
import os
from urlparse import urlparse
def auth(credential_location="~/.rackspace_cloud_credentials"):
"""
Loads the pyrax credentials from ~/.rackspace_cloud_credentials
:param credential_location: The location containing the credential ini
:return:
"""
credentials = os.path.expanduser(credential_location)
pyrax.set_credential_file(credentials)
def get_int_input(message="Enter an integer: "):
"""
Gets a valid int input from the user. If a valid integer is not entered, get_int_input calls itself again.
:param message: The message to be displayed to the user when gathering input.
:return: A valid integer
"""
try:
        choice_str = raw_input(message)
choice = int(choice_str)
return choice
except (ValueError, SyntaxError, NameError):
print "Invalid Input"
        return get_int_input(message)
def get_str_input(message="Enter a string: "):
"""
    Gets a valid str input from the user. If a non-empty string is not entered, get_str_input calls itself again.
:param message: The message to be displayed to the user when gathering input.
    :return: A valid string
"""
try:
input_str = raw_input(message)
if input_str == "":
            return get_str_input(message)
return input_str
except (ValueError, SyntaxError, NameError):
print "Invalid Input"
get_str_input(message)
def create_cloudfiles_container(cf_client, name):
container = cf_client.create_container(name)
print "Created container: " + name
return container
def create_index(container, content, index_name="index.html"):
index = container.client.store_object(container, index_name, content)
container.set_web_index_page(index_name)
return index
def select_domain_from_list(dns_client, message="Please select a domain by number: "):
"""
Offers the user a list of domains to select from based on the passed client
    :param dns_client: A pyrax cloud_dns client object with its auth already initialized.
    :param message: The prompt displayed when asking the user to pick a domain.
    :return: A pyrax domain object based on the selection
"""
domains = dns_client.list()
for num in range(len(domains)):
print num, ") Server name:", domains[num].name
choice = get_int_input(message)
domain = None
if 0 <= choice <= len(domains) - 1:
domain = domains[choice]
else:
print "Invalid Choice: ", choice
        return select_domain_from_list(dns_client, message)
return domain
def add_record(domain, fqdn, record_type, data, priority="", ttl=300):
record_types = ["A", "CNAME", "MX", "NS", "SRV", "TXT"]
record_type = str(record_type).upper()
if record_type not in record_types:
raise ValueError("Not a valid record type.")
elif ttl < 300 or ttl > 86400:
raise ValueError("Invalid TTYL. Should be between 300 and 86400")
record = {
'type': record_type,
'name': fqdn,
'data': data,
'ttl': ttl,
}
if record_type == "MX":
if priority < 0 or priority > 65535:
raise ValueError("Invalid priority. Should be between 0 and 65535")
record['priority'] = priority
try:
generated_record = domain.add_records(record)
    except (pyrax.exc.BadRequest, pyrax.exc.DomainRecordAdditionFailed):
raise
return generated_record
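# Illustrative sketch (not part of the original script): for the CNAME created in
# main() below, add_record assembles a payload roughly like
#
#   {'type': 'CNAME', 'name': 'www.example.com',          # hypothetical FQDN
#    'data': 'abc123.r99.cf1.rackcdn.com', 'ttl': 300}    # hypothetical CDN host
#
# and passes it to domain.add_records(), returning the created record object(s).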
def main():
auth()
cf_client = pyrax.cloudfiles
dns_client = pyrax.cloud_dns
#create new container
container_name = get_str_input("Please enter the name for your new container: ")
container = create_cloudfiles_container(cf_client, container_name)
#cdn enable container
ttl = get_int_input("What would you like your CDN ttl set to in seconds (Ex. 900): ")
container.make_public(ttl=ttl)
#create index file object
index_content = get_str_input("Please enter a hello world message that will be present in your index: ")
index = create_index(container, index_content)
#select domain
domain = select_domain_from_list(dns_client, "Please select the domain you wish to add the cname for this page to: ")
cname_url = get_str_input("Please enter the subdomain you would like to point the cname to: ")
fqdn = cname_url + "." + domain.name
print "FQDN selected: " + fqdn
cdn_uri = container.cdn_uri
url = urlparse(cdn_uri).netloc
#create cname to CDN url
record = add_record(domain, fqdn, "CNAME", url)
print record[0].name, " pointed to ", url
if __name__ == "__main__":
main()
|
Gifflen/API-Challenge
|
challenge08.py
|
Python
|
apache-2.0
| 4,868
|
from abc import ABCMeta, abstractmethod
class Generic_RFSigGen():
"""Generic RF signal generator class used for hardware abstraction.
All of the methods listed here are abstract and will be overridden by child classes,
this will abstract hardware as the methods here are called, but the functionality is
implemented by the individual children.
Attributes:
Output_Power (float/str): The output power of the RF SigGen. As a float default units will be
dBm, using a string new units can be selected.
Frequency (float/str): The frequency output from the RF SigGen. As a float default units will be
MHz, using a string new units can be selected.
Output_state (bool): RF output enabled flag.
DeviceID (str): The specific ID or model of the SigGen.
"""
__metaclass__ = ABCMeta # Allows for abstract methods to be created.
Output_Power = "0DBM"
Frequency = "0DBM"
Output_State = False
DeviceID = 'Abstract Device Class'
def _split_num_char(self, s):
"""Private method to split up a numeric and characters.
        This should be used to split a value with its units into two separate strings.
i.e. "-70.5 dBm" will return "-70.5" and "dBm" separately.
Args:
s (str): The string that is to be disseminated.
Returns:
str: The numeric characters in the string provided.
str: The non numeric characters in the string provided.
"""
number = ''
unit = ''
s = str(s)
for c in s:
if c.isdigit() or c == "." or c == "-":
number += c
else:
unit += c
return (number, unit)
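    # Example (illustrative, mirrors the docstring above):
    #   self._split_num_char("-70.5 dBm") returns ("-70.5", " dBm"); the unit keeps
    #   its leading space because only digits, '.' and '-' go into the numeric part.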
@abstractmethod
def get_device_ID(self):
"""Abstract method for override that will return device ID.
Returns:
str: The DeviceID of the SigGen.
"""
pass
@abstractmethod
def set_frequency(self,frequency):
"""Abstract method for override that will set the output frequency.
Args:
frequency (float/str): Desired value of the output frequency. If a float is sent the default
units of MHz are used, otherwise using a string different units can be selected.
Returns:
float: The current frequency value as a float and assumed units.
str: The current output frequency concatenated with the units.
"""
pass
@abstractmethod
def get_frequency(self):
"""Abstract method for override that will get the output frequency of the SigGen
Args:
Returns:
float: The current frequency value as a float and assumed units.
str: The current output frequency concatenated with the units.
"""
pass
@abstractmethod
def set_output_power(self, power):
"""Abstract method for override that will set the output power.
Args:
power (float/str): Desired value of the output power. If a float is sent the default
units of dBm are used, otherwise using a string different units can be selected.
Returns:
float: The current power value as a float and assumed units.
str: The current output power concatenated with the units.
"""
pass
@abstractmethod
def get_output_power(self):
"""Abstract method for override that will return the output power.
Args:
Returns:
float: The current power value as a float and assumed units.
str: The current output power concatenated with the units.
"""
pass
@abstractmethod
def turn_on_RF(self):
"""Abstract method for override that will turn on the RF device output.
Args:
Returns:
bool: Returns True if the output is enabled, False if it is not.
"""
pass
@abstractmethod
def turn_off_RF(self):
"""Abstract method for override that will turn off the RF device output.
Args:
Returns:
bool: Returns True if the output is enabled, False if it is not.
"""
pass
@abstractmethod
def get_output_state(self):
"""Abstract method for override that will get the current output state.
Args:
Returns:
bool: Returns True if the output is enabled, False if it is not.
"""
pass
@abstractmethod
def set_output_power_limit(self, limit):
"""Abstract method for override that will set a hardware limit for the output power
Args:
Returns:
float: The power limit
"""
pass
@abstractmethod
def get_output_power_limit(self):
"""Abstract method for override that will get the hardware limit for the output power
Args:
Returns:
float: The power limit
"""
pass
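# Illustrative sketch only (not part of the framework): a minimal, purely simulated
# child class showing how the abstract interface above is intended to be filled in
# by a concrete driver. All names and defaults below are assumptions; a real driver
# would talk to actual hardware instead of storing values in memory.
class SimulatedRFSigGen(Generic_RFSigGen):
    DeviceID = 'Simulated RF signal generator'
    def __init__(self):
        self.Output_Power = -60.0  # dBm, assumed default
        self.Frequency = 0.0       # MHz, assumed default
        self.Output_State = False
        self.power_limit = 0.0     # dBm, assumed default
    def get_device_ID(self):
        return self.DeviceID
    def set_frequency(self, frequency):
        # accept "2500MHz" style strings as well as plain numbers
        self.Frequency = float(self._split_num_char(frequency)[0])
        return self.Frequency, str(self.Frequency) + "MHz"
    def get_frequency(self):
        return self.Frequency, str(self.Frequency) + "MHz"
    def set_output_power(self, power):
        self.Output_Power = float(self._split_num_char(power)[0])
        return self.Output_Power, str(self.Output_Power) + "dBm"
    def get_output_power(self):
        return self.Output_Power, str(self.Output_Power) + "dBm"
    def turn_on_RF(self):
        self.Output_State = True
        return self.Output_State
    def turn_off_RF(self):
        self.Output_State = False
        return self.Output_State
    def get_output_state(self):
        return self.Output_State
    def set_output_power_limit(self, limit):
        self.power_limit = float(self._split_num_char(limit)[0])
        return self.power_limit
    def get_output_power_limit(self):
        return self.power_limit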
|
dharryman/BPM_Test_Framework
|
RFSignalGenerators/Generic_RFSigGen.py
|
Python
|
apache-2.0
| 5,154
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
def testHandleDtypeShapeMatch(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([0], dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0, dtype=dtypes.int32)).run()
def testDtypeSurvivesIdentity(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)).run()
def testCreateRead(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
value = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32).eval()
self.assertAllEqual(1, value)
def testManyAssigns(self):
with self.test_session() as session:
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = session.run([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
def testAssignAdd(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(read.eval(), 2)
def testScatterAdd(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)).run()
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)).run()
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(read.eval(), [[3]])
def testGPU(self):
with self.test_session(use_gpu=True) as sess:
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
sess.run(variables.global_variables_initializer())
print(sess.run(abc))
def testInitFn(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testInitFnDtype(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, v.value().dtype)
def testInitFnNoDtype(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1)
self.assertEqual(dtypes.int32, v.value().dtype)
def testInitializeAllVariables(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32)
with self.assertRaises(errors.NotFoundError):
v.value().eval()
variables.global_variables_initializer().run()
self.assertEqual(1.0, v.value().eval())
def testOperatorOverload(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.assertEqual(2.0, (v+v).eval())
def testAssignMethod(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
v.assign(2.0).eval()
self.assertEqual(2.0, v.value().eval())
def testToFromProto(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEquals(2, math_ops.add(w, 1).eval())
def testAssignAddMethod(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
v.assign_add(1.0).eval()
self.assertEqual(2.0, v.value().eval())
def testAssignSubMethod(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(3.0)
variables.global_variables_initializer().run()
v.assign_sub(1.0).eval()
self.assertEqual(2.0, v.value().eval())
def testDestroyResource(self):
with self.test_session() as sess:
v = resource_variable_ops.ResourceVariable(3.0)
variables.global_variables_initializer().run()
self.assertEqual(3.0, v.value().eval())
sess.run(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.NotFoundError):
v.value().eval()
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
sess.run(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
def testAssignDifferentShapes(self):
with self.test_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run([assign],
feed_dict={placeholder: np.zeros(shape=[2, 2],
dtype=np.float32)})
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
_ = w.value().op.get_attr("_class")
def testSharedName(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var1")
v.initializer.run()
w = resource_variable_ops.var_handle_op(dtype=v.dtype.base_dtype,
shape=v.get_shape(),
shared_name="var1")
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, w_read.eval())
x = resource_variable_ops.var_handle_op(dtype=v.dtype.base_dtype,
shape=v.get_shape(),
shared_name="var1/")
x_read = resource_variable_ops.read_variable_op(x, v.dtype.base_dtype)
with self.assertRaisesOpError("Resource .*/var1//.* does not exist"):
_ = x_read.eval()
def testSetInitialValue(self):
with self.test_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
if __name__ == "__main__":
test.main()
|
alisidd/tensorflow
|
tensorflow/python/kernel_tests/resource_variable_ops_test.py
|
Python
|
apache-2.0
| 10,612
|
# -*- coding: utf-8 -*-
# pylint: disable=wildcard-import, missing-docstring, no-self-use, bad-continuation
# pylint: disable=unused-import
""" Test ``rudiments.morph``.
"""
# Copyright © 2015 - 2019 Jürgen Hermann <jh@web.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
import pytest
from rudiments.morph import *
#class FoobarTests(object):
# def test_foobar(self):
# assert False
|
jhermann/rudiments
|
src/tests/test_morph.py
|
Python
|
apache-2.0
| 975
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick import encryptors
from oslo_serialization import jsonutils
from nova import block_device
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.tests import uuidsentinel as uuids
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import fake as fake_virt
from nova.volume import cinder
class TestDriverBlockDevice(test.NoDBTestCase):
# This is used to signal if we're dealing with a new style volume
# attachment (Cinder v3.44 flow).
attachment_id = None
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'volsnapshot': driver_block_device.DriverVolSnapshotBlockDevice,
'volimage': driver_block_device.DriverVolImageBlockDevice,
'volblank': driver_block_device.DriverVolBlankBlockDevice
}
swap_bdm_dict = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm_dict = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm_dict = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
volsnapshot_bdm_dict = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
volsnapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
volsnapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
volimage_bdm_dict = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
volimage_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
volimage_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
volblank_bdm_dict = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
volblank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
volblank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
self.context, self.volume_bdm_dict)
self.volsnapshot_bdm = fake_block_device.fake_bdm_object(
self.context, self.volsnapshot_bdm_dict)
self.volimage_bdm = fake_block_device.fake_bdm_object(
self.context, self.volimage_bdm_dict)
self.volblank_bdm = fake_block_device.fake_bdm_object(
self.context, self.volblank_bdm_dict)
# Set the attachment_id on our fake class variables. This has to be
# done in setUp so that any attachment_id set by a subclass is
# used properly.
for name in ('volume', 'volsnapshot', 'volimage', 'volblank'):
for attr in ('%s_bdm', '%s_driver_bdm'):
bdm = getattr(self, attr % name)
bdm['attachment_id'] = self.attachment_id
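# Illustrative note (not part of the original test): in this base class the
# loop above sets attachment_id to None on, e.g., self.volume_bdm and
# self.volume_driver_bdm, while TestDriverBlockDeviceNewFlow (below) sets it
# to uuids.attachment_id so the same tests exercise the Cinder v3.44 flow.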
@mock.patch('nova.virt.block_device.LOG')
@mock.patch('os_brick.encryptors')
def test_driver_detach_passes_failed(self, enc, log):
virt = mock.MagicMock()
virt.detach_volume.side_effect = exception.DeviceDetachFailed(
device='sda', reason='because testing')
driver_bdm = self.driver_classes['volume'](self.volume_bdm)
inst = mock.MagicMock()
vol_api = mock.MagicMock()
# Make sure we pass through DeviceDetachFailed,
# but don't log it as an exception, just a warning
self.assertRaises(exception.DeviceDetachFailed,
driver_bdm.driver_detach,
self.context, inst, vol_api, virt)
self.assertFalse(log.exception.called)
self.assertTrue(log.warning.called)
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
bdm = fake_block_device.fake_bdm_object(
self.context, {'no_device': True})
self.assertRaises(driver_block_device._NotTransformable,
cls, bdm)
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
expected = getattr(self, "%s_driver_bdm" % name)
self.assertThat(expected, matchers.DictMatches(test_bdm))
for k, v in db_bdm.items():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
for field, value in expected.items():
# Test that all driver bdm fields are available as both attrs and
# dict values
self.assertEqual(test_bdm[field], value)
self.assertEqual(getattr(test_bdm, field), value)
test_value = mock.sentinel.value
if field in test_bdm._proxy_as_attr:
# We can't set a versioned object field to a sentinel because
# it's an invalid type. It's not worth creating valid example
# values for all possible field types just for this, so we just
# test setting it to its current value. This at least
# exercises the code path without being a maintenance burden.
test_value = value
# Test that we can set values via either attribute or dict
test_bdm[field] = test_value
self.assertEqual(getattr(test_bdm, field), test_value)
setattr(test_bdm, field, value)
self.assertEqual(test_bdm[field], value)
# Reset the value
test_bdm[field] = value
expected = getattr(self, "%s_legacy_driver_bdm" % name)
self.assertThat(expected, matchers.DictMatches(test_bdm.legacy()))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.items():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
for fld, alias in test_bdm._update_on_save.items():
# We can't set fake values on enums, like device_type,
# so skip those.
if not isinstance(test_bdm._bdm_obj.fields[fld],
fields.BaseEnumField):
test_bdm[alias or fld] = 'fake_changed_value'
test_bdm.save()
for fld, alias in test_bdm._update_on_save.items():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with()
def check_save():
self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed())
# Test that nothing is set on the object if there are no actual changes
test_bdm._bdm_obj.obj_reset_changes()
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
save_mock.side_effect = check_save
test_bdm.save()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm_dict" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("volsnapshot")
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('volimage')
test_bdm = self.driver_classes['volimage'](
self.volimage_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('volimage')
bdm = self.volimage_bdm_dict.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['volimage'],
fake_block_device.fake_bdm_object(self.context, bdm))
def test_driver_blank_block_device(self):
self._test_driver_device('volblank')
test_bdm = self.driver_classes['volblank'](
self.volblank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
with mock.patch.object(self.volume_api, 'delete') as vol_delete:
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
if delete_on_termination and delete_fail:
vol_delete.side_effect = Exception()
self.assertRaises(exception.VolumeNotCreated,
test_bdm._call_wait_func,
context=self.context,
wait_func=wait_func,
volume_api=self.volume_api,
volume_id='fake-id')
self.assertEqual(delete_on_termination, vol_delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
def test_call_wait_delete_volume_fail(self):
self._test_call_wait_func(True, True)
def test_call_wait_no_delete_volume(self):
self._test_call_wait_func(False)
def test_volume_delete_attachment(self, include_shared_targets=False):
attachment_id = uuids.attachment
driver_bdm = self.driver_classes['volume'](self.volume_bdm)
driver_bdm['attachment_id'] = attachment_id
elevated_context = self.context.elevated()
instance_detail = {'id': '123', 'uuid': uuids.uuid,
'availability_zone': None}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
volume = {'id': driver_bdm.volume_id,
'attach_status': 'attached',
'status': 'in-use'}
if include_shared_targets:
volume['shared_targets'] = True
volume['service_uuid'] = uuids.service_uuid
with test.nested(
mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
mock.patch.object(self.virt_driver, 'get_volume_connector',
return_value=connector),
mock.patch('nova.utils.synchronized',
side_effect=lambda a: lambda f: lambda *args: f(*args)),
mock.patch.object(self.volume_api, 'attachment_delete'),
) as (mock_get_volume, mock_get_connector, mock_sync, vapi_attach_del):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
if include_shared_targets:
mock_sync.assert_called_once_with((uuids.service_uuid))
vapi_attach_del.assert_called_once_with(elevated_context,
attachment_id)
def test_volume_delete_attachment_with_shared_targets(self):
self.test_volume_delete_attachment(include_shared_targets=True)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, fail_check_av_zone=False,
driver_attach=False, fail_driver_attach=False,
volume_attach=True, fail_volume_attach=False,
access_mode='rw', availability_zone=None,
multiattach=False, driver_multi_attach=False,
fail_with_virt_driver=False,
include_shared_targets=False):
if driver_multi_attach:
self.virt_driver.capabilities['supports_multiattach'] = True
else:
self.virt_driver.capabilities['supports_multiattach'] = False
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance_detail = {'id': '123', 'uuid': uuids.uuid,
'availability_zone': availability_zone}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
if multiattach and driver_multi_attach:
expected_conn_info['multiattach'] = True
enc_data = {'fake': 'enc_data'}
if include_shared_targets:
fake_volume['shared_targets'] = True
fake_volume['service_uuid'] = uuids.service_uuid
self.volume_api.get(
self.context, fake_volume['id'],
microversion='3.48').AndReturn(fake_volume)
else:
# First call to get() fails because the API isn't new enough.
self.volume_api.get(
self.context, fake_volume['id'], microversion='3.48').AndRaise(
exception.CinderAPIVersionNotAvailable(version='3.48'))
# So we fallback to the old call.
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if not fail_check_av_zone:
self.volume_api.check_availability_zone(self.context,
fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_availability_zone(self.context,
fake_volume,
instance=instance).AndRaise(
test.TestingException)
# The @update_db decorator will save any changes.
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
if fail_with_virt_driver:
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
if self.attachment_id is None:
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
else:
self.volume_api.attachment_update(
elevated_context, self.attachment_id, connector,
bdm_dict['device_name']).AndReturn(
{'connection_info': connection_info})
if driver_attach:
encryptors.get_encryption_metadata(
elevated_context, self.volume_api, fake_volume['id'],
connection_info).AndReturn(enc_data)
if not fail_driver_attach:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndReturn(None)
else:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndRaise(test.TestingException)
if self.attachment_id is None:
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(None)
else:
self.volume_api.attachment_delete(
elevated_context, self.attachment_id).AndReturn(None)
# The @update_db decorator will save any changes.
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
if volume_attach:
# save updates before marking the volume as in-use
driver_bdm._bdm_obj.save().AndReturn(None)
if not fail_volume_attach:
if self.attachment_id is None:
self.volume_api.attach(elevated_context, fake_volume['id'],
uuids.uuid, bdm_dict['device_name'],
mode=access_mode).AndReturn(None)
else:
self.volume_api.attachment_complete(
elevated_context, self.attachment_id).AndReturn(None)
else:
if self.attachment_id is None:
self.volume_api.attach(elevated_context, fake_volume['id'],
uuids.uuid, bdm_dict['device_name'],
mode=access_mode).AndRaise(
test.TestingException)
if driver_attach:
self.virt_driver.detach_volume(
self.context, expected_conn_info, instance,
bdm_dict['device_name'],
encryption=enc_data).AndReturn(None)
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(None)
self.volume_api.detach(elevated_context,
fake_volume['id']).AndReturn(None)
else:
self.volume_api.attachment_complete(
elevated_context, self.attachment_id).AndRaise(
test.TestingException)
self.volume_api.attachment_delete(
elevated_context, self.attachment_id).AndReturn(None)
# The @update_db decorator will save any changes.
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self, include_shared_targets=False):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume,
include_shared_targets=include_shared_targets)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_with_shared_targets(self):
self.test_volume_attach(include_shared_targets=True)
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_update_size(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm.volume_size = None
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached',
'size': 42}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertEqual(expected_conn_info, test_bdm['connection_info'])
self.assertEqual(42, test_bdm.volume_size)
def test_volume_attach_check_av_zone_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_av_zone=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=False)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_driver_attach=False)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True,
fail_driver_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
@mock.patch('nova.objects.BlockDeviceMapping.save')
@mock.patch('nova.volume.cinder.API')
@mock.patch('os_brick.encryptors.get_encryption_metadata',
return_value={})
def test_volume_attach_volume_attach_fails(self, mock_get_encryption,
mock_volume_api, mock_bdm_save):
"""Tests that attaching the volume fails and driver rollback occurs."""
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
mock_volume_api.get.return_value = volume
instance = fake_instance.fake_instance_obj(self.context)
virt_driver = fake_virt.SmallFakeDriver(virtapi=mock.MagicMock())
fake_conn_info = {
'serial': volume['id'],
'data': {
'foo': 'bar'
}
}
if self.attachment_id:
mock_volume_api.attachment_update.return_value = {
'connection_info': fake_conn_info
}
mock_volume_api.attachment_complete.side_effect = (
test.TestingException)
else:
# legacy flow, stub out the volume_api accordingly
mock_volume_api.attach.side_effect = test.TestingException
mock_volume_api.initialize_connection.return_value = fake_conn_info
with mock.patch.object(virt_driver, 'detach_volume') as drvr_detach:
with mock.patch.object(self.context, 'elevated',
return_value=self.context):
self.assertRaises(test.TestingException, test_bdm.attach,
self.context, instance, mock_volume_api,
virt_driver, do_driver_attach=True)
drvr_detach.assert_called_once_with(
self.context, fake_conn_info, instance,
self.volume_bdm.device_name,
encryption=mock_get_encryption.return_value)
if self.attachment_id:
mock_volume_api.attachment_delete.assert_called_once_with(
self.context, self.attachment_id)
else:
mock_volume_api.terminate_connection.assert_called_once_with(
self.context, volume['id'],
virt_driver.get_volume_connector(instance))
mock_volume_api.detach.assert_called_once_with(
self.context, volume['id'])
self.assertEqual(2, mock_bdm_save.call_count)
def test_volume_attach_no_driver_attach_volume_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_volume_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=False)
def test_refresh_connection(self):
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
if self.attachment_id is None:
self.virt_driver.get_volume_connector(instance).AndReturn(
connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
else:
self.volume_api.attachment_get(
self.context, self.attachment_id).AndReturn(
{'connection_info': connection_info})
test_bdm._bdm_obj.save().AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3, '', '', snapshot,
availability_zone=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_no_volume_cinder_cross_az_attach_false(self):
# Tests that the volume created from the snapshot has the same AZ as
# the instance.
self.flags(cross_az_attach=False, group='cinder')
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3, '', '', snapshot,
availability_zone='test-az').AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume,
availability_zone='test-az')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_snapshot_attach_fail_volume(self):
fail_volume_snapshot = self.volsnapshot_bdm_dict.copy()
fail_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, fail_volume_snapshot))
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
with test.nested(
mock.patch.object(self.volume_api, 'get_snapshot',
return_value=snapshot),
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_get_snap, vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_get_snap.assert_called_once_with(
self.context, 'fake-snapshot-id-1')
vol_create.assert_called_once_with(
self.context, 3, '', '', snapshot, availability_zone=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, no_volume_image))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
availability_zone=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume_cinder_cross_az_attach_false(self):
# Tests that the volume created from the image has the same AZ as the
# instance.
self.flags(cross_az_attach=False, group='cinder')
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, no_volume_image))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
availability_zone='test-az').AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume,
availability_zone='test-az')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_image_attach_fail_volume(self):
fail_volume_image = self.volimage_bdm_dict.copy()
fail_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, fail_volume_image))
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, 1, '', '', image_id=image['id'],
availability_zone=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['volimage'](
self.volimage_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_fail_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', availability_zone=None)
vol_delete.assert_called_once_with(
self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(volume_class, 'attach')
) as (vol_create, vol_attach):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', availability_zone=None)
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_blank_attach_volume_cinder_cross_az_attach_false(self):
# Tests that the blank volume created is in the same availability zone
# as the instance.
self.flags(cross_az_attach=False, group='cinder')
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with mock.patch.object(self.volume_api, 'create',
return_value=volume) as vol_create:
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', availability_zone='test-az')
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
bdms = objects.BlockDeviceMappingList(
objects=[self.volume_bdm, self.ephemeral_bdm])
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'], bdms)
self.assertEqual(converted, [self.volume_driver_bdm])
def test_convert_all_volumes(self):
converted = driver_block_device.convert_all_volumes()
self.assertEqual([], converted)
converted = driver_block_device.convert_all_volumes(
self.volume_bdm, self.ephemeral_bdm, self.volimage_bdm,
self.volblank_bdm, self.volsnapshot_bdm)
self.assertEqual(converted, [self.volume_driver_bdm,
self.volimage_driver_bdm,
self.volblank_driver_bdm,
self.volsnapshot_driver_bdm])
def test_convert_volume(self):
self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm))
self.assertEqual(self.volume_driver_bdm,
driver_block_device.convert_volume(self.volume_bdm))
self.assertEqual(self.volsnapshot_driver_bdm,
driver_block_device.convert_volume(
self.volsnapshot_bdm))
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.volsnapshot_legacy_driver_bdm,
self.volsnapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in range(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in range(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.volimage_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.volsnapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.volimage_bdm_dict.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(
fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
test_image = self.driver_classes['volimage'](self.volimage_bdm)
test_snapshot = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
test_volume = self.driver_classes['volume'](self.volume_bdm)
test_blank = self.driver_classes['volblank'](self.volblank_bdm)
for bdm in (test_image, test_snapshot, test_volume, test_blank):
self.assertTrue(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
for bdm in (test_swap, test_ephemeral):
self.assertFalse(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
def test_get_volume_create_az_cinder_cross_az_attach_true(self):
# Tests that we get None back if cinder.cross_az_attach=True even if
# the instance has an AZ assigned. Note that since cross_az_attach
# defaults to True we don't need to set a flag explicitly for the test.
updates = {'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(self.context, **updates)
self.assertIsNone(
driver_block_device._get_volume_create_az_value(instance))
def test_refresh_conn_infos(self):
# Only DriverVolumeBlockDevice derived devices should refresh their
# connection_info during a refresh_conn_infos call.
test_volume = mock.MagicMock(
spec=driver_block_device.DriverVolumeBlockDevice)
test_image = mock.MagicMock(
spec=driver_block_device.DriverVolImageBlockDevice)
test_snapshot = mock.MagicMock(
spec=driver_block_device.DriverVolSnapshotBlockDevice)
test_blank = mock.MagicMock(
spec=driver_block_device.DriverVolBlankBlockDevice)
test_eph = mock.MagicMock(
spec=driver_block_device.DriverEphemeralBlockDevice)
test_swap = mock.MagicMock(
spec=driver_block_device.DriverSwapBlockDevice)
block_device_mapping = [test_volume, test_image, test_eph,
test_snapshot, test_swap, test_blank]
driver_block_device.refresh_conn_infos(block_device_mapping,
mock.sentinel.refresh_context,
mock.sentinel.refresh_instance,
mock.sentinel.refresh_vol_api,
mock.sentinel.refresh_virt_drv)
for test_mock in [test_volume, test_image, test_snapshot, test_blank]:
test_mock.refresh_connection_info.assert_called_once_with(
mock.sentinel.refresh_context,
mock.sentinel.refresh_instance,
mock.sentinel.refresh_vol_api,
mock.sentinel.refresh_virt_drv)
# NOTE(lyarwood): Can't think of a better way of testing this as we
# can't assert_not_called if the method isn't in the spec.
self.assertFalse(hasattr(test_eph, 'refresh_connection_info'))
self.assertFalse(hasattr(test_swap, 'refresh_connection_info'))
def test_proxy_as_attr(self):
class A(driver_block_device.DriverBlockDevice):
pass
def _transform(self):
pass
class B(A):
_proxy_as_attr_inherited = set('B')
class C(A):
_proxy_as_attr_inherited = set('C')
class D(B):
_proxy_as_attr_inherited = set('D')
class E(B, C):
_proxy_as_attr_inherited = set('E')
bdm = objects.BlockDeviceMapping(self.context, no_device=False)
self.assertEqual(set(['uuid', 'is_volume']), A(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B']),
B(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'C']),
C(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B', 'D']),
D(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B', 'C', 'E']),
E(bdm)._proxy_as_attr)
class TestDriverBlockDeviceNewFlow(TestDriverBlockDevice):
"""Virt block_device tests for the Cinder 3.44 volume attach flow
where a volume BDM has an attachment_id.
"""
attachment_id = uuids.attachment_id
def test_volume_attach_multiattach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'multiattach': True,
'attach_status': 'attached',
'status': 'in-use',
'attachments': {'fake_instance_2':
{'mountpoint': '/dev/vdc'}}}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, multiattach=True,
driver_multi_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_multiattach_no_virt_driver_support(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'multiattach': True,
'attach_status': 'attached',
'status': 'in-use',
'attachments': {'fake_instance_2':
{'mountpoint': '/dev/vdc'}}}
instance, _ = self._test_volume_attach(test_bdm, self.volume_bdm,
volume, multiattach=True,
fail_with_virt_driver=True)
self.mox.ReplayAll()
self.assertRaises(exception.MultiattachNotSupportedByVirtDriver,
test_bdm.attach, self.context, instance,
self.volume_api, self.virt_driver)
@mock.patch('nova.objects.BlockDeviceMapping.save')
def test_refresh_connection_preserve_multiattach(self, mock_bdm_save):
"""Tests that we've already attached a multiattach-capable volume
and when refreshing the connection_info from the attachment record,
the multiattach flag in the bdm.connection_info is preserved.
"""
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['connection_info']['multiattach'] = True
volume_api = mock.Mock()
volume_api.attachment_get.return_value = {
'connection_info': {
'data': {
'some': 'goodies'
}
}
}
test_bdm.refresh_connection_info(
self.context, mock.sentinel.instance,
volume_api, mock.sentinel.virt_driver)
volume_api.attachment_get.assert_called_once_with(
self.context, self.attachment_id)
mock_bdm_save.assert_called_once_with()
expected_connection_info = {
'data': {
'some': 'goodies'
},
'serial': self.volume_bdm.volume_id,
'multiattach': True
}
self.assertDictEqual(expected_connection_info,
test_bdm['connection_info'])
class TestGetVolumeId(test.NoDBTestCase):
def test_get_volume_id_none_found(self):
self.assertIsNone(driver_block_device.get_volume_id(None))
self.assertIsNone(driver_block_device.get_volume_id({}))
self.assertIsNone(driver_block_device.get_volume_id({'data': {}}))
def test_get_volume_id_found_volume_id_no_serial(self):
self.assertEqual(uuids.volume_id,
driver_block_device.get_volume_id(
{'data': {'volume_id': uuids.volume_id}}))
def test_get_volume_id_found_no_volume_id_serial(self):
self.assertEqual(uuids.serial,
driver_block_device.get_volume_id(
{'serial': uuids.serial}))
def test_get_volume_id_found_both(self):
# volume_id takes precedence over serial
self.assertEqual(uuids.volume_id,
driver_block_device.get_volume_id(
{'serial': uuids.serial,
'data': {'volume_id': uuids.volume_id}}))
|
phenoxim/nova
|
nova/tests/unit/virt/test_block_device.py
|
Python
|
apache-2.0
| 62,888
|
import json
import httplib2
from samples.utils import get_service, poll_job
from oauth2client.client import GoogleCredentials
# [START make_post]
def make_post(http, schema, data, projectId, datasetId, tableId):
url = ('https://www.googleapis.com/upload/bigquery/v2/projects/' +
projectId + '/jobs')
# Create the body of the request, separated by a boundary of xxx
resource = ('--xxx\n' +
'Content-Type: application/json; charset=UTF-8\n' + '\n' +
'{\n' +
' "configuration": {\n' +
' "load": {\n' +
' "schema": {\n'
' "fields": ' + str(schema) + '\n' +
' },\n' +
' "destinationTable": {\n' +
' "projectId": "' + projectId + '",\n' +
' "datasetId": "' + datasetId + '",\n' +
' "tableId": "' + tableId + '"\n' +
' }\n' +
' }\n' +
' }\n' +
'}\n' +
'--xxx\n' +
'Content-Type: application/octet-stream\n' +
'\n')
# Append data to the request body
resource += data
# Signify the end of the body
resource += ('--xxx--\n')
headers = {'Content-Type': 'multipart/related; boundary=xxx'}
return http.request(url,
method='POST',
body=resource,
headers=headers)
# [END make_post]
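# Illustrative sketch (not part of the original sample): the body built above
# is a two-part multipart/related payload, roughly:
#
#   --xxx
#   Content-Type: application/json; charset=UTF-8
#
#   {"configuration": {"load": {"schema": {...}, "destinationTable": {...}}}}
#   --xxx
#   Content-Type: application/octet-stream
#
#   <raw data rows>
#   --xxx--
#
# with the request's Content-Type header declaring the 'xxx' boundary.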
# [START main]
def main():
credentials = GoogleCredentials.get_application_default()
http = credentials.authorize(httplib2.Http())
projectId = raw_input('Enter the project ID: ')
datasetId = raw_input('Enter a dataset ID: ')
tableId = raw_input('Enter a table name to load the data to: ')
schema_path = raw_input(
'Enter the path to the schema file for the table: ')
with open(schema_path, 'r') as schema_file:
schema = schema_file.read()
data_path = raw_input('Enter the path to the data file: ')
with open(data_path, 'r') as data_file:
data = data_file.read()
resp, content = make_post(http,
schema,
data,
projectId,
datasetId,
tableId)
if resp.status == 200:
job_resource = json.loads(content)
service = get_service(credentials)
poll_job(service, **job_resource['jobReference'])
print("Success!")
else:
print("Http error code: {}".format(resp.status))
# [END main]
if __name__ == '__main__':
main()
|
googlearchive/bigquery-samples-python
|
python/samples/load_data_by_post.py
|
Python
|
apache-2.0
| 2,723
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for McAfee Anti-Virus Logs.
McAfee AV uses 4 logs to track when scans were run, when virus databases were
updated, and when files match the virus database."""
import logging
from plaso.lib import event
from plaso.lib import text_parser
from plaso.lib import timelib
class McafeeAVEvent(event.TextEvent):
"""Convenience class for McAfee AV Log events """
DATA_TYPE = 'av:mcafee:accessprotectionlog'
def __init__(self, timestamp, attributes):
"""Initializes a McAfee AV Log Event.
Args:
timestamp: The timestamp time value. The timestamp contains the
number of seconds since Jan 1, 1970 00:00:00 UTC.
attributes: Dict of elements from the AV log line.
"""
del attributes['time']
del attributes['date']
super(McafeeAVEvent, self).__init__(timestamp, attributes)
self.full_path = attributes['filename']
class McafeeAccessProtectionParser(text_parser.TextCSVParser):
"""Parses the McAfee AV Access Protection Log."""
NAME = 'mcafee_protection'
VALUE_SEPARATOR = '\t'
# Define the columns of the McAfee AV Access Protection Log.
COLUMNS = ['date', 'time', 'status', 'username', 'filename',
'trigger_location', 'rule', 'action']
def VerifyRow(self, row):
"""Verify that this is a McAfee AV Access Protection Log file."""
if len(row) != 8:
return False
# This file can have the UTF-8 marker at the beginning of the first row.
# TODO: Find out all the code pages this can have. Asked McAfee 10/31.
if row['date'][0:3] == '\xef\xbb\xbf':
row['date'] = row['date'][3:]
# Check the date format!
# If it doesn't pass, then this isn't a McAfee AV Access Protection Log
try:
self.GetTimestamp(row['date'], row['time'])
except (TypeError, ValueError):
return False
# Use the presence of these strings as a backup or in case of a partial file.
if (not 'Access Protection' in row['status'] and
not 'Would be blocked' in row['status']):
return False
return True
def GetTimestamp(self, date, time):
"""Return a 64-bit signed timestamp in microseconds since Epoch.
The timestamp is made up of two strings, the date and the time, separated
by a tab. The time is in local time. The month and day can be either 1 or 2
characters long. E.g.: 7/30/2013\t10:22:48 AM
Args:
date: The string representing the date.
time: The string representing the time.
Returns:
A plaso timestamp value, microseconds since Epoch in UTC.
"""
if not (date and time):
logging.warning('Unable to extract timestamp from McAfee AV logline.')
return
# TODO: Figure out how McAfee sets Day First and use that here.
# The in-file time format is '07/30/2013\t10:22:48 AM'.
timestamp = timelib.Timestamp.FromTimeString(
u'{0:s} {1:s}'.format(date, time), timezone=self._pre_obj.zone)
return timestamp
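# Illustrative example (not part of the original parser): for a log line with
# date '7/30/2013' and time '10:22:48 AM', GetTimestamp() combines them into
# '7/30/2013 10:22:48 AM' and converts that local-time string (using the
# parser's configured timezone) to microseconds since the UNIX epoch in UTC.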
def ParseRow(self, row):
"""Parse a single row from the McAfee Access Protection Log file."""
epoch = self.GetTimestamp(row['date'], row['time'])
yield McafeeAVEvent(epoch, row)
|
iwm911/plaso
|
plaso/parsers/mcafeeav.py
|
Python
|
apache-2.0
| 3,840
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from arrow.arrow import Arrow
from datetime import timedelta
from six import PY2, PY3, StringIO, string_types, text_type, integer_types
from six.moves import filter, map, range
from uuid import uuid4
import arrow
import re
from . import parse
tzutc = arrow.utcnow().tzinfo
def remove_x(container):
for i in reversed(range(len(container))):
item = container[i]
if item.name.startswith('X-'):
del container[i]
def iso_to_arrow(time_container, available_tz={}):
if time_container is None:
return None
# TODO : raise if not iso date
tz_list = time_container.params.get('TZID')
# TODO : raise if len(tz_list) > 1 or if tz is not a valid tz
# TODO : see if timezone is registered as a VTIMEZONE
if tz_list and len(tz_list) > 0:
tz = tz_list[0]
else:
tz = None
if ('T' not in time_container.value) and \
'DATE' in time_container.params.get('VALUE', []):
val = time_container.value + 'T0000'
else:
val = time_container.value
if tz and not (val[-1].upper() == 'Z'):
naive = arrow.get(val).naive
selected_tz = available_tz.get(tz, 'UTC')
return arrow.get(naive, selected_tz)
else:
return arrow.get(val)
# TODO : support floating (ie not bound to any time zone) times (cf
# http://www.kanzaki.com/docs/ical/dateTime.html)
def iso_precision(string):
has_time = 'T' in string
if has_time:
date_string, time_string = string.split('T', 1)
time_parts = re.split('[+-]', time_string, 1)
has_seconds = time_parts[0].count(':') > 1
has_seconds = not has_seconds and len(time_parts[0]) == 6
if has_seconds:
return 'second'
else:
return 'minute'
else:
return 'day'
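# Illustrative examples (not part of the original module), assuming
# iCalendar-style date-time strings:
#   iso_precision('20140101')        -> 'day'
#   iso_precision('20140101T1030')   -> 'minute'
#   iso_precision('20140101T103000') -> 'second'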
def get_lines(container, name):
lines = []
for i in reversed(range(len(container))):
item = container[i]
if item.name == name:
lines.append(item)
del container[i]
return lines
def parse_duration(line):
"""
Return a timedelta object from a string in the DURATION property format
"""
DAYS, SECS = {'D': 1, 'W': 7}, {'S': 1, 'M': 60, 'H': 3600}
sign, i = 1, 0
if line[i] in '-+':
if line[i] == '-':
sign = -1
i += 1
if line[i] != 'P':
raise parse.ParseError()
i += 1
days, secs = 0, 0
while i < len(line):
if line[i] == 'T':
i += 1
if i == len(line):
break
j = i
while line[j].isdigit():
j += 1
if i == j:
raise parse.ParseError()
val = int(line[i:j])
if line[j] in DAYS:
days += val * DAYS[line[j]]
DAYS.pop(line[j])
elif line[j] in SECS:
secs += val * SECS[line[j]]
SECS.pop(line[j])
else:
raise parse.ParseError()
i = j + 1
return timedelta(sign * days, sign * secs)
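# Illustrative examples (not part of the original module), using the RFC 5545
# DURATION syntax handled above:
#   parse_duration('P1W2D')    -> timedelta(days=9)
#   parse_duration('-PT1H30M') -> timedelta(hours=-1, minutes=-30)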
def timedelta_to_duration(dt):
"""
Return a string according to the DURATION property format
from a timedelta object
"""
days, secs = dt.days, dt.seconds
res = 'P'
if days // 7:
res += str(days // 7) + 'W'
days %= 7
if days:
res += str(days) + 'D'
if secs:
res += 'T'
if secs // 3600:
res += str(secs // 3600) + 'H'
secs %= 3600
if secs // 60:
res += str(secs // 60) + 'M'
secs %= 60
if secs:
res += str(secs) + 'S'
return res
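# Illustrative example (not part of the original module):
#   timedelta_to_duration(timedelta(days=8, hours=1, minutes=1, seconds=1))
#   -> 'P1W1DT1H1M1S'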
def get_arrow(value):
if value is None:
return None
elif isinstance(value, Arrow):
return value
elif isinstance(value, tuple):
return arrow.get(*value)
elif isinstance(value, dict):
return arrow.get(**value)
else:
return arrow.get(value)
def arrow_to_iso(instant, precision="second"):
# set to utc, make iso, remove timezone
if precision == "day":
return arrow.get(instant.astimezone(tzutc)).format('YYYYMMDD')
instant = arrow.get(instant.astimezone(tzutc)).format('YYYYMMDDTHHmmss')
return instant + 'Z'
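# Illustrative examples (not part of the original module):
#   arrow_to_iso(arrow.get('2014-01-01T10:00:00+02:00'))    -> '20140101T080000Z'
#   arrow_to_iso(arrow.get('2014-01-01'), precision='day')  -> '20140101'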
def uid_gen():
uid = str(uuid4())
return "{}@{}.org".format(uid, uid[:4])
def escape_string(string):
string = string.replace("\\", "\\\\")
string = string.replace(";", "\\;")
string = string.replace(",", "\\,")
string = string.replace("\n", "\\n")
return string
def unescape_string(string):
string = string.replace("\\;", ";")
string = string.replace("\\,", ",")
string = string.replace("\\n", "\n")
string = string.replace("\\N", "\n")
string = string.replace("\\\\", "\\")
return string
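# Illustrative example (not part of the original module): escape_string() and
# unescape_string() round-trip RFC 5545 TEXT values, e.g. (shown as Python
# literals):
#   escape_string('Lunch, room 5; bring slides\n')
#   -> 'Lunch\\, room 5\\; bring slides\\n'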
|
zifeiwoo/icalics
|
ics/utils.py
|
Python
|
apache-2.0
| 4,846
|
# Lint as: python3
# Copyright 2019 Verily Life Sciences Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provider for running jobs on Google Cloud Platform.
This module implements job creation, listing, and canceling using the
Google Cloud Life Sciences Pipelines and Operations APIs v2beta.
"""
from . import google_v2_base
from . import google_v2_versions
_PROVIDER_NAME = 'google-cls-v2'
class GoogleCLSV2JobProvider(google_v2_base.GoogleV2JobProviderBase):
"""dsub provider implementation managing Jobs on Google Cloud."""
def __init__(self, dry_run, project, location, credentials=None):
super(GoogleCLSV2JobProvider,
self).__init__(_PROVIDER_NAME, google_v2_versions.V2BETA, credentials,
project, dry_run)
self._location = location
def _get_pipeline_regions(self, regions, zones):
"""Returns the list of regions to use for a pipeline request.
If neither regions nor zones were specified for the pipeline, then use the
v2beta location as the default region.
Args:
regions (str): A space separated list of regions to use for the pipeline.
zones (str): A space separated list of zones to use for the pipeline.
"""
if not regions and not zones:
return [self._location]
return regions or []
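# Illustrative note (not part of the original provider): with
# location='us-central1', _get_pipeline_regions(None, None) returns
# ['us-central1'], _get_pipeline_regions('us-east1', None) returns
# 'us-east1' unchanged, and a zones-only request returns [].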
def _pipelines_run_api(self, request):
parent = 'projects/{}/locations/{}'.format(self._project, self._location)
return self._service.projects().locations().pipelines().run(
parent=parent, body=request)
def _operations_list_api(self, ops_filter, page_token, page_size):
name = 'projects/{}/locations/{}'.format(self._project, self._location)
return self._service.projects().locations().operations().list(
name=name, filter=ops_filter, pageToken=page_token, pageSize=page_size)
def _operations_cancel_api_def(self):
return self._service.projects().locations().operations().cancel
def _batch_handler_def(self):
"""Returns a function object for the provider-specific batch handler."""
# The batch endpoint currently only works for us-central1 requests.
if self._location != 'us-central1':
return google_v2_base.GoogleV2BatchHandler
# The Lifesciences API provides a batch endpoint
# (the Genomics v2alpha1 does not).
#
# This function returns the new_batch_http_request function, which the
# caller can then use to create a BatchHttpRequest object.
# The new_batch_http_request function is provided by the Google APIs
# Python Client for batching requests destined for the batch endpoint.
#
# For documentation, see
# https://googleapis.github.io/google-api-python-client/docs/dyn/lifesciences_v2beta.html#new_batch_http_request
#
# For example usage, see google_base.py (_cancel() and __cancel_batch()).
return self._service.new_batch_http_request
if __name__ == '__main__':
pass
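  # Illustrative sketch (not part of dsub): the Cloud Life Sciences resource
  # names built by _pipelines_run_api and _operations_list_api above, shown
  # with hypothetical project/location values.
  example_project, example_location = 'example-project', 'us-central1'
  print('projects/{}/locations/{}'.format(example_project, example_location))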
|
DataBiosphere/dsub
|
dsub/providers/google_cls_v2.py
|
Python
|
apache-2.0
| 3,428
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_global_morans_i.ui'
#
# Created: Mon Jul 07 17:51:16 2014
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
|
Gaia3D/GeepsSpatialStatistic
|
ui_global_morans_i.py
|
Python
|
apache-2.0
| 240
|
# Copyright 2016 Red Hat, Inc
# Copyright 2017 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for filesystem related routines.
"""
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils.secretutils import md5
import nova.privsep
LOG = logging.getLogger(__name__)
@nova.privsep.sys_admin_pctxt.entrypoint
def mount(fstype, device, mountpoint, options):
mount_cmd = ['mount']
if fstype:
mount_cmd.extend(['-t', fstype])
if options is not None:
mount_cmd.extend(options)
mount_cmd.extend([device, mountpoint])
return processutils.execute(*mount_cmd)
@nova.privsep.sys_admin_pctxt.entrypoint
def umount(mountpoint):
processutils.execute('umount', mountpoint, attempts=3, delay_on_retry=True)
@nova.privsep.sys_admin_pctxt.entrypoint
def lvcreate(size, lv, vg, preallocated=None):
cmd = ['lvcreate']
if not preallocated:
cmd.extend(['-L', '%db' % size])
else:
cmd.extend(['-L', '%db' % preallocated,
'--virtualsize', '%db' % size])
cmd.extend(['-n', lv, vg])
processutils.execute(*cmd, attempts=3)
@nova.privsep.sys_admin_pctxt.entrypoint
def vginfo(vg):
return processutils.execute('vgs', '--noheadings', '--nosuffix',
'--separator', '|', '--units', 'b',
'-o', 'vg_size,vg_free', vg)
@nova.privsep.sys_admin_pctxt.entrypoint
def lvlist(vg):
return processutils.execute('lvs', '--noheadings', '-o', 'lv_name', vg)
@nova.privsep.sys_admin_pctxt.entrypoint
def lvinfo(path):
return processutils.execute('lvs', '-o', 'vg_all,lv_all',
'--separator', '|', path)
@nova.privsep.sys_admin_pctxt.entrypoint
def lvremove(path):
processutils.execute('lvremove', '-f', path, attempts=3)
@nova.privsep.sys_admin_pctxt.entrypoint
def blockdev_size(path):
return processutils.execute('blockdev', '--getsize64', path)
@nova.privsep.sys_admin_pctxt.entrypoint
def blockdev_flush(path):
return processutils.execute('blockdev', '--flushbufs', path)
@nova.privsep.sys_admin_pctxt.entrypoint
def clear(path, volume_size, shred=False):
cmd = ['shred']
if shred:
cmd.extend(['-n3'])
else:
cmd.extend(['-n0', '-z'])
cmd.extend(['-s%d' % volume_size, path])
processutils.execute(*cmd)
@nova.privsep.sys_admin_pctxt.entrypoint
def loopsetup(path):
return processutils.execute('losetup', '--find', '--show', path)
@nova.privsep.sys_admin_pctxt.entrypoint
def loopremove(device):
return processutils.execute('losetup', '--detach', device, attempts=3)
@nova.privsep.sys_admin_pctxt.entrypoint
def nbd_connect(device, image):
return processutils.execute('qemu-nbd', '-c', device, image)
@nova.privsep.sys_admin_pctxt.entrypoint
def nbd_disconnect(device):
return processutils.execute('qemu-nbd', '-d', device)
@nova.privsep.sys_admin_pctxt.entrypoint
def create_device_maps(device):
return processutils.execute('kpartx', '-a', device)
@nova.privsep.sys_admin_pctxt.entrypoint
def remove_device_maps(device):
return processutils.execute('kpartx', '-d', device)
@nova.privsep.sys_admin_pctxt.entrypoint
def get_filesystem_type(device):
return processutils.execute('blkid', '-o', 'value', '-s', 'TYPE', device,
check_exit_code=[0, 2])
@nova.privsep.sys_admin_pctxt.entrypoint
def e2fsck(image, flags='-fp'):
unprivileged_e2fsck(image, flags=flags)
# NOTE(mikal): this method is deliberately not wrapped in a privsep
# entrypoint. This is not for unit testing; there are some callers who do
# not require elevated permissions when calling this.
def unprivileged_e2fsck(image, flags='-fp'):
processutils.execute('e2fsck', flags, image, check_exit_code=[0, 1, 2])
@nova.privsep.sys_admin_pctxt.entrypoint
def resize2fs(image, check_exit_code, size=None):
unprivileged_resize2fs(image, check_exit_code=check_exit_code, size=size)
# NOTE(mikal): this method is deliberately not wrapped in a privsep
# entrypoint. This is not for unit testing; there are some callers who do
# not require elevated permissions when calling this.
def unprivileged_resize2fs(image, check_exit_code, size=None):
if size:
cmd = ['resize2fs', image, size]
else:
cmd = ['resize2fs', image]
processutils.execute(*cmd, check_exit_code=check_exit_code)
@nova.privsep.sys_admin_pctxt.entrypoint
def create_partition_table(device, style, check_exit_code=True):
processutils.execute('parted', '--script', device, 'mklabel', style,
check_exit_code=check_exit_code)
@nova.privsep.sys_admin_pctxt.entrypoint
def create_partition(device, style, start, end, check_exit_code=True):
processutils.execute('parted', '--script', device, '--',
'mkpart', style, start, end,
check_exit_code=check_exit_code)
@nova.privsep.sys_admin_pctxt.entrypoint
def list_partitions(device):
return unprivileged_list_partitions(device)
# NOTE(mikal): this method is deliberately not wrapped in a privsep
# entrypoint. This is not for unit testing; there are some callers who do
# not require elevated permissions when calling this.
def unprivileged_list_partitions(device):
"""Return partition information (num, size, type) for a device."""
out, _err = processutils.execute('parted', '--script', '--machine',
device, 'unit s', 'print')
lines = [line for line in out.split('\n') if line]
partitions = []
LOG.debug('Partitions:')
for line in lines[2:]:
line = line.rstrip(';')
num, start, end, size, fstype, name, flags = line.split(':')
num = int(num)
start = int(start.rstrip('s'))
end = int(end.rstrip('s'))
size = int(size.rstrip('s'))
LOG.debug(' %(num)s: %(fstype)s %(size)d sectors',
{'num': num, 'fstype': fstype, 'size': size})
partitions.append((num, start, size, fstype, name, flags))
return partitions
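# Illustrative sketch (not part of nova): the machine-readable 'parted' output
# line format parsed by unprivileged_list_partitions above, demonstrated on a
# hard-coded sample line (the values are assumptions, not real output).
if __name__ == '__main__':
    _sample = '1:2048s:1050623s:1048576s:ext4::boot;'
    _num, _start, _end, _size, _fstype, _name, _flags = _sample.rstrip(';').split(':')
    print(int(_num), int(_start.rstrip('s')), int(_size.rstrip('s')), _fstype, _flags)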
@nova.privsep.sys_admin_pctxt.entrypoint
def resize_partition(device, start, end, bootable):
processutils.execute('parted', '--script', device, 'rm', '1')
processutils.execute('parted', '--script', device, 'mkpart',
'primary', '%ds' % start, '%ds' % end)
if bootable:
processutils.execute('parted', '--script', device,
'set', '1', 'boot', 'on')
@nova.privsep.sys_admin_pctxt.entrypoint
def ext_journal_disable(device):
processutils.execute('tune2fs', '-O ^has_journal', device)
@nova.privsep.sys_admin_pctxt.entrypoint
def ext_journal_enable(device):
processutils.execute('tune2fs', '-j', device)
# NOTE(mikal): nova allows deployers to configure the command line which is
# used to create a filesystem of a given type. This is frankly a little bit
# weird, but it's also historical and probably should be in some sort of
# museum. So, we do that thing here, but it requires a funny dance in order
# to load that configuration at startup.
# NOTE(mikal): I really feel like this whole thing should be deprecated; I
# just don't think it's a great idea to let people specify a command in a
# configuration option to run as root.
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
FS_FORMAT_EXT2 = "ext2"
FS_FORMAT_EXT3 = "ext3"
FS_FORMAT_EXT4 = "ext4"
FS_FORMAT_XFS = "xfs"
FS_FORMAT_NTFS = "ntfs"
FS_FORMAT_VFAT = "vfat"
SUPPORTED_FS_TO_EXTEND = (
FS_FORMAT_EXT2,
FS_FORMAT_EXT3,
FS_FORMAT_EXT4)
_DEFAULT_FILE_SYSTEM = FS_FORMAT_VFAT
_DEFAULT_FS_BY_OSTYPE = {'linux': FS_FORMAT_EXT4,
'windows': FS_FORMAT_NTFS}
def load_mkfs_command(os_type, command):
global _MKFS_COMMAND
global _DEFAULT_MKFS_COMMAND
_MKFS_COMMAND[os_type] = command
if os_type == 'default':
_DEFAULT_MKFS_COMMAND = command
def get_fs_type_for_os_type(os_type):
global _MKFS_COMMAND
return os_type if _MKFS_COMMAND.get(os_type) else 'default'
# NOTE(mikal): this method needs to be duplicated from utils because privsep
# can't depend on code outside the privsep directory.
def _get_hash_str(base_str):
"""Returns string that represents MD5 hash of base_str (in hex format).
If base_str is a Unicode string, encode it to UTF-8.
"""
if isinstance(base_str, str):
base_str = base_str.encode('utf-8')
return md5(base_str, usedforsecurity=False).hexdigest()
def get_file_extension_for_os_type(os_type, default_ephemeral_format,
specified_fs=None):
global _MKFS_COMMAND
global _DEFAULT_MKFS_COMMAND
mkfs_command = _MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND)
if mkfs_command:
extension = mkfs_command
else:
if not specified_fs:
specified_fs = default_ephemeral_format
if not specified_fs:
specified_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type,
_DEFAULT_FILE_SYSTEM)
extension = specified_fs
return _get_hash_str(extension)[:7]
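# Illustrative sketch (not part of nova): how the ephemeral-disk file extension
# above is derived. Either the deployer-configured mkfs command template or the
# chosen filesystem name is hashed, and only the first seven hex digits are
# kept. The template and values below are assumptions for demonstration.
if __name__ == '__main__':
    load_mkfs_command('linux', 'mkfs.ext4 -L %(fs_label)s %(target)s')
    print(get_file_extension_for_os_type('linux', default_ephemeral_format=None))
    print(get_file_extension_for_os_type('windows', default_ephemeral_format=None))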
@nova.privsep.sys_admin_pctxt.entrypoint
def mkfs(fs, path, label=None):
    unprivileged_mkfs(fs, path, label=label)
# NOTE(mikal): this method is deliberately not wrapped in a privsep
# entrypoint. This is not for unit testing; there are some callers who do
# not require elevated permissions when calling this.
def unprivileged_mkfs(fs, path, label=None):
"""Format a file or block device
:param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
'btrfs', etc.)
:param path: Path to file or block device to format
:param label: Volume label to use
"""
if fs == 'swap':
args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
    # Add -F to force non-interactive execution on a non-block device.
if fs in ('ext3', 'ext4', 'ntfs'):
args.extend(['-F'])
if label:
if fs in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
args.extend([label_opt, label])
args.append(path)
processutils.execute(*args)
@nova.privsep.sys_admin_pctxt.entrypoint
def _inner_configurable_mkfs(os_type, fs_label, target):
mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or
'') % {'fs_label': fs_label, 'target': target}
processutils.execute(*mkfs_command.split())
# NOTE(mikal): this method is deliberately not wrapped in a privsep entrypoint
def configurable_mkfs(os_type, fs_label, target, run_as_root,
default_ephemeral_format, specified_fs=None):
# Format a file or block device using a user provided command for each
# os type. If user has not provided any configuration, format type will
# be used according to a default_ephemeral_format configuration or a
# system default.
global _MKFS_COMMAND
global _DEFAULT_MKFS_COMMAND
mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or
'') % {'fs_label': fs_label, 'target': target}
if mkfs_command:
if run_as_root:
_inner_configurable_mkfs(os_type, fs_label, target)
else:
processutils.execute(*mkfs_command.split())
else:
if not specified_fs:
specified_fs = default_ephemeral_format
if not specified_fs:
specified_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type,
_DEFAULT_FILE_SYSTEM)
if run_as_root:
mkfs(specified_fs, target, fs_label)
else:
unprivileged_mkfs(specified_fs, target, fs_label)
|
klmitch/nova
|
nova/privsep/fs.py
|
Python
|
apache-2.0
| 12,310
|
import itertools
import logging
import re
import time
import urllib
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union
import pytz
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import connection
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.shortcuts import render
from django.template import loader
from django.urls import reverse
from django.utils import translation
from django.utils.timesince import timesince
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from psycopg2.sql import SQL, Composable, Literal
from analytics.lib.counts import COUNT_STATS, CountStat
from analytics.lib.time_utils import time_range
from analytics.models import (
BaseCount,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
last_successful_fill,
)
from confirmation.models import Confirmation, _properties, confirmation_url
from confirmation.settings import STATUS_ACTIVE
from zerver.decorator import (
require_non_guest_user,
require_server_admin,
require_server_admin_api,
to_utc_datetime,
zulip_login_required,
)
from zerver.lib.actions import (
do_change_plan_type,
do_deactivate_realm,
do_scrub_realm,
do_send_realm_reactivation_email,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.i18n import get_and_set_request_language, get_language_translation_data
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.subdomains import get_subdomain_from_hostname
from zerver.lib.timestamp import convert_to_UTC, timestamp_to_datetime
from zerver.lib.validator import to_non_negative_int
from zerver.models import (
Client,
MultiuseInvite,
PreregistrationUser,
Realm,
UserActivity,
UserActivityInterval,
UserProfile,
get_realm,
)
from zerver.views.invite import get_invitee_emails_set
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
approve_sponsorship,
attach_discount_to_realm,
downgrade_at_the_end_of_billing_cycle,
downgrade_now_without_creating_additional_invoices,
get_current_plan_by_realm,
get_customer_by_realm,
get_discount_for_realm,
get_latest_seat_count,
make_end_of_cycle_updates_if_needed,
update_billing_method_of_current_plan,
update_sponsorship_status,
void_all_open_invoices,
)
if settings.ZILENCER_ENABLED:
from zilencer.models import RemoteInstallationCount, RemoteRealmCount, RemoteZulipServer
MAX_TIME_FOR_FULL_ANALYTICS_GENERATION = timedelta(days=1, minutes=30)
def is_analytics_ready(realm: Realm) -> bool:
return (timezone_now() - realm.date_created) > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION
def render_stats(request: HttpRequest, data_url_suffix: str, target_name: str,
for_installation: bool=False, remote: bool=False,
                 analytics_ready: bool=True) -> HttpResponse:
page_params = dict(
data_url_suffix=data_url_suffix,
for_installation=for_installation,
remote=remote,
debug_mode=False,
)
request_language = get_and_set_request_language(
request,
request.user.default_language,
translation.get_language_from_path(request.path_info)
)
page_params["translation_data"] = get_language_translation_data(request_language)
return render(request,
'analytics/stats.html',
context=dict(target_name=target_name,
page_params=page_params,
analytics_ready=analytics_ready))
@zulip_login_required
def stats(request: HttpRequest) -> HttpResponse:
realm = request.user.realm
if request.user.is_guest:
# TODO: Make @zulip_login_required pass the UserProfile so we
# can use @require_member_or_admin
raise JsonableError(_("Not allowed for guest users"))
return render_stats(request, '', realm.name or realm.string_id,
analytics_ready=is_analytics_ready(realm))
@require_server_admin
@has_request_variables
def stats_for_realm(request: HttpRequest, realm_str: str) -> HttpResponse:
try:
realm = get_realm(realm_str)
except Realm.DoesNotExist:
return HttpResponseNotFound(f"Realm {realm_str} does not exist")
return render_stats(request, f'/realm/{realm_str}', realm.name or realm.string_id,
analytics_ready=is_analytics_ready(realm))
@require_server_admin
@has_request_variables
def stats_for_remote_realm(request: HttpRequest, remote_server_id: int,
remote_realm_id: int) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return render_stats(request, f'/remote/{server.id}/realm/{remote_realm_id}',
f"Realm {remote_realm_id} on server {server.hostname}")
@require_server_admin_api
@has_request_variables
def get_chart_data_for_realm(request: HttpRequest, user_profile: UserProfile,
realm_str: str, **kwargs: Any) -> HttpResponse:
try:
realm = get_realm(realm_str)
except Realm.DoesNotExist:
raise JsonableError(_("Invalid organization"))
return get_chart_data(request=request, user_profile=user_profile, realm=realm, **kwargs)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_realm(
request: HttpRequest, user_profile: UserProfile, remote_server_id: int,
remote_realm_id: int, **kwargs: Any) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return get_chart_data(request=request, user_profile=user_profile, server=server,
remote=True, remote_realm_id=int(remote_realm_id), **kwargs)
@require_server_admin
def stats_for_installation(request: HttpRequest) -> HttpResponse:
return render_stats(request, '/installation', 'installation', True)
@require_server_admin
def stats_for_remote_installation(request: HttpRequest, remote_server_id: int) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return render_stats(request, f'/remote/{server.id}/installation',
f'remote installation {server.hostname}', True, True)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_installation(request: HttpRequest, user_profile: UserProfile,
chart_name: str=REQ(), **kwargs: Any) -> HttpResponse:
return get_chart_data(request=request, user_profile=user_profile, for_installation=True, **kwargs)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_installation(
request: HttpRequest,
user_profile: UserProfile,
remote_server_id: int,
chart_name: str=REQ(),
**kwargs: Any) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return get_chart_data(request=request, user_profile=user_profile, for_installation=True,
remote=True, server=server, **kwargs)
@require_non_guest_user
@has_request_variables
def get_chart_data(request: HttpRequest, user_profile: UserProfile, chart_name: str=REQ(),
min_length: Optional[int]=REQ(converter=to_non_negative_int, default=None),
start: Optional[datetime]=REQ(converter=to_utc_datetime, default=None),
end: Optional[datetime]=REQ(converter=to_utc_datetime, default=None),
realm: Optional[Realm]=None, for_installation: bool=False,
remote: bool=False, remote_realm_id: Optional[int]=None,
server: Optional["RemoteZulipServer"]=None) -> HttpResponse:
if for_installation:
if remote:
assert settings.ZILENCER_ENABLED
aggregate_table = RemoteInstallationCount
assert server is not None
else:
aggregate_table = InstallationCount
else:
if remote:
assert settings.ZILENCER_ENABLED
aggregate_table = RemoteRealmCount
assert server is not None
assert remote_realm_id is not None
else:
aggregate_table = RealmCount
if chart_name == 'number_of_humans':
stats = [
COUNT_STATS['1day_actives::day'],
COUNT_STATS['realm_active_humans::day'],
COUNT_STATS['active_users_audit:is_bot:day']]
tables = [aggregate_table]
subgroup_to_label: Dict[CountStat, Dict[Optional[str], str]] = {
stats[0]: {None: '_1day'},
stats[1]: {None: '_15day'},
stats[2]: {'false': 'all_time'}}
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == 'messages_sent_over_time':
stats = [COUNT_STATS['messages_sent:is_bot:hour']]
tables = [aggregate_table, UserCount]
subgroup_to_label = {stats[0]: {'false': 'human', 'true': 'bot'}}
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_message_type':
stats = [COUNT_STATS['messages_sent:message_type:day']]
tables = [aggregate_table, UserCount]
subgroup_to_label = {stats[0]: {'public_stream': _('Public streams'),
'private_stream': _('Private streams'),
'private_message': _('Private messages'),
'huddle_message': _('Group private messages')}}
labels_sort_function = lambda data: sort_by_totals(data['everyone'])
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_client':
stats = [COUNT_STATS['messages_sent:client:day']]
tables = [aggregate_table, UserCount]
# Note that the labels are further re-written by client_label_map
subgroup_to_label = {stats[0]:
{str(id): name for id, name in Client.objects.values_list('id', 'name')}}
labels_sort_function = sort_client_labels
include_empty_subgroups = False
elif chart_name == 'messages_read_over_time':
stats = [COUNT_STATS['messages_read::hour']]
tables = [aggregate_table, UserCount]
subgroup_to_label = {stats[0]: {None: 'read'}}
labels_sort_function = None
include_empty_subgroups = True
else:
raise JsonableError(_("Unknown chart name: {}").format(chart_name))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None:
start = convert_to_UTC(start)
if end is not None:
end = convert_to_UTC(end)
if start is not None and end is not None and start > end:
raise JsonableError(_("Start time is later than end time. Start: {start}, End: {end}").format(
start=start, end=end,
))
if realm is None:
# Note that this value is invalid for Remote tables; be
# careful not to access it in those code paths.
realm = user_profile.realm
if remote:
# For remote servers, we don't have fillstate data, and thus
# should simply use the first and last data points for the
# table.
assert server is not None
if not aggregate_table.objects.filter(server=server).exists():
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
if start is None:
start = aggregate_table.objects.filter(server=server).first().end_time
if end is None:
end = aggregate_table.objects.filter(server=server).last().end_time
else:
# Otherwise, we can use tables on the current server to
# determine a nice range, and some additional validation.
if start is None:
if for_installation:
start = installation_epoch()
else:
start = realm.date_created
if end is None:
end = max(last_successful_fill(stat.property) or
datetime.min.replace(tzinfo=timezone.utc) for stat in stats)
if start > end and (timezone_now() - start > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION):
logging.warning("User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation of realm or installation) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?", realm.string_id, start, end)
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
assert len({stat.frequency for stat in stats}) == 1
end_times = time_range(start, end, stats[0].frequency, min_length)
data: Dict[str, Any] = {
'end_times': [int(end_time.timestamp()) for end_time in end_times],
'frequency': stats[0].frequency,
}
aggregation_level = {
InstallationCount: 'everyone',
RealmCount: 'everyone',
UserCount: 'user',
}
if settings.ZILENCER_ENABLED:
aggregation_level[RemoteInstallationCount] = 'everyone'
aggregation_level[RemoteRealmCount] = 'everyone'
# -1 is a placeholder value, since there is no relevant filtering on InstallationCount
id_value = {
InstallationCount: -1,
RealmCount: realm.id,
UserCount: user_profile.id,
}
if settings.ZILENCER_ENABLED:
if server is not None:
id_value[RemoteInstallationCount] = server.id
# TODO: RemoteRealmCount logic doesn't correctly handle
# filtering by server_id as well.
if remote_realm_id is not None:
id_value[RemoteRealmCount] = remote_realm_id
for table in tables:
data[aggregation_level[table]] = {}
for stat in stats:
data[aggregation_level[table]].update(get_time_series_by_subgroup(
stat, table, id_value[table], end_times, subgroup_to_label[stat], include_empty_subgroups))
if labels_sort_function is not None:
data['display_order'] = labels_sort_function(data)
else:
data['display_order'] = None
return json_success(data=data)
def sort_by_totals(value_arrays: Dict[str, List[int]]) -> List[str]:
totals = [(sum(values), label) for label, values in value_arrays.items()]
totals.sort(reverse=True)
return [label for total, label in totals]
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data: Dict[str, Dict[str, List[int]]]) -> List[str]:
realm_order = sort_by_totals(data['everyone'])
user_order = sort_by_totals(data['user'])
label_sort_values: Dict[str, float] = {}
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i-.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(),
key=lambda x: x[1])]
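# Illustrative sketch (not part of Zulip): how the client ordering above behaves
# on toy data; the labels and counts are made up. Note that running this module
# directly still requires a configured Django environment.
if __name__ == '__main__':
    toy_data = {
        'everyone': {'website': [5, 5], 'ZulipElectron': [3, 0], 'ZulipMobile': [1, 1]},
        'user': {'ZulipMobile': [4, 4], 'website': [1, 0], 'ZulipElectron': [0, 0]},
    }
    print(sort_client_labels(toy_data))  # mobile first for this user, then website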
def table_filtered_to_id(table: Type[BaseCount], key_id: int) -> QuerySet:
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
elif settings.ZILENCER_ENABLED and table == RemoteInstallationCount:
return RemoteInstallationCount.objects.filter(server_id=key_id)
elif settings.ZILENCER_ENABLED and table == RemoteRealmCount:
return RemoteRealmCount.objects.filter(realm_id=key_id)
else:
raise AssertionError(f"Unknown table: {table}")
def client_label_map(name: str) -> str:
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipElectron":
return "Desktop app"
if name == "ZulipAndroid":
return "Old Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "Mobile app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip"):-len("Webhook")] + " webhook"
return name
def rewrite_client_arrays(value_arrays: Dict[str, List[int]]) -> Dict[str, List[int]]:
mapped_arrays: Dict[str, List[int]] = {}
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
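# Illustrative sketch (not part of Zulip): client_label_map and
# rewrite_client_arrays collapse per-Client series that share a display name;
# the series below are made up.
if __name__ == '__main__':
    print(client_label_map('ZulipGitHubWebhook'))  # GitHub webhook
    print(rewrite_client_arrays({'ZulipPython': [1, 2], 'API: Python': [3, 4]}))
    # -> {'Python API': [4, 6]}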
def get_time_series_by_subgroup(stat: CountStat,
table: Type[BaseCount],
key_id: int,
end_times: List[datetime],
subgroup_to_label: Dict[Optional[str], str],
include_empty_subgroups: bool) -> Dict[str, List[int]]:
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts: Dict[Optional[str], Dict[datetime, int]] = defaultdict(lambda: defaultdict(int))
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in subgroup_to_label.items():
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS['messages_sent:client:day']:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title: str, cols: Sequence[str], rows: Sequence[Any], has_row_class: bool = False) -> str:
if not has_row_class:
def fix_row(row: Any) -> Dict[str, Any]:
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data),
)
return content
def dictfetchall(cursor: connection.cursor) -> List[Dict[str, Any]]:
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip((col[0] for col in desc), row))
for row in cursor.fetchall()
]
def get_realm_day_counts() -> Dict[str, Dict[str, str]]:
query = SQL('''
select
r.string_id,
(now()::date - date_sent::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
date_sent > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
''')
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts: Dict[str, Dict[int, int]] = defaultdict(dict)
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts[1:])
max_cnt = max(raw_cnts[1:])
def format_count(cnt: int, style: Optional[str]=None) -> str:
if style is not None:
good_bad = style
elif cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return f'<td class="number {good_bad}">{cnt}</td>'
cnts = (format_count(raw_cnts[0], 'neutral')
+ ''.join(map(format_count, raw_cnts[1:])))
result[string_id] = dict(cnts=cnts)
return result
def get_plan_name(plan_type: int) -> str:
return ['', 'self hosted', 'limited', 'standard', 'open source'][plan_type]
def realm_summary_table(realm_minutes: Dict[str, float]) -> str:
now = timezone_now()
query = SQL('''
SELECT
realm.string_id,
realm.date_created,
realm.plan_type,
coalesce(wau_table.value, 0) wau_count,
coalesce(dau_table.value, 0) dau_count,
coalesce(user_count_table.value, 0) user_profile_count,
coalesce(bot_count_table.value, 0) bot_count
FROM
zerver_realm as realm
LEFT OUTER JOIN (
SELECT
value _14day_active_humans,
realm_id
from
analytics_realmcount
WHERE
property = 'realm_active_humans::day'
AND end_time > now() - interval '25 hours'
) as _14day_active_humans_table ON realm.id = _14day_active_humans_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = '7day_actives::day'
AND end_time > now() - interval '25 hours'
) as wau_table ON realm.id = wau_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = '1day_actives::day'
AND end_time > now() - interval '25 hours'
) as dau_table ON realm.id = dau_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = 'active_users_audit:is_bot:day'
AND subgroup = 'false'
AND end_time > now() - interval '25 hours'
) as user_count_table ON realm.id = user_count_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = 'active_users_audit:is_bot:day'
AND subgroup = 'true'
AND end_time > now() - interval '25 hours'
) as bot_count_table ON realm.id = bot_count_table.realm_id
WHERE
_14day_active_humans IS NOT NULL
or realm.plan_type = 3
ORDER BY
dau_count DESC,
string_id ASC
''')
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# Fetch all the realm administrator users
realm_admins: Dict[str, List[str]] = defaultdict(list)
for up in UserProfile.objects.select_related("realm").filter(
role=UserProfile.ROLE_REALM_ADMINISTRATOR,
is_active=True,
):
realm_admins[up.realm.string_id].append(up.delivery_email)
for row in rows:
row['date_created_day'] = row['date_created'].strftime('%Y-%m-%d')
row['plan_type_string'] = get_plan_name(row['plan_type'])
row['age_days'] = int((now - row['date_created']).total_seconds()
/ 86400)
row['is_new'] = row['age_days'] < 12 * 7
row['realm_admin_email'] = ', '.join(realm_admins[row['string_id']])
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# estimate annual subscription revenue
total_amount = 0
if settings.BILLING_ENABLED:
from corporate.lib.stripe import estimate_annual_recurring_revenue_by_realm
estimated_arrs = estimate_annual_recurring_revenue_by_realm()
for row in rows:
if row['string_id'] in estimated_arrs:
row['amount'] = estimated_arrs[row['string_id']]
total_amount += sum(estimated_arrs.values())
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '{:.1f}'.format(hours / row['dau_count'])
except Exception:
pass
# formatting
for row in rows:
row['stats_link'] = realm_stats_link(row['string_id'])
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row: Dict[str, int]) -> bool:
return row['dau_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_dau_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_wau_count = 0
for row in rows:
total_dau_count += int(row['dau_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_wau_count += int(row['wau_count'])
total_row = dict(
string_id='Total',
plan_type_string="",
amount=total_amount,
stats_link = '',
date_created_day='',
realm_admin_email='',
dau_count=total_dau_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
wau_count=total_wau_count,
)
rows.insert(0, total_row)
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites,
utctime=now.strftime('%Y-%m-%d %H:%MZ')),
)
return content
def user_activity_intervals() -> Tuple[mark_safe, Dict[str, float]]:
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end,
).select_related(
'user_profile',
'user_profile__realm',
).only(
'start',
'end',
'user_profile__delivery_email',
'user_profile__realm__string_id',
).order_by(
'user_profile__realm__string_id',
'user_profile__delivery_email',
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.delivery_email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += f'<hr>{string_id}\n'
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += f" {email:<37}{duration}\n"
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += f"\nTotal duration: {total_duration}\n"
output += f"\nTotal duration in minutes: {total_duration.total_seconds() / 60.}\n"
output += f"Total duration amortized to a month: {total_duration.total_seconds() * 30. / 60.}"
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
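# Illustrative sketch (not part of Zulip): the clamping used above to restrict
# each activity interval to the last-24-hours window, shown with plain
# datetimes (the values are made up).
if __name__ == '__main__':
    window_start = datetime(2020, 1, 1, tzinfo=timezone.utc)
    window_end = window_start + timedelta(hours=24)
    interval_start = window_start - timedelta(hours=2)
    interval_end = window_start + timedelta(hours=3)
    print(min(window_end, interval_end) - max(window_start, interval_start))  # 3:00:00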
def sent_messages_report(realm: str) -> str:
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots',
]
query = SQL('''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
date_sent::date date_sent,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
date_sent > now() - interval '2 week'
group by
date_sent::date
order by
date_sent::date
) humans on
series.day = humans.date_sent
left join (
select
date_sent::date date_sent,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
date_sent > now() - interval '2 week'
group by
date_sent::date
order by
date_sent::date
) bots on
series.day = bots.date_sent
''')
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries() -> List[Dict[str, str]]:
def get_page(query: Composable, cols: Sequence[str], title: str,
totals_columns: Sequence[int]=[]) -> Dict[str, str]:
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i: int,
fixup_func: Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None:
for row in rows:
row[i] = fixup_func(row[i])
total_row = []
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
elif col == 'Hostname':
for row in rows:
row[i] = remote_installation_stats_link(row[0], row[i])
if len(totals_columns) > 0:
if i == 0:
total_row.append("Total")
elif i in totals_columns:
total_row.append(str(sum(row[i] for row in rows if row[i] is not None)))
else:
total_row.append('')
if len(totals_columns) > 0:
rows.insert(0, total_row)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title,
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = f'{mobile_type} usage'
query = SQL('''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like {mobile_type}
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''').format(
mobile_type=Literal(mobile_type),
)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time',
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = SQL('''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
''')
cols = [
'Realm',
'Client',
'Hits',
'Last time',
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = SQL('''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
''')
cols = [
'Realm',
'Client',
'Hits',
'Last time',
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = SQL('''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
''')
cols = [
'Client',
'Realm',
'Hits',
'Last time',
]
pages.append(get_page(query, cols, title))
title = 'Remote Zulip servers'
query = SQL('''
with icount as (
select
server_id,
max(value) as max_value,
max(end_time) as max_end_time
from zilencer_remoteinstallationcount
where
property='active_users:is_bot:day'
and subgroup='false'
group by server_id
),
remote_push_devices as (
select server_id, count(distinct(user_id)) as push_user_count from zilencer_remotepushdevicetoken
group by server_id
)
select
rserver.id,
rserver.hostname,
rserver.contact_email,
max_value,
push_user_count,
max_end_time
from zilencer_remotezulipserver rserver
left join icount on icount.server_id = rserver.id
left join remote_push_devices on remote_push_devices.server_id = rserver.id
order by max_value DESC NULLS LAST, push_user_count DESC NULLS LAST
''')
cols = [
'ID',
'Hostname',
'Contact email',
'Analytics users',
'Mobile users',
'Last update time',
]
pages.append(get_page(query, cols, title,
totals_columns=[3, 4]))
return pages
@require_server_admin
@has_request_variables
def get_activity(request: HttpRequest) -> HttpResponse:
duration_content, realm_minutes = user_activity_intervals()
counts_content: str = realm_summary_table(realm_minutes)
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title, is_home=True),
)
def get_confirmations(types: List[int], object_ids: List[int],
hostname: Optional[str]=None) -> List[Dict[str, Any]]:
lowest_datetime = timezone_now() - timedelta(days=30)
confirmations = Confirmation.objects.filter(type__in=types, object_id__in=object_ids,
date_sent__gte=lowest_datetime)
confirmation_dicts = []
for confirmation in confirmations:
realm = confirmation.realm
content_object = confirmation.content_object
type = confirmation.type
days_to_activate = _properties[type].validity_in_days
expiry_date = confirmation.date_sent + timedelta(days=days_to_activate)
if hasattr(content_object, "status"):
if content_object.status == STATUS_ACTIVE:
link_status = "Link has been clicked"
else:
link_status = "Link has never been clicked"
else:
link_status = ""
if timezone_now() < expiry_date:
expires_in = timesince(confirmation.date_sent, expiry_date)
else:
expires_in = "Expired"
url = confirmation_url(confirmation.confirmation_key, realm, type)
confirmation_dicts.append({"object": confirmation.content_object,
"url": url, "type": type, "link_status": link_status,
"expires_in": expires_in})
return confirmation_dicts
@require_server_admin
def support(request: HttpRequest) -> HttpResponse:
context: Dict[str, Any] = {}
if settings.BILLING_ENABLED and request.method == "POST":
# We check that request.POST only has two keys in it: The
# realm_id and a field to change.
keys = set(request.POST.keys())
if "csrfmiddlewaretoken" in keys:
keys.remove("csrfmiddlewaretoken")
if len(keys) != 2:
return json_error(_("Invalid parameters"))
realm_id = request.POST.get("realm_id")
realm = Realm.objects.get(id=realm_id)
if request.POST.get("plan_type", None) is not None:
new_plan_type = int(request.POST.get("plan_type"))
current_plan_type = realm.plan_type
do_change_plan_type(realm, new_plan_type)
msg = f"Plan type of {realm.string_id} changed from {get_plan_name(current_plan_type)} to {get_plan_name(new_plan_type)} "
context["message"] = msg
elif request.POST.get("discount", None) is not None:
new_discount = Decimal(request.POST.get("discount"))
current_discount = get_discount_for_realm(realm)
attach_discount_to_realm(realm, new_discount)
msg = f"Discount of {realm.string_id} changed to {new_discount} from {current_discount} "
context["message"] = msg
elif request.POST.get("status", None) is not None:
status = request.POST.get("status")
if status == "active":
do_send_realm_reactivation_email(realm)
context["message"] = f"Realm reactivation email sent to admins of {realm.string_id}."
elif status == "deactivated":
do_deactivate_realm(realm, request.user)
context["message"] = f"{realm.string_id} deactivated."
elif request.POST.get("billing_method", None) is not None:
billing_method = request.POST.get("billing_method")
if billing_method == "send_invoice":
update_billing_method_of_current_plan(realm, charge_automatically=False)
context["message"] = f"Billing method of {realm.string_id} updated to pay by invoice."
elif billing_method == "charge_automatically":
update_billing_method_of_current_plan(realm, charge_automatically=True)
context["message"] = f"Billing method of {realm.string_id} updated to charge automatically."
elif request.POST.get("sponsorship_pending", None) is not None:
sponsorship_pending = request.POST.get("sponsorship_pending")
if sponsorship_pending == "true":
update_sponsorship_status(realm, True)
context["message"] = f"{realm.string_id} marked as pending sponsorship."
elif sponsorship_pending == "false":
update_sponsorship_status(realm, False)
context["message"] = f"{realm.string_id} is no longer pending sponsorship."
elif request.POST.get('approve_sponsorship') is not None:
if request.POST.get('approve_sponsorship') == "approve_sponsorship":
approve_sponsorship(realm)
context["message"] = f"Sponsorship approved for {realm.string_id}"
elif request.POST.get('downgrade_method', None) is not None:
downgrade_method = request.POST.get('downgrade_method')
if downgrade_method == "downgrade_at_billing_cycle_end":
downgrade_at_the_end_of_billing_cycle(realm)
context["message"] = f"{realm.string_id} marked for downgrade at the end of billing cycle"
elif downgrade_method == "downgrade_now_without_additional_licenses":
downgrade_now_without_creating_additional_invoices(realm)
context["message"] = f"{realm.string_id} downgraded without creating additional invoices"
elif downgrade_method == "downgrade_now_void_open_invoices":
downgrade_now_without_creating_additional_invoices(realm)
voided_invoices_count = void_all_open_invoices(realm)
context["message"] = f"{realm.string_id} downgraded and voided {voided_invoices_count} open invoices"
elif request.POST.get("scrub_realm", None) is not None:
if request.POST.get("scrub_realm") == "scrub_realm":
do_scrub_realm(realm, acting_user=request.user)
context["message"] = f"{realm.string_id} scrubbed."
query = request.GET.get("q", None)
if query:
key_words = get_invitee_emails_set(query)
context["users"] = UserProfile.objects.filter(delivery_email__in=key_words)
realms = set(Realm.objects.filter(string_id__in=key_words))
for key_word in key_words:
try:
URLValidator()(key_word)
parse_result = urllib.parse.urlparse(key_word)
hostname = parse_result.hostname
assert hostname is not None
if parse_result.port:
hostname = f"{hostname}:{parse_result.port}"
subdomain = get_subdomain_from_hostname(hostname)
try:
realms.add(get_realm(subdomain))
except Realm.DoesNotExist:
pass
except ValidationError:
pass
for realm in realms:
realm.customer = get_customer_by_realm(realm)
current_plan = get_current_plan_by_realm(realm)
if current_plan is not None:
new_plan, last_ledger_entry = make_end_of_cycle_updates_if_needed(current_plan, timezone_now())
if last_ledger_entry is not None:
if new_plan is not None:
realm.current_plan = new_plan
else:
realm.current_plan = current_plan
realm.current_plan.licenses = last_ledger_entry.licenses
realm.current_plan.licenses_used = get_latest_seat_count(realm)
context["realms"] = realms
confirmations: List[Dict[str, Any]] = []
preregistration_users = PreregistrationUser.objects.filter(email__in=key_words)
confirmations += get_confirmations([Confirmation.USER_REGISTRATION, Confirmation.INVITATION,
Confirmation.REALM_CREATION], preregistration_users,
hostname=request.get_host())
multiuse_invites = MultiuseInvite.objects.filter(realm__in=realms)
confirmations += get_confirmations([Confirmation.MULTIUSE_INVITE], multiuse_invites)
confirmations += get_confirmations([Confirmation.REALM_REACTIVATION], [realm.id for realm in realms])
context["confirmations"] = confirmations
def realm_admin_emails(realm: Realm) -> str:
return ", ".join(realm.get_human_admin_users().order_by('delivery_email').values_list(
"delivery_email", flat=True))
context["realm_admin_emails"] = realm_admin_emails
context["get_discount_for_realm"] = get_discount_for_realm
context["realm_icon_url"] = realm_icon_url
context["Confirmation"] = Confirmation
return render(request, 'analytics/support.html', context=context)
def get_user_activity_records_for_realm(realm: str, is_bot: bool) -> QuerySet:
fields = [
'user_profile__full_name',
'user_profile__delivery_email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot,
)
records = records.order_by("user_profile__delivery_email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email: str) -> List[QuerySet]:
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__delivery_email=email,
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records: List[QuerySet]) -> str:
cols = [
'query',
'client',
'count',
'last_visit',
]
def row(record: QuerySet) -> List[Any]:
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit),
]
rows = list(map(row, records))
title = 'Raw data'
return make_table(title, cols, rows)
def get_user_activity_summary(records: List[QuerySet]) -> Dict[str, Dict[str, Any]]:
    #: The `Any` used above should be `Union[int, datetime]`.
    #: However, the current version of `Union` does not work inside nested functions.
    #: We could use something like
    #: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`,
    #: but that would require carrying this long `Union` through the inner functions.
summary: Dict[str, Dict[str, Any]] = {}
def update(action: str, record: QuerySet) -> None:
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit,
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit,
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer',
'update_pointer_backend']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date: Optional[datetime]) -> str:
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email: str) -> mark_safe:
url = reverse(get_user_activity, kwargs=dict(email=email))
email_link = f'<a href="{url}">{email}</a>'
return mark_safe(email_link)
def realm_activity_link(realm_str: str) -> mark_safe:
url = reverse(get_realm_activity, kwargs=dict(realm_str=realm_str))
realm_link = f'<a href="{url}">{realm_str}</a>'
return mark_safe(realm_link)
def realm_stats_link(realm_str: str) -> mark_safe:
url = reverse(stats_for_realm, kwargs=dict(realm_str=realm_str))
stats_link = f'<a href="{url}"><i class="fa fa-pie-chart"></i>{realm_str}</a>'
return mark_safe(stats_link)
def remote_installation_stats_link(server_id: int, hostname: str) -> mark_safe:
url = reverse(stats_for_remote_installation, kwargs=dict(remote_server_id=server_id))
stats_link = f'<a href="{url}"><i class="fa fa-pie-chart"></i>{hostname}</a>'
return mark_safe(stats_link)
def realm_client_table(user_summaries: Dict[str, Dict[str, Dict[str, Any]]]) -> str:
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary: Dict[str, Dict[str, Any]]) -> str:
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records: List[QuerySet],
admin_emails: Set[str]) -> Tuple[Dict[str, Dict[str, Any]], str]:
user_records = {}
def by_email(record: QuerySet) -> str:
return record.user_profile.delivery_email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary: Dict[str, Dict[str, datetime]], k: str) -> Optional[datetime]:
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary: Dict[str, Dict[str, str]], k: str) -> str:
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val: Optional[datetime]) -> bool:
age = timezone_now() - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row: Dict[str, Any]) -> str:
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@require_server_admin
def get_realm_activity(request: HttpRequest, realm_str: str) -> HttpResponse:
data: List[Tuple[str, str]] = []
all_user_records: Dict[str, Any] = {}
try:
admins = Realm.objects.get(string_id=realm_str).get_human_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound(f"Realm {realm_str} does not exist")
admin_emails = {admin.delivery_email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
title = realm_str
return render(
request,
'analytics/activity.html',
context=dict(data=data, realm_link=None, title=title),
)
@require_server_admin
def get_user_activity(request: HttpRequest, email: str) -> HttpResponse:
records = get_user_activity_records_for_email(email)
data: List[Tuple[str, str]] = []
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render(
request,
'analytics/activity.html',
context=dict(data=data, title=title),
)
|
showell/zulip
|
analytics/views.py
|
Python
|
apache-2.0
| 57,805
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.north.plugins.port.queue.common import QueueFull
from calvin.runtime.north.plugins.port.queue.fanout_base import FanoutBase
from calvin.utilities import calvinlogger
_log = calvinlogger.get_logger(__name__)
class FanoutMappedFIFO(FanoutBase):
"""
    A FIFO which routes tokens based on a mapping to peers
"""
def __init__(self, port_properties, peer_port_properties):
super(FanoutMappedFIFO, self).__init__(port_properties, peer_port_properties)
self._type = "dispatch:mapped"
def _state(self):
state = super(FanoutMappedFIFO, self)._state()
state['mapping'] = self.mapping
return state
def _set_state(self, state):
super(FanoutMappedFIFO, self)._set_state(state)
self.mapping = state['mapping']
def _set_port_mapping(self, mapping):
if not set(mapping.values()) == set(self.readers):
            _log.error("Illegal port mapping %s for readers %s" % (mapping, self.readers))
raise Exception("Illegal port mapping dictionary")
self.mapping = mapping
def _unwrap_data(self, data):
# data is a Token whose value is wrapped in a {selector:value} dict
mapped_value = data.value
select, value = mapped_value.popitem()
data.value = value
peer = self.mapping[select]
return data, peer
def write(self, data, metadata):
# print data, metadata
# metadata is port_id of containing port
data, peer = self._unwrap_data(data)
if not self.slots_available(1, peer):
# if not slots_available:
raise QueueFull()
# Write token in peer's FIFO
write_pos = self.write_pos[peer]
#_log.debug("WRITE2 %s %s %d\n%s" % (metadata, peer, write_pos, str(map(str, self.fifo[peer]))))
self.fifo[peer][write_pos % self.N] = data
self.write_pos[peer] = write_pos + 1
return True
def slots_available(self, length, metadata):
# print "slots_available", length, metadata
# Sometimes metadata = id of the outport owning this queue (called from @condition?)
# Darn. That means that we can only check for the case where EVERY sub-queue has at least 'length' slots free...
# Oh well, such is life.
if metadata in self.readers:
return self.write_pos[metadata] - self.read_pos[metadata] < self.N - length
return all(self.write_pos[r] - self.read_pos[r] < self.N - length for r in self.readers)
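# Editor's note: standalone sketch, not part of the original module. It shows
# the routing convention _unwrap_data() relies on: each incoming token value is
# wrapped as a single-entry {selector: value} dict, and the selector picks the
# destination peer port via self.mapping. The selector and peer names below are
# made up.
def _example_mapped_routing():
    mapping = {"even": "peer_a", "odd": "peer_b"}  # selector -> peer port id
    wrapped_value = {"odd": 17}                    # shape of data.value before unwrapping
    selector, value = wrapped_value.popitem()
    return value, mapping[selector]                # -> (17, "peer_b")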
|
EricssonResearch/calvin-base
|
calvin/runtime/north/plugins/port/queue/fanout_mapped_fifo.py
|
Python
|
apache-2.0
| 3,069
|
# Copyright (c) 2013 Mortar Data
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import abc
import time
import boto.dynamodb2
from boto.dynamodb2.exceptions import DynamoDBError
from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import STRING
import luigi
import logging
from mortar.luigi import target_factory
logger = logging.getLogger('luigi-interface')
class DynamoDBClient(object):
"""
A boto-based client for interacting with DynamoDB from Luigi.
    .. seealso:: https://help.mortardata.com/technologies/luigi/dynamodb_tasks
"""
# interval to wait between polls to DynamoDB API in seconds
TABLE_OPERATION_RESULTS_POLLING_SECONDS = 5.0
# timeout for DynamoDB table creation and ramp-up in seconds
TABLE_OPERATION_RESULTS_TIMEOUT_SECONDS = 60.0 * 30.0
def __init__(self, region='us-east-1', aws_access_key_id=None, aws_secret_access_key=None):
"""
:type region: str
:param region: AWS region where your DynamoDB instance is located. Default: us-east-1.
:type aws_access_key_id: str
:param aws_access_key_id: AWS Access Key ID. If not provided, will be looked up from luigi configuration in dynamodb.aws_access_key_id.
:type aws_secret_access_key: str
:param aws_secret_access_key: AWS Secret Access Key. If not provided, will be looked up from luigi configuration in dynamodb.aws_secret_access_key.
"""
if not aws_access_key_id:
aws_access_key_id = luigi.configuration.get_config().get('dynamodb', 'aws_access_key_id')
if not aws_secret_access_key:
aws_secret_access_key = luigi.configuration.get_config().get('dynamodb', 'aws_secret_access_key')
self.dynamo_cx = boto.dynamodb2.connect_to_region(
region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
is_secure=True)
def create_table(self, table_name, schema, throughput, indexes=None):
"""
Create a new DynamoDB table and block until it is ready to use.
:type table_name: str
:param table_name: Name for table
:type schema: list of boto.dynamodb2.fields.HashKey
:param schema: Table schema
:type throughput: dict with {'read': read_throughput, 'write': write_throughput}
:param throughput: Initial table throughput
:type indexes: list of boto.dynamodb2.fields.AllIndex
:param indexes: Initial indexes for the table. Default: no indexes.
:rtype: boto.dynamodb2.table.Table:
:returns: Newly created Table
"""
table = Table.create(table_name,
schema=schema,
throughput=throughput,
connection=self.dynamo_cx,
indexes=indexes
)
logger.info('Created new dynamodb table %s with schema %s' % \
(table_name, schema))
return self._poll_until_table_active(table)
def get_table(self, table_name):
"""
Fetch a Table from DynamoDB.
NOTE: this is a somewhat expensive operation,
which must query dynamo for the current state
of the table.
:type table_name: str
:param table_name: Name of Table to load
:rtype: boto.dynamodb2.table.Table:
:returns: Requested Table
"""
table = Table(table_name, connection=self.dynamo_cx)
# must describe the table, or it doesn't have the correct throughput values
table.describe()
return table
def update_throughput(self, table_name, throughput):
"""
        Update a table's throughput, increasing it stepwise by at most
        2X per iteration until the table reaches the desired throughput
        (decreases are applied in a single step).
        .. note:: As of Oct 2014, stepwise update is no longer required for DynamoDB.
:rtype: boto.dynamodb2.table.Table:
:returns: Table with updated throughput
"""
table = self.get_table(table_name)
# can only go up by 2X at a time; can go as far down in one time as wanted
i = 0
while (table.throughput['read'] != throughput['read']) or \
(table.throughput['write'] != throughput['write']):
request_throughput = {'read': min(throughput['read'], 2 * table.throughput['read']),
'write': min(throughput['write'], 2 * table.throughput['write'])}
logger.info('Round %s: Updating table to throughput %s' % (i, request_throughput))
table.update(request_throughput)
table = self._poll_until_table_active(table)
i += 1
return table
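    # Editor's note (not in the original source): a worked trace of the stepwise
    # ramp-up above, assuming a current throughput of read=10/write=10 and a
    # target of read=50/write=50. Successive update() requests would ask for
    # 20/20, then 40/40, then 50/50, since (per the comment above) each increase
    # was capped at 2X while a decrease could be applied in a single step.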
def _poll_until_table_active(self, table):
start_time = time.time()
is_table_ready = False
while (not is_table_ready) and (time.time() - start_time < DynamoDBClient.TABLE_OPERATION_RESULTS_TIMEOUT_SECONDS):
try:
describe_result = table.describe()
status = describe_result['Table']['TableStatus']
if status == 'ACTIVE':
logger.info('Table %s is ACTIVE with throughput %s' % (table.table_name, table.throughput))
is_table_ready = True
else:
logger.debug('Table %s is in status %s' % (table.table_name, status))
time.sleep(DynamoDBClient.TABLE_OPERATION_RESULTS_POLLING_SECONDS)
            except DynamoDBError as e:
logger.error('Error querying DynamoDB for table status; retrying. Error: %s' % e)
if not is_table_ready:
raise RuntimeError('Timed out waiting for DynamoDB table %s to be ACTIVE' % table.table_name)
return table
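# Editor's note: hypothetical usage sketch, not part of the original module.
# The table name, key name, and throughput values below are made up; credentials
# fall back to the [dynamodb] section of the luigi configuration when not passed
# explicitly (see __init__ above).
def _example_create_table():
    client = DynamoDBClient(region='us-east-1')
    schema = [HashKey('user_id', data_type=STRING)]
    throughput = {'read': 10, 'write': 5}
    return client.create_table('example_table', schema, throughput)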
class DynamoDBTask(luigi.Task):
"""
Superclass for Luigi Tasks interacting with DynamoDB.
    .. seealso:: https://help.mortardata.com/technologies/luigi/dynamodb_tasks
"""
@abc.abstractmethod
def table_name(self):
"""
Name of the table on which operation should be performed.
:rtype: str:
:returns: table_name for operation
"""
raise RuntimeError("Please implement the table_name method")
@abc.abstractmethod
def output_token(self):
"""
Luigi Target providing path to a token that indicates
completion of this Task.
:rtype: Target:
:returns: Target for Task completion token
"""
raise RuntimeError("Please implement the output_token method")
def output(self):
"""
The output for this Task. Returns the output token
by default, so the task only runs if the token does not
already exist.
:rtype: Target:
:returns: Target for Task completion token
"""
return self.output_token()
class CreateDynamoDBTable(DynamoDBTask):
"""
Luigi Task to create a new table in DynamoDB.
This Task writes an output token to the location designated
by the `output_token` method to indicate that the
    table has been successfully created. The Task will fail
if the requested table name already exists.
Table creation in DynamoDB takes between several seconds and several minutes; this Task will
block until creation has finished.
"""
# Initial read throughput of created table
read_throughput = luigi.IntParameter()
# Initial write throughput of created table
write_throughput = luigi.IntParameter()
# Name of the primary hash key for this table
hash_key = luigi.Parameter()
# Type of the primary hash key (boto.dynamodb2.types)
hash_key_type = luigi.Parameter()
# Name of the primary range key for this table, if it exists
range_key = luigi.Parameter(None)
# Type of the primary range key for this table, if it exists (boto.dynamodb2.types)
range_key_type = luigi.Parameter(None)
# Secondary indexes of the table, provided as a list of dictionaries
# [ {'name': sec_index, 'range_key': range_key_name, 'data_type': NUMBER} ]
indexes = luigi.Parameter(None)
def _generate_indexes(self):
"""
Create boto-friendly index data structure.
"""
all_index = []
for index in self.indexes:
all_index.append(AllIndex(index['name'], parts=[
                HashKey(self.hash_key, data_type=self.hash_key_type),
RangeKey(index['range_key'], data_type=index['data_type'])]))
return all_index
def run(self):
"""
Create the DynamoDB table.
"""
dynamodb_client = DynamoDBClient()
schema = [HashKey(self.hash_key, data_type=self.hash_key_type)]
if self.range_key:
schema.append(RangeKey(self.range_key, data_type=self.range_key_type))
throughput={'read': self.read_throughput,
'write': self.write_throughput}
if self.indexes:
dynamodb_client.create_table(self.table_name(), schema, throughput, indexes=self._generate_indexes())
else:
dynamodb_client.create_table(self.table_name(), schema, throughput)
# write token to note completion
target_factory.write_file(self.output_token())
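# Editor's note: hypothetical subclass sketch, not part of the original module.
# It shows the two abstract methods every DynamoDBTask subclass must implement;
# the table name and token path are made up, and luigi.LocalTarget stands in for
# whatever Target type a real pipeline would use. The remaining parameters
# (read_throughput, write_throughput, hash_key, hash_key_type, ...) are supplied
# as Luigi parameters when the task is scheduled.
class _ExampleCreateUsersTable(CreateDynamoDBTable):
    def table_name(self):
        return 'users'
    def output_token(self):
        return luigi.LocalTarget('/tmp/tokens/users_table_created')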
class UpdateDynamoDBThroughput(DynamoDBTask):
"""
Luigi Task to update the throughput of an existing DynamoDB table.
This Task writes an output token to the location designated
by the `output_token` method to indicate that the
table has been successfully updated. This Task will fail if the
table does not exist.
"""
# Target read throughput
read_throughput = luigi.IntParameter()
# Target write throughput
write_throughput = luigi.IntParameter()
def run(self):
"""
Update DynamoDB table throughput.
"""
dynamodb_client = DynamoDBClient()
throughput={'read': self.read_throughput,
'write': self.write_throughput}
dynamodb_client.update_throughput(self.table_name(), throughput)
# write token to note completion
target_factory.write_file(self.output_token())
class SanityTestDynamoDBTable(DynamoDBTask):
"""
    Luigi Task to sanity check that a set of sentinel IDs
    exists in a DynamoDB table (usually after loading it with data).
This Task writes an output token to the location designated
by the `output_token` method to indicate that the
Task has been successfully completed.
"""
# Name of the primary hash key for this table
hash_key = luigi.Parameter()
# number of entries required to be in the table
min_total_results = luigi.IntParameter(100)
# when testing total entries, require that these field names not be null
non_null_fields = luigi.Parameter([])
# number of results required to be returned for each primary key
result_length = luigi.IntParameter(5)
# when testing specific ids, how many are allowed to fail
failure_threshold = luigi.IntParameter(2)
@abc.abstractmethod
def ids(self):
"""
        List of sentinel IDs to sanity check.
:rtype: list of str:
:returns: list of IDs
"""
        raise RuntimeError("Must provide list of ids to sanity test")
def run(self):
"""
Run sanity check.
"""
dynamodb_client = DynamoDBClient()
table = dynamodb_client.get_table(self.table_name())
# check that the table contains at least min_total_results entries
limit = self.min_total_results
kw = {'limit': limit}
for field in self.non_null_fields:
kw['%s__null' % field] = False
results = [r for r in table.scan(**kw)]
num_results = len(results)
if num_results < limit:
exception_string = 'Sanity check failed: only found %s / %s expected results in table %s' % \
(num_results, limit, self.table_name())
logger.warn(exception_string)
raise DynamoDBTaskException(exception_string)
# do a check on specific ids
self._sanity_check_ids(table)
# write token to note completion
target_factory.write_file(self.output_token())
def _sanity_check_ids(self, table):
failure_count = 0
kw = {'limit': self.result_length}
for id in self.ids():
kw['%s__eq' % self.hash_key] = id
            results = list(table.query(**kw))
            if len(results) < self.result_length:
                failure_count += 1
                logger.info('Id %s only returned %s results.' % (id, len(results)))
if failure_count > self.failure_threshold:
exception_string = 'Sanity check failed: %s ids in table %s failed to return sufficient results' % \
(failure_count, self.table_name())
logger.warn(exception_string)
raise DynamoDBTaskException(exception_string)
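# Editor's note: hypothetical subclass sketch, not part of the original module.
# A concrete SanityTestDynamoDBTable only needs to supply ids() in addition to
# the DynamoDBTask abstract methods; the table name, token path, and ids listed
# here are made up.
class _ExampleSanityTestUsersTable(SanityTestDynamoDBTable):
    def table_name(self):
        return 'users'
    def output_token(self):
        return luigi.LocalTarget('/tmp/tokens/users_table_sanity_checked')
    def ids(self):
        return ['user-1', 'user-2', 'user-3']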
class DynamoDBTaskException(Exception):
"""
Exception thrown by DynamoDBTask subclasses.
"""
pass
|
robbles/mortar-luigi
|
mortar/luigi/dynamodb.py
|
Python
|
apache-2.0
| 13,619
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchCreateFeatures
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_async]
from google.cloud import aiplatform_v1
async def sample_batch_create_features():
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
requests = aiplatform_v1.CreateFeatureRequest()
requests.parent = "parent_value"
requests.feature.value_type = "BYTES"
requests.feature_id = "feature_id_value"
    request = aiplatform_v1.BatchCreateFeaturesRequest(
        parent="parent_value",
        requests=[requests],
    )
    # Make the request (the async client call and the operation result are both awaited)
    operation = await client.batch_create_features(request=request)
    print("Waiting for operation to complete...")
    response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_async]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py
|
Python
|
apache-2.0
| 1,850
|