| code (stringlengths 3 – 1.05M) | repo_name (stringlengths 5 – 104) | path (stringlengths 4 – 251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64, 3 – 1.05M) |
|---|---|---|---|---|---|
"""
feed_entry_dedupe
~~~~~~~~~~~~~~~~~
Deduplicate entries for the same feed.
Duplicates are entries with the same title *and* summary/content.
If the old entry is read, the new one will be too.
If the old entry is unread, it will be marked as read in favor of the new one.
To load::
READER_PLUGIN='reader.plugins.feed_entry_dedupe:feed_entry_dedupe' \\
python -m reader update -v
Implemented for https://github.com/lemon24/reader/issues/79.
.. todo::
Some possible optimizations:
1. Do this once per feed (now it's one ``get_entries(feed=...)`` per entry).
2. Only get entries with the same title (not possible with the current API).
3. Add the entry directly as read instead of marking it afterwards
(requires a new hook to process the entry before it is added,
and Storage support).
"""
import logging
import re
log = logging.getLogger('reader.plugins.feed_entry_dedupe')
XML_TAG_RE = re.compile(r'<[^<]+?>', re.I)
XML_ENTITY_RE = re.compile(r'&[^\s;]+?;', re.I)
WHITESPACE_RE = re.compile(r'\s+')
def normalize(text):
text = XML_TAG_RE.sub(' ', text)
text = XML_ENTITY_RE.sub(' ', text)
text = WHITESPACE_RE.sub(' ', text).strip()
text = text.lower()
return text
def first_content(entry):
return next((c.value for c in (entry.content or ()) if c.type == 'text/html'), None)
def is_duplicate(one, two):
same_title = False
if one.title and two.title:
same_title = normalize(one.title or '') == normalize(two.title or '')
same_text = False
if one.summary and two.summary:
same_text = normalize(one.summary) == normalize(two.summary)
else:
one_content = first_content(one)
two_content = first_content(two)
if one_content and two_content:
same_text = normalize(one_content) == normalize(two_content)
return same_title and same_text
def feed_entry_dedupe_plugin(reader, url, entry):
duplicates = [
e
for e in reader.get_entries(feed=url)
if e.id != entry.id and is_duplicate(entry, e)
]
if not duplicates:
return
if all(d.read for d in duplicates):
log.info(
"%r (%s): found read duplicates, marking this as read",
(url, entry.id),
entry.title,
)
reader.mark_as_read((url, entry.id))
else:
for duplicate in duplicates:
reader.mark_as_read(duplicate)
log.info(
"%r (%s): found unread duplicates, marking duplicates as read",
(url, entry.id),
entry.title,
)
def feed_entry_dedupe(reader):
reader._post_entry_add_plugins.append(feed_entry_dedupe_plugin)
| kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/reader/plugins/feed_entry_dedupe.py | Python | apache-2.0 | 2,706 |
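# A minimal sketch (hypothetical entries, not part of the plugin) showing how the
# dedupe rule above behaves, assuming normalize() and is_duplicate() from the module
# are in scope: two entries are duplicates when their normalized titles match and
# their summaries (or first text/html content) match.
from collections import namedtuple

ExampleEntry = namedtuple('ExampleEntry', 'id title summary content')

old = ExampleEntry('1', 'Hello <b>World</b>', '<p>Same&nbsp;text</p>', ())
new = ExampleEntry('2', 'hello   world', 'Same text', ())

assert normalize(old.title) == 'hello world'   # tags, entities and whitespace collapse
assert is_duplicate(old, new)                  # same title and summary after normalization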
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-01 19:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20160201_1842'),
]
operations = [
migrations.AddField(
model_name='article',
name='slug',
field=models.SlugField(default='', max_length=100, unique=True, verbose_name='slug'),
preserve_default=False,
),
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(default='', max_length=100, unique=True, verbose_name='slug'),
preserve_default=False,
),
migrations.AddField(
model_name='tag',
name='slug',
field=models.SlugField(default='', max_length=100, unique=True, verbose_name='slug'),
preserve_default=False,
),
]
| janusnic/dj-21v | unit_15/mysite/blog/migrations/0003_auto_20160201_1923.py | Python | mit | 990 |
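# Hedged sketch (blog/models.py from this repo is not shown here): an AddField
# migration like the one above is what `makemigrations` produces after adding a
# field of this shape to the Article, Category and Tag models.
from django.db import models

class Article(models.Model):
    slug = models.SlugField('slug', max_length=100, unique=True)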
# Generated by Django 3.1.3 on 2021-01-02 13:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manager', '0009_auto_20201202_1559'),
]
operations = [
migrations.AlterField(
model_name='setting',
name='rir',
field=models.CharField(
blank=True,
choices=[
(None, '------'), ('0', 0), ('0.5', 0.5), ('1', 1), ('1.5', 1.5), ('2', 2),
('2.5', 2.5), ('3', 3), ('3.5', 3.5), ('4', 4)
],
max_length=3,
null=True,
verbose_name='RiR'
),
),
migrations.AlterField(
model_name='workoutlog',
name='rir',
field=models.CharField(
blank=True,
choices=[
(None, '------'), ('0', 0), ('0.5', 0.5), ('1', 1), ('1.5', 1.5), ('2', 2),
('2.5', 2.5), ('3', 3), ('3.5', 3.5), ('4', 4)
],
max_length=3,
null=True,
verbose_name='RiR'
),
),
]
| wger-project/wger | wger/manager/migrations/0010_auto_20210102_1446.py | Python | agpl-3.0 | 1,194 |
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class AddQuestionnaireCollectorResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'AddQuestionnaireCollectorResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'long'
}
self.result = None # AddQuestionnaireCollectorResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # long
| liosha2007/temporary-groupdocs-python-sdk | groupdocs/models/AddQuestionnaireCollectorResponse.py | Python | apache-2.0 | 1,193 |
import RPi.GPIO as GPIO
import time
print '#' * 20
print 'HELLO WORLD'
print '#' * 20
GPIO.setmode(GPIO.BOARD)
# Front left
GPIO.setup(15,GPIO.OUT)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
# Front right
GPIO.setup(19,GPIO.OUT)
GPIO.setup(21,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
RUN_TIME = raw_input('RUN TIME: ')
def reset():
GPIO.output(15, GPIO.LOW)
GPIO.output(11, GPIO.LOW)
GPIO.output(13, GPIO.LOW)
GPIO.output(19, GPIO.LOW)
GPIO.output(21, GPIO.LOW)
GPIO.output(23, GPIO.LOW)
def front_left_forward():
GPIO.output(15, GPIO.HIGH)
GPIO.output(11, GPIO.HIGH)
GPIO.output(13, GPIO.LOW)
def front_left_backward():
GPIO.output(15, GPIO.HIGH)
GPIO.output(11, GPIO.LOW)
GPIO.output(13, GPIO.HIGH)
def front_right_forward():
GPIO.output(19, GPIO.HIGH)
GPIO.output(21, GPIO.HIGH)
GPIO.output(23, GPIO.LOW)
def front_right_backward():
GPIO.output(19, GPIO.HIGH)
GPIO.output(21, GPIO.LOW)
GPIO.output(23, GPIO.HIGH)
def stop():
reset()
def main():
reset()
# Front left operation
print 'Front left wheel running forward'
front_left_forward()
time.sleep(int(RUN_TIME))
stop()
time.sleep(2)
print 'Front left wheel running backward'
front_left_backward()
time.sleep(int(RUN_TIME))
stop()
# Front right operation
print 'Front right wheel running forward'
front_right_forward()
time.sleep(int(RUN_TIME))
stop()
time.sleep(2)
print 'Front right wheel running backward'
front_right_backward()
time.sleep(int(RUN_TIME))
stop()
GPIO.cleanup()
if __name__ == '__main__':
main()
| voostar/Jiva | motor_draft/motor01.py | Python | mit | 1,643 |
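# Hedged sketch, not part of the original script: each wheel follows the same
# pattern (pin 15/19 held HIGH like an enable line, the other two pins setting
# direction), so the repeated functions above can be made table-driven.
WHEELS = {
    'front_left':  {'enable': 15, 'in1': 11, 'in2': 13},
    'front_right': {'enable': 19, 'in1': 21, 'in2': 23},
}

def drive(wheel, forward=True):
    pins = WHEELS[wheel]
    GPIO.output(pins['enable'], GPIO.HIGH)
    GPIO.output(pins['in1'], GPIO.HIGH if forward else GPIO.LOW)
    GPIO.output(pins['in2'], GPIO.LOW if forward else GPIO.HIGH)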
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from cement.utils.misc import minimal_logger
from ..lib import aws
from ..objects.exceptions import ServiceError, NotFoundError
from ..resources.strings import responses
LOG = minimal_logger(__name__)
def _make_api_call(operation_name, **operation_options):
return aws.make_api_call('elbv2', operation_name, **operation_options)
def get_instance_healths_from_target_groups(target_group_arns):
results = []
instance_healths = {}
for arn in target_group_arns:
try:
results.append( {
'TargetGroupArn': arn,
'Result': _make_api_call('describe_target_health', TargetGroupArn=arn)
} )
except ServiceError as e:
if e.message == responses['loadbalancer.targetgroup.notfound'].replace('{tgarn}', arn):
raise NotFoundError(e)
for result in results:
for description in result['Result']['TargetHealthDescriptions']:
instance_id = description['Target']['Id']
if instance_id not in instance_healths:
instance_healths[instance_id] = []
instance_healths[instance_id].append({
'TargetGroupArn': result['TargetGroupArn'],
'State': description['TargetHealth'].get('State', ''),
'Description': description['TargetHealth'].get('Description', ''),
'Reason': description['TargetHealth'].get('Reason', '')
})
return instance_healths  # map of instance_id => [target group health descriptions]
def get_target_group_healths(target_group_arns):
results = {}
for arn in target_group_arns:
try:
results[arn] = _make_api_call('describe_target_health', TargetGroupArn=arn)
except ServiceError as e:
if e.code == 'TargetGroupNotFound':
raise NotFoundError(e)
else:
raise e
return results  # map of target_group_arn => [target group health descriptions]
| quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/ebcli/lib/elbv2.py | Python | mit | 2,545 |
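# Hedged illustration with invented values: get_instance_healths_from_target_groups()
# above returns a dict keyed by instance id, with one health record per target group
# the instance is registered in, built from the describe_target_health responses.
example_instance_healths = {
    'i-0abc12345': [
        {
            'TargetGroupArn': 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/1234567890abcdef',
            'State': 'healthy',
            'Description': '',
            'Reason': '',
        },
    ],
}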
from __future__ import absolute_import
from pony.py23compat import PY2, iteritems, basestring, unicode, buffer, int_types
import os
os.environ["NLS_LANG"] = "AMERICAN_AMERICA.UTF8"
from datetime import datetime, date, time, timedelta
from decimal import Decimal
from uuid import UUID
import cx_Oracle
from pony.orm import core, sqlbuilding, dbapiprovider, sqltranslation
from pony.orm.core import log_orm, log_sql, DatabaseError, TranslationError
from pony.orm.dbschema import DBSchema, DBObject, Table, Column
from pony.orm.dbapiprovider import DBAPIProvider, wrap_dbapi_exceptions, get_version_tuple
from pony.utils import throw
from pony.converting import timedelta2str
NoneType = type(None)
class OraTable(Table):
def get_objects_to_create(table, created_tables=None):
result = Table.get_objects_to_create(table, created_tables)
for column in table.column_list:
if column.is_pk == 'auto':
sequence_name = column.converter.attr.kwargs.get('sequence_name')
sequence = OraSequence(table, sequence_name)
trigger = OraTrigger(table, column, sequence)
result.extend((sequence, trigger))
break
return result
class OraSequence(DBObject):
typename = 'Sequence'
def __init__(sequence, table, name=None):
sequence.table = table
table_name = table.name
if name is not None: sequence.name = name
elif isinstance(table_name, basestring): sequence.name = table_name + '_SEQ'
else: sequence.name = tuple(table_name[:-1]) + (table_name[0] + '_SEQ',)
def exists(sequence, provider, connection, case_sensitive=True):
if case_sensitive: sql = 'SELECT sequence_name FROM all_sequences ' \
'WHERE sequence_owner = :so and sequence_name = :sn'
else: sql = 'SELECT sequence_name FROM all_sequences ' \
'WHERE sequence_owner = :so and upper(sequence_name) = upper(:sn)'
owner_name, sequence_name = provider.split_table_name(sequence.name)
cursor = connection.cursor()
cursor.execute(sql, dict(so=owner_name, sn=sequence_name))
row = cursor.fetchone()
return row[0] if row is not None else None
def get_create_command(sequence):
schema = sequence.table.schema
seq_name = schema.provider.quote_name(sequence.name)
return schema.case('CREATE SEQUENCE %s NOCACHE') % seq_name
trigger_template = """
CREATE TRIGGER %s
BEFORE INSERT ON %s
FOR EACH ROW
BEGIN
IF :new.%s IS NULL THEN
SELECT %s.nextval INTO :new.%s FROM DUAL;
END IF;
END;""".strip()
class OraTrigger(DBObject):
typename = 'Trigger'
def __init__(trigger, table, column, sequence):
trigger.table = table
trigger.column = column
trigger.sequence = sequence
table_name = table.name
if not isinstance(table_name, basestring): table_name = table_name[-1]
trigger.name = table_name + '_BI' # Before Insert
def exists(trigger, provider, connection, case_sensitive=True):
if case_sensitive: sql = 'SELECT trigger_name FROM all_triggers ' \
'WHERE table_name = :tbn AND table_owner = :o ' \
'AND trigger_name = :trn AND owner = :o'
else: sql = 'SELECT trigger_name FROM all_triggers ' \
'WHERE table_name = :tbn AND table_owner = :o ' \
'AND upper(trigger_name) = upper(:trn) AND owner = :o'
owner_name, table_name = provider.split_table_name(trigger.table.name)
cursor = connection.cursor()
cursor.execute(sql, dict(tbn=table_name, trn=trigger.name, o=owner_name))
row = cursor.fetchone()
return row[0] if row is not None else None
def get_create_command(trigger):
schema = trigger.table.schema
quote_name = schema.provider.quote_name
trigger_name = quote_name(trigger.name)
table_name = quote_name(trigger.table.name)
column_name = quote_name(trigger.column.name)
seq_name = quote_name(trigger.sequence.name)
return schema.case(trigger_template) % (trigger_name, table_name, column_name, seq_name, column_name)
class OraColumn(Column):
auto_template = None
class OraSchema(DBSchema):
dialect = 'Oracle'
table_class = OraTable
column_class = OraColumn
class OraNoneMonad(sqltranslation.NoneMonad):
def __init__(monad, translator, value=None):
assert value in (None, '')
sqltranslation.ConstMonad.__init__(monad, translator, None)
class OraConstMonad(sqltranslation.ConstMonad):
@staticmethod
def new(translator, value):
if value == '': value = None
return sqltranslation.ConstMonad.new(translator, value)
class OraTranslator(sqltranslation.SQLTranslator):
dialect = 'Oracle'
NoneMonad = OraNoneMonad
ConstMonad = OraConstMonad
@classmethod
def get_normalized_type_of(translator, value):
if value == '': return NoneType
return sqltranslation.SQLTranslator.get_normalized_type_of(value)
class OraBuilder(sqlbuilding.SQLBuilder):
dialect = 'Oracle'
def INSERT(builder, table_name, columns, values, returning=None):
result = sqlbuilding.SQLBuilder.INSERT(builder, table_name, columns, values)
if returning is not None:
result.extend((' RETURNING ', builder.quote_name(returning), ' INTO :new_id'))
return result
def SELECT_FOR_UPDATE(builder, nowait, *sections):
assert not builder.indent
last_section = sections[-1]
if last_section[0] != 'LIMIT':
return builder.SELECT(*sections), 'FOR UPDATE NOWAIT\n' if nowait else 'FOR UPDATE\n'
from_section = sections[1]
assert from_section[0] == 'FROM'
if len(from_section) > 2: throw(NotImplementedError,
'Table joins are not supported for Oracle queries which have both FOR UPDATE and ROWNUM')
order_by_section = None
for section in sections:
if section[0] == 'ORDER_BY': order_by_section = section
table_ast = from_section[1]
assert len(table_ast) == 3 and table_ast[1] == 'TABLE'
table_alias = table_ast[0]
rowid = [ 'COLUMN', table_alias, 'ROWID' ]
sql_ast = [ 'SELECT', sections[0], [ 'FROM', table_ast ], [ 'WHERE', [ 'IN', rowid,
('SELECT', [ 'ROWID', ['AS', rowid, 'row-id' ] ]) + sections[1:] ] ] ]
if order_by_section: sql_ast.append(order_by_section)
result = builder(sql_ast)
return result, 'FOR UPDATE NOWAIT\n' if nowait else 'FOR UPDATE\n'
def SELECT(builder, *sections):
last_section = sections[-1]
limit = offset = None
if last_section[0] == 'LIMIT':
limit = last_section[1]
if len(last_section) > 2: offset = last_section[2]
sections = sections[:-1]
result = builder.subquery(*sections)
indent = builder.indent_spaces * builder.indent
if sections[0][0] == 'ROWID':
indent0 = builder.indent_spaces
x = 't."row-id"'
else:
indent0 = ''
x = 't.*'
if not limit: pass
elif not offset:
result = [ indent0, 'SELECT * FROM (\n' ]
builder.indent += 1
result.extend(builder.subquery(*sections))
builder.indent -= 1
result.extend((indent, ') WHERE ROWNUM <= ', builder(limit), '\n'))
else:
indent2 = indent + builder.indent_spaces
result = [ indent0, 'SELECT %s FROM (\n' % x, indent2, 'SELECT t.*, ROWNUM "row-num" FROM (\n' ]
builder.indent += 2
result.extend(builder.subquery(*sections))
builder.indent -= 2
result.extend((indent2, ') t '))
if limit[0] == 'VALUE' and offset[0] == 'VALUE' \
and isinstance(limit[1], int) and isinstance(offset[1], int):
total_limit = [ 'VALUE', limit[1] + offset[1] ]
result.extend(('WHERE ROWNUM <= ', builder(total_limit), '\n'))
else: result.extend(('WHERE ROWNUM <= ', builder(limit), ' + ', builder(offset), '\n'))
result.extend((indent, ') t WHERE "row-num" > ', builder(offset), '\n'))
if builder.indent:
indent = builder.indent_spaces * builder.indent
return '(\n', result, indent + ')'
return result
def ROWID(builder, *expr_list):
return builder.ALL(*expr_list)
def LIMIT(builder, limit, offset=None):
assert False # pragma: no cover
def DATE(builder, expr):
return 'TRUNC(', builder(expr), ')'
def RANDOM(builder):
return 'dbms_random.value'
def DATE_ADD(builder, expr, delta):
if isinstance(delta, timedelta):
return '(', builder(expr), " + INTERVAL '", timedelta2str(delta), "' HOUR TO SECOND)"
return '(', builder(expr), ' + ', builder(delta), ')'
def DATE_SUB(builder, expr, delta):
if isinstance(delta, timedelta):
return '(', builder(expr), " - INTERVAL '", timedelta2str(delta), "' HOUR TO SECOND)"
return '(', builder(expr), ' - ', builder(delta), ')'
def DATETIME_ADD(builder, expr, delta):
if isinstance(delta, timedelta):
return '(', builder(expr), " + INTERVAL '", timedelta2str(delta), "' HOUR TO SECOND)"
return '(', builder(expr), ' + ', builder(delta), ')'
def DATETIME_SUB(builder, expr, delta):
if isinstance(delta, timedelta):
return '(', builder(expr), " - INTERVAL '", timedelta2str(delta), "' HOUR TO SECOND)"
return '(', builder(expr), ' - ', builder(delta), ')'
class OraBoolConverter(dbapiprovider.BoolConverter):
if not PY2:
def py2sql(converter, val):
# Fixes cx_Oracle 5.1.3 Python 3 bug:
# "DatabaseError: OCI-22062: invalid input string [True]"
return int(val)
def sql2py(converter, val):
return bool(val) # TODO: True/False, T/F, Y/N, Yes/No, etc.
def sql_type(converter):
return "NUMBER(1)"
class OraStrConverter(dbapiprovider.StrConverter):
def validate(converter, val):
if val == '': return None
return dbapiprovider.StrConverter.validate(converter, val)
def sql2py(converter, val):
if isinstance(val, cx_Oracle.LOB):
val = val.read()
if PY2: val = val.decode('utf8')
return val
def sql_type(converter):
# TODO: Add support for NVARCHAR2 and NCLOB datatypes
if converter.max_len:
return 'VARCHAR2(%d CHAR)' % converter.max_len
return 'CLOB'
class OraIntConverter(dbapiprovider.IntConverter):
signed_types = {None: 'NUMBER(38)', 8: 'NUMBER(3)', 16: 'NUMBER(5)', 24: 'NUMBER(7)', 32: 'NUMBER(10)', 64: 'NUMBER(19)'}
unsigned_types = {None: 'NUMBER(38)', 8: 'NUMBER(3)', 16: 'NUMBER(5)', 24: 'NUMBER(8)', 32: 'NUMBER(10)', 64: 'NUMBER(20)'}
def init(self, kwargs):
dbapiprovider.IntConverter.init(self, kwargs)
sequence_name = kwargs.pop('sequence_name', None)
if sequence_name is not None and not (self.attr.auto and self.attr.is_pk):
throw(TypeError, "Parameter 'sequence_name' can be used only for PrimaryKey attributes with auto=True")
class OraRealConverter(dbapiprovider.RealConverter):
default_tolerance = 1e-14
def sql_type(converter):
return 'NUMBER'
class OraDecimalConverter(dbapiprovider.DecimalConverter):
def sql_type(converter):
return 'NUMBER(%d, %d)' % (converter.precision, converter.scale)
class OraBlobConverter(dbapiprovider.BlobConverter):
def sql2py(converter, val):
return buffer(val.read())
class OraDateConverter(dbapiprovider.DateConverter):
def sql2py(converter, val):
if isinstance(val, datetime): return val.date()
if not isinstance(val, date): throw(ValueError,
'Value of unexpected type received from database: instead of date got %s', type(val))
return val
class OraTimeConverter(dbapiprovider.TimeConverter):
sql_type_name = 'INTERVAL DAY(0) TO SECOND'
def __init__(converter, provider, py_type, attr=None):
dbapiprovider.TimeConverter.__init__(converter, provider, py_type, attr)
if attr is not None and converter.precision > 0:
# cx_Oracle 5.1.3 corrupts microseconds for values of DAY TO SECOND type
converter.precision = 0
def sql2py(converter, val):
if isinstance(val, timedelta):
total_seconds = val.days * (24 * 60 * 60) + val.seconds
if 0 <= total_seconds <= 24 * 60 * 60:
minutes, seconds = divmod(total_seconds, 60)
hours, minutes = divmod(minutes, 60)
return time(hours, minutes, seconds, val.microseconds)
elif not isinstance(val, time): throw(ValueError,
'Value of unexpected type received from database%s: instead of time or timedelta got %s'
% ('for attribute %s' % converter.attr if converter.attr else '', type(val)))
return val
def py2sql(converter, val):
return timedelta(hours=val.hour, minutes=val.minute, seconds=val.second, microseconds=val.microsecond)
class OraTimedeltaConverter(dbapiprovider.TimedeltaConverter):
sql_type_name = 'INTERVAL DAY TO SECOND'
def __init__(converter, provider, py_type, attr=None):
dbapiprovider.TimedeltaConverter.__init__(converter, provider, py_type, attr)
if attr is not None and converter.precision > 0:
# cx_Oracle 5.1.3 corrupts microseconds for values of DAY TO SECOND type
converter.precision = 0
class OraDatetimeConverter(dbapiprovider.DatetimeConverter):
sql_type_name = 'TIMESTAMP'
class OraUuidConverter(dbapiprovider.UuidConverter):
def sql_type(converter):
return 'RAW(16)'
class OraProvider(DBAPIProvider):
dialect = 'Oracle'
paramstyle = 'named'
max_name_len = 30
table_if_not_exists_syntax = False
index_if_not_exists_syntax = False
varchar_default_max_len = 1000
uint64_support = True
dbapi_module = cx_Oracle
dbschema_cls = OraSchema
translator_cls = OraTranslator
sqlbuilder_cls = OraBuilder
name_before_table = 'owner'
converter_classes = [
(bool, OraBoolConverter),
(basestring, OraStrConverter),
(int_types, OraIntConverter),
(float, OraRealConverter),
(Decimal, OraDecimalConverter),
(datetime, OraDatetimeConverter),
(date, OraDateConverter),
(time, OraTimeConverter),
(timedelta, OraTimedeltaConverter),
(UUID, OraUuidConverter),
(buffer, OraBlobConverter),
]
@wrap_dbapi_exceptions
def inspect_connection(provider, connection):
cursor = connection.cursor()
cursor.execute('SELECT version FROM product_component_version '
"WHERE product LIKE 'Oracle Database %'")
provider.server_version = get_version_tuple(cursor.fetchone()[0])
cursor.execute("SELECT sys_context( 'userenv', 'current_schema' ) FROM DUAL")
provider.default_schema_name = cursor.fetchone()[0]
def should_reconnect(provider, exc):
reconnect_error_codes = (
3113, # ORA-03113: end-of-file on communication channel
3114, # ORA-03114: not connected to ORACLE
)
return isinstance(exc, cx_Oracle.OperationalError) \
and exc.args[0].code in reconnect_error_codes
def normalize_name(provider, name):
return name[:provider.max_name_len].upper()
def normalize_vars(provider, vars, vartypes):
for name, value in iteritems(vars):
if value == '':
vars[name] = None
vartypes[name] = NoneType
@wrap_dbapi_exceptions
def set_transaction_mode(provider, connection, cache):
assert not cache.in_transaction
db_session = cache.db_session
if db_session is not None and db_session.serializable:
cursor = connection.cursor()
sql = 'SET TRANSACTION ISOLATION LEVEL SERIALIZABLE'
if core.debug: log_orm(sql)
cursor.execute(sql)
cache.immediate = True
if db_session is not None and (db_session.serializable or db_session.ddl):
cache.in_transaction = True
@wrap_dbapi_exceptions
def execute(provider, cursor, sql, arguments=None, returning_id=False):
if type(arguments) is list:
assert arguments and not returning_id
set_input_sizes(cursor, arguments[0])
cursor.executemany(sql, arguments)
else:
if arguments is not None: set_input_sizes(cursor, arguments)
if returning_id:
var = cursor.var(cx_Oracle.STRING, 40, cursor.arraysize, outconverter=int)
arguments['new_id'] = var
if arguments is None: cursor.execute(sql)
else: cursor.execute(sql, arguments)
return var.getvalue()
if arguments is None: cursor.execute(sql)
else: cursor.execute(sql, arguments)
def get_pool(provider, *args, **kwargs):
user = password = dsn = None
if len(args) == 1:
conn_str = args[0]
if '/' in conn_str:
user, tail = conn_str.split('/', 1)
if '@' in tail: password, dsn = tail.split('@', 1)
if None in (user, password, dsn): throw(ValueError,
"Incorrect connection string (must be in form of 'user/password@dsn')")
elif len(args) == 2: user, password = args
elif len(args) == 3: user, password, dsn = args
elif args: throw(ValueError, 'Invalid number of positional arguments')
if user != kwargs.setdefault('user', user):
throw(ValueError, 'Ambiguous value for user')
if password != kwargs.setdefault('password', password):
throw(ValueError, 'Ambiguous value for password')
if dsn != kwargs.setdefault('dsn', dsn):
throw(ValueError, 'Ambiguous value for dsn')
kwargs.setdefault('threaded', True)
kwargs.setdefault('min', 1)
kwargs.setdefault('max', 10)
kwargs.setdefault('increment', 1)
return OraPool(**kwargs)
def table_exists(provider, connection, table_name, case_sensitive=True):
owner_name, table_name = provider.split_table_name(table_name)
cursor = connection.cursor()
if case_sensitive: sql = 'SELECT table_name FROM all_tables WHERE owner = :o AND table_name = :tn'
else: sql = 'SELECT table_name FROM all_tables WHERE owner = :o AND upper(table_name) = upper(:tn)'
cursor.execute(sql, dict(o=owner_name, tn=table_name))
row = cursor.fetchone()
return row[0] if row is not None else None
def index_exists(provider, connection, table_name, index_name, case_sensitive=True):
owner_name, table_name = provider.split_table_name(table_name)
if not isinstance(index_name, basestring): throw(NotImplementedError)
if case_sensitive: sql = 'SELECT index_name FROM all_indexes WHERE owner = :o ' \
'AND index_name = :i AND table_owner = :o AND table_name = :t'
else: sql = 'SELECT index_name FROM all_indexes WHERE owner = :o ' \
'AND upper(index_name) = upper(:i) AND table_owner = :o AND table_name = :t'
cursor = connection.cursor()
cursor.execute(sql, dict(o=owner_name, i=index_name, t=table_name))
row = cursor.fetchone()
return row[0] if row is not None else None
def fk_exists(provider, connection, table_name, fk_name, case_sensitive=True):
owner_name, table_name = provider.split_table_name(table_name)
if not isinstance(fk_name, basestring): throw(NotImplementedError)
if case_sensitive:
sql = "SELECT constraint_name FROM user_constraints WHERE constraint_type = 'R' " \
'AND table_name = :tn AND constraint_name = :cn AND owner = :o'
else: sql = "SELECT constraint_name FROM user_constraints WHERE constraint_type = 'R' " \
'AND table_name = :tn AND upper(constraint_name) = upper(:cn) AND owner = :o'
cursor = connection.cursor()
cursor.execute(sql, dict(tn=table_name, cn=fk_name, o=owner_name))
row = cursor.fetchone()
return row[0] if row is not None else None
def table_has_data(provider, connection, table_name):
table_name = provider.quote_name(table_name)
cursor = connection.cursor()
cursor.execute('SELECT 1 FROM %s WHERE ROWNUM = 1' % table_name)
return cursor.fetchone() is not None
def drop_table(provider, connection, table_name):
table_name = provider.quote_name(table_name)
cursor = connection.cursor()
sql = 'DROP TABLE %s CASCADE CONSTRAINTS' % table_name
cursor.execute(sql)
provider_cls = OraProvider
def to_int_or_decimal(val):
val = val.replace(',', '.')
if '.' in val: return Decimal(val)
return int(val)
def to_decimal(val):
return Decimal(val.replace(',', '.'))
def output_type_handler(cursor, name, defaultType, size, precision, scale):
if defaultType == cx_Oracle.NUMBER:
if scale == 0:
if precision: return cursor.var(cx_Oracle.STRING, 40, cursor.arraysize, outconverter=int)
return cursor.var(cx_Oracle.STRING, 40, cursor.arraysize, outconverter=to_int_or_decimal)
if scale != -127:
return cursor.var(cx_Oracle.STRING, 100, cursor.arraysize, outconverter=to_decimal)
elif defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(unicode, size, cursor.arraysize) # from cx_Oracle example
return None
class OraPool(object):
def __init__(pool, **kwargs):
pool._pool = cx_Oracle.SessionPool(**kwargs)
def connect(pool):
if core.debug: log_orm('GET CONNECTION')
con = pool._pool.acquire()
con.outputtypehandler = output_type_handler
return con
def release(pool, con):
pool._pool.release(con)
def drop(pool, con):
pool._pool.drop(con)
def disconnect(pool):
pass
def get_inputsize(arg):
if isinstance(arg, datetime):
return cx_Oracle.TIMESTAMP
return None
def set_input_sizes(cursor, arguments):
if type(arguments) is dict:
input_sizes = {}
for name, arg in iteritems(arguments):
size = get_inputsize(arg)
if size is not None: input_sizes[name] = size
cursor.setinputsizes(**input_sizes)
elif type(arguments) is tuple:
input_sizes = map(get_inputsize, arguments)
cursor.setinputsizes(*input_sizes)
else: assert False, type(arguments) # pragma: no cover
| compiteing/flask-ponypermission | venv/lib/python2.7/site-packages/pony/orm/dbproviders/oracle.py | Python | mit | 23,505 |
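# Hedged sketch of the SQL shape that OraBuilder.SELECT above produces: Oracle
# (before 12c) has no LIMIT/OFFSET clause, so paging is emulated by wrapping the
# query in ROWNUM filters. Angle-bracket placeholders stand for the generated
# fragments; exact whitespace and aliasing depend on the query.
LIMIT_ONLY_SHAPE = """
SELECT * FROM (
    <inner query>
) WHERE ROWNUM <= <limit>
"""

LIMIT_AND_OFFSET_SHAPE = """
SELECT t.* FROM (
    SELECT t.*, ROWNUM "row-num" FROM (
        <inner query>
    ) t WHERE ROWNUM <= <limit> + <offset>
) t WHERE "row-num" > <offset>
"""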
"""
Utilities for generating random numbers, random sequences, and
random selections.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import random
import sys
import networkx as nx
__author__ = '\n'.join(['Aric Hagberg (hagberg@lanl.gov)',
'Dan Schult(dschult@colgate.edu)',
'Ben Edwards(bedwards@cs.unm.edu)'])
import warnings as _warnings
def create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):
_warnings.warn("create_degree_sequence() is deprecated",
DeprecationWarning)
""" Attempt to create a valid degree sequence of length n using
specified function sfunction(n,**kwds).
Parameters
----------
n : int
Length of degree sequence = number of nodes
sfunction: function
Function which returns a list of n real or integer values.
Called as "sfunction(n,**kwds)".
max_tries: int
Max number of attempts at creating valid degree sequence.
Notes
-----
Repeatedly create a degree sequence by calling sfunction(n,**kwds)
until achieving a valid degree sequence. If unsuccessful after
max_tries attempts, raise an exception.
For examples of sfunctions that return sequences of random numbers,
see networkx.Utils.
Examples
--------
>>> from networkx.utils import uniform_sequence, create_degree_sequence
>>> seq=create_degree_sequence(10,uniform_sequence)
"""
tries=0
max_deg=n
while tries < max_tries:
trialseq=sfunction(n,**kwds)
# round to integer values in the range [0,max_deg]
seq=[min(max_deg, max( int(round(s)),0 )) for s in trialseq]
# if graphical return, else throw away and try again
if nx.is_valid_degree_sequence(seq):
return seq
tries+=1
raise nx.NetworkXError(\
"Exceeded max (%d) attempts at a valid sequence."%max_tries)
# The same helpers for choosing random sequences from distributions
# uses Python's random module
# http://www.python.org/doc/current/lib/module-random.html
def pareto_sequence(n,exponent=1.0):
"""
Return sample sequence of length n from a Pareto distribution.
"""
return [random.paretovariate(exponent) for i in range(n)]
def powerlaw_sequence(n,exponent=2.0):
"""
Return sample sequence of length n from a power law distribution.
"""
return [random.paretovariate(exponent-1) for i in range(n)]
def zipf_rv(alpha, xmin=1, seed=None):
r"""Return a random value chosen from the Zipf distribution.
The return value is an integer drawn from the probability distribution
.. math::
p(x)=\frac{x^{-\alpha}}{\zeta(\alpha,x_{min})},
where `\zeta(\alpha,x_{min})` is the Hurwitz zeta function.
Parameters
----------
alpha : float
Exponent value of the distribution
xmin : int
Minimum value
seed : int
Seed value for random number generator
Returns
-------
x : int
Random value from Zipf distribution
Raises
------
ValueError:
If xmin < 1 or
If alpha <= 1
Notes
-----
The rejection algorithm generates random values for the power-law
distribution in uniformly bounded expected time dependent on
parameters. See [1] for details on its operation.
Examples
--------
>>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP
References
----------
..[1] Luc Devroye, Non-Uniform Random Variate Generation,
Springer-Verlag, New York, 1986.
"""
if xmin < 1:
raise ValueError("xmin < 1")
if alpha <= 1:
raise ValueError("a <= 1.0")
if not seed is None:
random.seed(seed)
a1 = alpha - 1.0
b = 2**a1
while True:
u = 1.0 - random.random() # u in (0,1]
v = random.random() # v in [0,1)
x = int(xmin*u**-(1.0/a1))
t = (1.0+(1.0/x))**a1
if v*x*(t-1.0)/(b-1.0) <= t/b:
break
return x
def zipf_sequence(n, alpha=2.0, xmin=1):
"""Return a sample sequence of length n from a Zipf distribution with
exponent parameter alpha and minimum value xmin.
See Also
--------
zipf_rv
"""
return [ zipf_rv(alpha,xmin) for _ in range(n)]
def uniform_sequence(n):
"""
Return sample sequence of length n from a uniform distribution.
"""
return [ random.uniform(0,n) for i in range(n)]
def cumulative_distribution(distribution):
"""Return normalized cumulative distribution from discrete distribution."""
cdf=[]
cdf.append(0.0)
psum=float(sum(distribution))
for i in range(0,len(distribution)):
cdf.append(cdf[i]+distribution[i]/psum)
return cdf
def discrete_sequence(n, distribution=None, cdistribution=None):
"""
Return sample sequence of length n from a given discrete distribution
or discrete cumulative distribution.
One of the following must be specified.
distribution = histogram of values, will be normalized
cdistribution = normalized discrete cumulative distribution
"""
import bisect
if cdistribution is not None:
cdf=cdistribution
elif distribution is not None:
cdf=cumulative_distribution(distribution)
else:
raise nx.NetworkXError(
"discrete_sequence: distribution or cdistribution missing")
# get a uniform random number
inputseq=[random.random() for i in range(n)]
# choose from CDF
seq=[bisect.bisect_left(cdf,s)-1 for s in inputseq]
return seq
def random_weighted_sample(mapping, k):
"""Return k items without replacement from a weighted sample.
The input is a dictionary of items with weights as values.
"""
if k > len(mapping):
raise ValueError("sample larger than population")
sample = set()
while len(sample) < k:
sample.add(weighted_choice(mapping))
return list(sample)
def weighted_choice(mapping):
"""Return a single element from a weighted sample.
The input is a dictionary of items with weights as values.
"""
# use roulette method
rnd = random.random() * sum(mapping.values())
for k, w in mapping.items():
rnd -= w
if rnd < 0:
return k
| valiantljk/graph-partition | utils/random_sequence.py | Python | gpl-2.0 | 6,428 |
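# Short usage sketch of the helpers above (results vary with the RNG state;
# seeding makes the run repeatable).
random.seed(42)

degrees = powerlaw_sequence(5, exponent=2.5)          # five floats from a power law
z = zipf_rv(alpha=2.5, xmin=1)                        # one integer Zipf variate
picks = discrete_sequence(4, distribution=[1, 3, 6])  # indices 0-2, weighted 1:3:6
winner = weighted_choice({'a': 0.1, 'b': 0.9})        # 'b' roughly 90% of the time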
import os
from pygame import RLEACCEL
from pygame import image as py_image
from pygame import error as py_error
def load_image(name, colorkey=None):
fullname = os.path.join('images', name)
try:
image = py_image.load_basic(fullname)
except py_error as message:
print('Cannot load image:', name)
raise SystemExit(message)
image = image.convert()
if colorkey is not None:
if colorkey == -1:
colorkey = image.get_at((0, 0))
image.set_colorkey(colorkey, RLEACCEL)
return image, image.get_rect()
| TurBoss/HostilPlanet | lib/utilities.py | Python | gpl-2.0 | 569 |
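# Hedged usage sketch: load_image() above needs an initialized pygame display
# (image.convert() requires one) and, since load_basic() only reads uncompressed
# BMP files, a file such as images/player.bmp (hypothetical name). Passing
# colorkey=-1 makes the colour of the top-left pixel transparent.
image, rect = load_image('player.bmp', colorkey=-1)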
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_aaa_server_host
version_added: "2.4"
short_description: Manages AAA server host configuration on HUAWEI CloudEngine switches.
description:
- Manages AAA server host configuration on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@QijunPan)
options:
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present', 'absent']
local_user_name:
description:
- Name of a local user.
The value is a string of 1 to 253 characters.
local_password:
description:
- Login password of a user. The password can contain letters, numbers, and special characters.
The value is a string of 1 to 255 characters.
local_service_type:
description:
- The type of local user login through, such as ftp ssh snmp telnet.
local_ftp_dir:
description:
- FTP user directory.
The value is a string of 1 to 255 characters.
local_user_level:
description:
- Login level of a local user.
The value is an integer ranging from 0 to 15.
local_user_group:
description:
- Name of the user group where the user belongs. The user inherits all the rights of the user group.
The value is a string of 1 to 32 characters.
radius_group_name:
description:
- RADIUS server group's name.
The value is a string of 1 to 32 case-insensitive characters.
radius_server_type:
description:
- Type of Radius Server.
choices: ['Authentication', 'Accounting']
radius_server_ip:
description:
- IPv4 address of configured server.
The value is a string of 0 to 255 characters, in dotted decimal notation.
radius_server_ipv6:
description:
- IPv6 address of configured server.
The total length is 128 bits.
radius_server_port:
description:
- Configured server port for a particular server.
The value is an integer ranging from 1 to 65535.
radius_server_mode:
description:
- Configured primary or secondary server for a particular server.
choices: ['Secondary-server', 'Primary-server']
radius_vpn_name:
description:
- Set VPN instance.
The value is a string of 1 to 31 case-sensitive characters.
radius_server_name:
description:
- Hostname of configured server.
The value is a string of 0 to 255 case-sensitive characters.
hwtacacs_template:
description:
- Name of a HWTACACS template.
The value is a string of 1 to 32 case-insensitive characters.
hwtacacs_server_ip:
description:
- Server IPv4 address. Must be a valid unicast IP address.
The value is a string of 0 to 255 characters, in dotted decimal notation.
hwtacacs_server_ipv6:
description:
- Server IPv6 address. Must be a valid unicast IP address.
The total length is 128 bits.
hwtacacs_server_type:
description:
- Hwtacacs server type.
choices: ['Authentication', 'Authorization', 'Accounting', 'Common']
hwtacacs_is_secondary_server:
description:
- Whether the server is secondary.
type: bool
default: 'no'
hwtacacs_vpn_name:
description:
- VPN instance name.
hwtacacs_is_public_net:
description:
- Set the public-net.
type: bool
default: 'no'
hwtacacs_server_host_name:
description:
- Hwtacacs server host name.
'''
EXAMPLES = '''
- name: AAA server host test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config local user when use local scheme"
ce_aaa_server_host:
state: present
local_user_name: user1
local_password: 123456
provider: "{{ cli }}"
- name: "Undo local user when use local scheme"
ce_aaa_server_host:
state: absent
local_user_name: user1
local_password: 123456
provider: "{{ cli }}"
- name: "Config radius server ip"
ce_aaa_server_host:
state: present
radius_group_name: group1
radius_server_type: Authentication
radius_server_ip: 10.1.10.1
radius_server_port: 2000
radius_server_mode: Primary-server
radius_vpn_name: _public_
provider: "{{ cli }}"
- name: "Undo radius server ip"
ce_aaa_server_host:
state: absent
radius_group_name: group1
radius_server_type: Authentication
radius_server_ip: 10.1.10.1
radius_server_port: 2000
radius_server_mode: Primary-server
radius_vpn_name: _public_
provider: "{{ cli }}"
- name: "Config hwtacacs server ip"
ce_aaa_server_host:
state: present
hwtacacs_template: template
hwtacacs_server_ip: 10.10.10.10
hwtacacs_server_type: Authorization
hwtacacs_vpn_name: _public_
provider: "{{ cli }}"
- name: "Undo hwtacacs server ip"
ce_aaa_server_host:
state: absent
hwtacacs_template: template
hwtacacs_server_ip: 10.10.10.10
hwtacacs_server_type: Authorization
hwtacacs_vpn_name: _public_
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"hwtacacs_is_public_net": "false",
"hwtacacs_is_secondary_server": "false",
"hwtacacs_server_ip": "10.135.182.157",
"hwtacacs_server_type": "Authorization",
"hwtacacs_template": "wdz",
"hwtacacs_vpn_name": "_public_",
"local_password": "******",
"state": "present"}
existing:
description: k/v pairs of existing aaa server host
returned: always
type: dict
sample: {"radius server ipv4": []}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"radius server ipv4": [
[
"10.1.10.1",
"Authentication",
"2000",
"Primary-server",
"_public_"
]
]}
updates:
description: command sent to the device
returned: always
type: list
sample: ["hwtacacs server template test",
"hwtacacs server authorization 10.135.182.157 vpn-instance test_vpn public-net"]
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
SUCCESS = """success"""
FAILED = """failed"""
INVALID_USER_NAME_CHAR = [' ', '/', '\\',
':', '*', '?', '"', '\'', '<', '>', '%']
# get local user name
CE_GET_LOCAL_USER_INFO_HEADER = """
<filter type="subtree">
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lam>
<users>
<user>
<userName></userName>
<password></password>
"""
CE_GET_LOCAL_USER_INFO_TAIL = """
</user>
</users>
</lam>
</aaa>
</filter>
"""
# merge local user name
CE_MERGE_LOCAL_USER_INFO_HEADER = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lam>
<users>
<user operation="merge">
<userName>%s</userName>
"""
CE_MERGE_LOCAL_USER_INFO_TAIL = """
</user>
</users>
</lam>
</aaa>
</config>
"""
# delete local user name
CE_DELETE_LOCAL_USER_INFO_HEADER = """
<config>
<aaa xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lam>
<users>
<user operation="delete">
<userName>%s</userName>
"""
CE_DELETE_LOCAL_USER_INFO_TAIL = """
</user>
</users>
</lam>
</aaa>
</config>
"""
# get radius server config ipv4
CE_GET_RADIUS_SERVER_CFG_IPV4 = """
<filter type="subtree">
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerIPV4s>
<rdsServerIPV4>
<serverType></serverType>
<serverIPAddress></serverIPAddress>
<serverPort></serverPort>
<serverMode></serverMode>
<vpnName></vpnName>
</rdsServerIPV4>
</rdsServerIPV4s>
</rdsTemplate>
</rdsTemplates>
</radius>
</filter>
"""
# merge radius server config ipv4
CE_MERGE_RADIUS_SERVER_CFG_IPV4 = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerIPV4s>
<rdsServerIPV4 operation="merge">
<serverType>%s</serverType>
<serverIPAddress>%s</serverIPAddress>
<serverPort>%s</serverPort>
<serverMode>%s</serverMode>
<vpnName>%s</vpnName>
</rdsServerIPV4>
</rdsServerIPV4s>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# delete radius server config ipv4
CE_DELETE_RADIUS_SERVER_CFG_IPV4 = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerIPV4s>
<rdsServerIPV4 operation="delete">
<serverType>%s</serverType>
<serverIPAddress>%s</serverIPAddress>
<serverPort>%s</serverPort>
<serverMode>%s</serverMode>
<vpnName>%s</vpnName>
</rdsServerIPV4>
</rdsServerIPV4s>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# get radius server config ipv6
CE_GET_RADIUS_SERVER_CFG_IPV6 = """
<filter type="subtree">
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerIPV6s>
<rdsServerIPV6>
<serverType></serverType>
<serverIPAddress></serverIPAddress>
<serverPort></serverPort>
<serverMode></serverMode>
</rdsServerIPV6>
</rdsServerIPV6s>
</rdsTemplate>
</rdsTemplates>
</radius>
</filter>
"""
# merge radius server config ipv6
CE_MERGE_RADIUS_SERVER_CFG_IPV6 = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerIPV6s>
<rdsServerIPV6 operation="merge">
<serverType>%s</serverType>
<serverIPAddress>%s</serverIPAddress>
<serverPort>%s</serverPort>
<serverMode>%s</serverMode>
</rdsServerIPV6>
</rdsServerIPV6s>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# delete radius server config ipv6
CE_DELETE_RADIUS_SERVER_CFG_IPV6 = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerIPV6s>
<rdsServerIPV6 operation="delete">
<serverType>%s</serverType>
<serverIPAddress>%s</serverIPAddress>
<serverPort>%s</serverPort>
<serverMode>%s</serverMode>
</rdsServerIPV6>
</rdsServerIPV6s>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# get radius server name
CE_GET_RADIUS_SERVER_NAME = """
<filter type="subtree">
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerNames>
<rdsServerName>
<serverType></serverType>
<serverName></serverName>
<serverPort></serverPort>
<serverMode></serverMode>
<vpnName></vpnName>
</rdsServerName>
</rdsServerNames>
</rdsTemplate>
</rdsTemplates>
</radius>
</filter>
"""
# merge radius server name
CE_MERGE_RADIUS_SERVER_NAME = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerNames>
<rdsServerName operation="merge">
<serverType>%s</serverType>
<serverName>%s</serverName>
<serverPort>%s</serverPort>
<serverMode>%s</serverMode>
<vpnName>%s</vpnName>
</rdsServerName>
</rdsServerNames>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# delete radius server name
CE_DELETE_RADIUS_SERVER_NAME = """
<config>
<radius xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<rdsTemplates>
<rdsTemplate>
<groupName>%s</groupName>
<rdsServerNames>
<rdsServerName operation="delete">
<serverType>%s</serverType>
<serverName>%s</serverName>
<serverPort>%s</serverPort>
<serverMode>%s</serverMode>
<vpnName>%s</vpnName>
</rdsServerName>
</rdsServerNames>
</rdsTemplate>
</rdsTemplates>
</radius>
</config>
"""
# get hwtacacs server config ipv4
CE_GET_HWTACACS_SERVER_CFG_IPV4 = """
<filter type="subtree">
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacSrvCfgs>
<hwTacSrvCfg>
<serverIpAddress></serverIpAddress>
<serverType></serverType>
<isSecondaryServer></isSecondaryServer>
<vpnName></vpnName>
<isPublicNet></isPublicNet>
</hwTacSrvCfg>
</hwTacSrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</filter>
"""
# merge hwtacacs server config ipv4
CE_MERGE_HWTACACS_SERVER_CFG_IPV4 = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacSrvCfgs>
<hwTacSrvCfg operation="merge">
<serverIpAddress>%s</serverIpAddress>
<serverType>%s</serverType>
<isSecondaryServer>%s</isSecondaryServer>
<vpnName>%s</vpnName>
<isPublicNet>%s</isPublicNet>
</hwTacSrvCfg>
</hwTacSrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# delete hwtacacs server config ipv4
CE_DELETE_HWTACACS_SERVER_CFG_IPV4 = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacSrvCfgs>
<hwTacSrvCfg operation="delete">
<serverIpAddress>%s</serverIpAddress>
<serverType>%s</serverType>
<isSecondaryServer>%s</isSecondaryServer>
<vpnName>%s</vpnName>
<isPublicNet>%s</isPublicNet>
</hwTacSrvCfg>
</hwTacSrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# get hwtacacs server config ipv6
CE_GET_HWTACACS_SERVER_CFG_IPV6 = """
<filter type="subtree">
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacIpv6SrvCfgs>
<hwTacIpv6SrvCfg>
<serverIpAddress></serverIpAddress>
<serverType></serverType>
<isSecondaryServer></isSecondaryServer>
<vpnName></vpnName>
</hwTacIpv6SrvCfg>
</hwTacIpv6SrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</filter>
"""
# merge hwtacacs server config ipv6
CE_MERGE_HWTACACS_SERVER_CFG_IPV6 = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacIpv6SrvCfgs>
<hwTacIpv6SrvCfg operation="merge">
<serverIpAddress>%s</serverIpAddress>
<serverType>%s</serverType>
<isSecondaryServer>%s</isSecondaryServer>
<vpnName>%s</vpnName>
</hwTacIpv6SrvCfg>
</hwTacIpv6SrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# delete hwtacacs server config ipv6
CE_DELETE_HWTACACS_SERVER_CFG_IPV6 = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacIpv6SrvCfgs>
<hwTacIpv6SrvCfg operation="delete">
<serverIpAddress>%s</serverIpAddress>
<serverType>%s</serverType>
<isSecondaryServer>%s</isSecondaryServer>
<vpnName>%s</vpnName>
</hwTacIpv6SrvCfg>
</hwTacIpv6SrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# get hwtacacs host server config
CE_GET_HWTACACS_HOST_SERVER_CFG = """
<filter type="subtree">
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacHostSrvCfgs>
<hwTacHostSrvCfg>
<serverHostName></serverHostName>
<serverType></serverType>
<isSecondaryServer></isSecondaryServer>
<vpnName></vpnName>
<isPublicNet></isPublicNet>
</hwTacHostSrvCfg>
</hwTacHostSrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</filter>
"""
# merge hwtacacs host server config
CE_MERGE_HWTACACS_HOST_SERVER_CFG = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacHostSrvCfgs>
<hwTacHostSrvCfg operation="merge">
<serverHostName>%s</serverHostName>
<serverType>%s</serverType>
<isSecondaryServer>%s</isSecondaryServer>
<vpnName>%s</vpnName>
<isPublicNet>%s</isPublicNet>
</hwTacHostSrvCfg>
</hwTacHostSrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
# delete hwtacacs host server config
CE_DELETE_HWTACACS_HOST_SERVER_CFG = """
<config>
<hwtacacs xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<hwTacTempCfgs>
<hwTacTempCfg>
<templateName>%s</templateName>
<hwTacHostSrvCfgs>
<hwTacHostSrvCfg operation="delete">
<serverHostName>%s</serverHostName>
<serverType>%s</serverType>
<isSecondaryServer>%s</isSecondaryServer>
<vpnName>%s</vpnName>
<isPublicNet>%s</isPublicNet>
</hwTacHostSrvCfg>
</hwTacHostSrvCfgs>
</hwTacTempCfg>
</hwTacTempCfgs>
</hwtacacs>
</config>
"""
class AaaServerHost(object):
""" Manages aaa server host configuration """
def netconf_get_config(self, **kwargs):
""" Get configure by netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" Set configure by netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
recv_xml = set_nc_config(module, conf_str)
return recv_xml
def get_local_user_info(self, **kwargs):
""" Get local user information """
module = kwargs["module"]
local_user_name = module.params['local_user_name']
local_service_type = module.params['local_service_type']
local_ftp_dir = module.params['local_ftp_dir']
local_user_level = module.params['local_user_level']
local_user_group = module.params['local_user_group']
state = module.params['state']
result = dict()
result["local_user_info"] = []
need_cfg = False
conf_str = CE_GET_LOCAL_USER_INFO_HEADER
if local_service_type:
if local_service_type == "none":
conf_str += "<serviceTerminal></serviceTerminal>"
conf_str += "<serviceTelnet></serviceTelnet>"
conf_str += "<serviceFtp></serviceFtp>"
conf_str += "<serviceSsh></serviceSsh>"
conf_str += "<serviceSnmp></serviceSnmp>"
conf_str += "<serviceDot1x></serviceDot1x>"
elif local_service_type == "dot1x":
conf_str += "<serviceDot1x></serviceDot1x>"
else:
option = local_service_type.split(" ")
for tmp in option:
if tmp == "dot1x":
module.fail_json(
msg='Error: Do not input dot1x with other service type.')
elif tmp == "none":
module.fail_json(
msg='Error: Do not input none with other service type.')
elif tmp == "ftp":
conf_str += "<serviceFtp></serviceFtp>"
elif tmp == "snmp":
conf_str += "<serviceSnmp></serviceSnmp>"
elif tmp == "ssh":
conf_str += "<serviceSsh></serviceSsh>"
elif tmp == "telnet":
conf_str += "<serviceTelnet></serviceTelnet>"
elif tmp == "terminal":
conf_str += "<serviceTerminal></serviceTerminal>"
else:
module.fail_json(
msg='Error: Do not support the type [%s].' % tmp)
if local_ftp_dir:
conf_str += "<ftpDir></ftpDir>"
if local_user_level:
conf_str += "<userLevel></userLevel>"
if local_user_group:
conf_str += "<userGroupName></userGroupName>"
conf_str += CE_GET_LOCAL_USER_INFO_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
local_user_info = root.findall("aaa/lam/users/user")
if local_user_info:
for tmp in local_user_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["userName", "password", "userLevel", "ftpDir", "userGroupName",
"serviceTerminal", "serviceTelnet", "serviceFtp", "serviceSsh",
"serviceSnmp", "serviceDot1x"]:
tmp_dict[site.tag] = site.text
result["local_user_info"].append(tmp_dict)
if state == "present":
need_cfg = True
else:
if result["local_user_info"]:
for tmp in result["local_user_info"]:
if "userName" in tmp.keys():
if tmp["userName"] == local_user_name:
if not local_service_type and not local_user_level \
and not local_ftp_dir and not local_user_group:
need_cfg = True
if local_service_type:
if local_service_type == "none":
if tmp.get("serviceTerminal") == "true" or \
tmp.get("serviceTelnet") == "true" or \
tmp.get("serviceFtp") == "true" or \
tmp.get("serviceSsh") == "true" or \
tmp.get("serviceSnmp") == "true" or \
tmp.get("serviceDot1x") == "true":
need_cfg = True
elif local_service_type == "dot1x":
if tmp.get("serviceDot1x") == "true":
need_cfg = True
elif tmp == "ftp":
if tmp.get("serviceFtp") == "true":
need_cfg = True
elif tmp == "snmp":
if tmp.get("serviceSnmp") == "true":
need_cfg = True
elif tmp == "ssh":
if tmp.get("serviceSsh") == "true":
need_cfg = True
elif tmp == "telnet":
if tmp.get("serviceTelnet") == "true":
need_cfg = True
elif tmp == "terminal":
if tmp.get("serviceTerminal") == "true":
need_cfg = True
if local_user_level:
if tmp.get("userLevel") == local_user_level:
need_cfg = True
if local_ftp_dir:
if tmp.get("ftpDir") == local_ftp_dir:
need_cfg = True
if local_user_group:
if tmp.get("userGroupName") == local_user_group:
need_cfg = True
break
result["need_cfg"] = need_cfg
return result
def merge_local_user_info(self, **kwargs):
""" Merge local user information by netconf """
module = kwargs["module"]
local_user_name = module.params['local_user_name']
local_password = module.params['local_password']
local_service_type = module.params['local_service_type']
local_ftp_dir = module.params['local_ftp_dir']
local_user_level = module.params['local_user_level']
local_user_group = module.params['local_user_group']
state = module.params['state']
cmds = []
conf_str = CE_MERGE_LOCAL_USER_INFO_HEADER % local_user_name
if local_password:
conf_str += "<password>%s</password>" % local_password
if state == "present":
cmd = "local-user %s password cipher %s" % (
local_user_name, local_password)
cmds.append(cmd)
if local_service_type:
if local_service_type == "none":
conf_str += "<serviceTerminal>false</serviceTerminal>"
conf_str += "<serviceTelnet>false</serviceTelnet>"
conf_str += "<serviceFtp>false</serviceFtp>"
conf_str += "<serviceSsh>false</serviceSsh>"
conf_str += "<serviceSnmp>false</serviceSnmp>"
conf_str += "<serviceDot1x>false</serviceDot1x>"
cmd = "local-user %s service-type none" % local_user_name
cmds.append(cmd)
elif local_service_type == "dot1x":
if state == "present":
conf_str += "<serviceDot1x>true</serviceDot1x>"
cmd = "local-user %s service-type dot1x" % local_user_name
else:
conf_str += "<serviceDot1x>false</serviceDot1x>"
cmd = "undo local-user %s service-type" % local_user_name
cmds.append(cmd)
else:
option = local_service_type.split(" ")
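                # Several service types may be combined in one space-separated string; "dot1x" and "none" cannot be combined with others.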
for tmp in option:
if tmp == "dot1x":
module.fail_json(
msg='Error: Do not input dot1x with other service type.')
if tmp == "none":
module.fail_json(
msg='Error: Do not input none with other service type.')
if state == "present":
if tmp == "ftp":
conf_str += "<serviceFtp>true</serviceFtp>"
cmd = "local-user %s service-type ftp" % local_user_name
elif tmp == "snmp":
conf_str += "<serviceSnmp>true</serviceSnmp>"
cmd = "local-user %s service-type snmp" % local_user_name
elif tmp == "ssh":
conf_str += "<serviceSsh>true</serviceSsh>"
cmd = "local-user %s service-type ssh" % local_user_name
elif tmp == "telnet":
conf_str += "<serviceTelnet>true</serviceTelnet>"
cmd = "local-user %s service-type telnet" % local_user_name
elif tmp == "terminal":
conf_str += "<serviceTerminal>true</serviceTerminal>"
cmd = "local-user %s service-type terminal" % local_user_name
cmds.append(cmd)
else:
if tmp == "ftp":
conf_str += "<serviceFtp>false</serviceFtp>"
elif tmp == "snmp":
conf_str += "<serviceSnmp>false</serviceSnmp>"
elif tmp == "ssh":
conf_str += "<serviceSsh>false</serviceSsh>"
elif tmp == "telnet":
conf_str += "<serviceTelnet>false</serviceTelnet>"
elif tmp == "terminal":
conf_str += "<serviceTerminal>false</serviceTerminal>"
if state == "absent":
cmd = "undo local-user %s service-type" % local_user_name
cmds.append(cmd)
if local_ftp_dir:
if state == "present":
conf_str += "<ftpDir>%s</ftpDir>" % local_ftp_dir
cmd = "local-user %s ftp-directory %s" % (
local_user_name, local_ftp_dir)
cmds.append(cmd)
else:
conf_str += "<ftpDir></ftpDir>"
cmd = "undo local-user %s ftp-directory" % local_user_name
cmds.append(cmd)
if local_user_level:
if state == "present":
conf_str += "<userLevel>%s</userLevel>" % local_user_level
cmd = "local-user %s level %s" % (
local_user_name, local_user_level)
cmds.append(cmd)
else:
conf_str += "<userLevel></userLevel>"
cmd = "undo local-user %s level" % local_user_name
cmds.append(cmd)
if local_user_group:
if state == "present":
conf_str += "<userGroupName>%s</userGroupName>" % local_user_group
cmd = "local-user %s user-group %s" % (
local_user_name, local_user_group)
cmds.append(cmd)
else:
conf_str += "<userGroupName></userGroupName>"
cmd = "undo local-user %s user-group" % local_user_name
cmds.append(cmd)
conf_str += CE_MERGE_LOCAL_USER_INFO_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge local user info failed.')
return cmds
def delete_local_user_info(self, **kwargs):
""" Delete local user information by netconf """
module = kwargs["module"]
local_user_name = module.params['local_user_name']
conf_str = CE_DELETE_LOCAL_USER_INFO_HEADER % local_user_name
conf_str += CE_DELETE_LOCAL_USER_INFO_TAIL
cmds = []
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Delete local user info failed.')
cmd = "undo local-user %s" % local_user_name
cmds.append(cmd)
return cmds
    def get_radius_server_cfg_ipv4(self, **kwargs):
        """ Get radius server IPv4 configuration """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_ip = module.params['radius_server_ip']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
radius_vpn_name = module.params['radius_vpn_name']
state = module.params['state']
result = dict()
result["radius_server_ip_v4"] = []
need_cfg = False
conf_str = CE_GET_RADIUS_SERVER_CFG_IPV4 % radius_group_name
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
radius_server_ip_v4 = root.findall(
"radius/rdsTemplates/rdsTemplate/rdsServerIPV4s/rdsServerIPV4")
if radius_server_ip_v4:
for tmp in radius_server_ip_v4:
tmp_dict = dict()
for site in tmp:
if site.tag in ["serverType", "serverIPAddress", "serverPort", "serverMode", "vpnName"]:
tmp_dict[site.tag] = site.text
result["radius_server_ip_v4"].append(tmp_dict)
if result["radius_server_ip_v4"]:
cfg = dict()
config_list = list()
if radius_server_type:
cfg["serverType"] = radius_server_type.lower()
if radius_server_ip:
cfg["serverIPAddress"] = radius_server_ip.lower()
if radius_server_port:
cfg["serverPort"] = radius_server_port.lower()
if radius_server_mode:
cfg["serverMode"] = radius_server_mode.lower()
if radius_vpn_name:
cfg["vpnName"] = radius_vpn_name.lower()
for tmp in result["radius_server_ip_v4"]:
exist_cfg = dict()
if radius_server_type:
exist_cfg["serverType"] = tmp.get("serverType").lower()
if radius_server_ip:
exist_cfg["serverIPAddress"] = tmp.get("serverIPAddress").lower()
if radius_server_port:
exist_cfg["serverPort"] = tmp.get("serverPort").lower()
if radius_server_mode:
exist_cfg["serverMode"] = tmp.get("serverMode").lower()
if radius_vpn_name:
exist_cfg["vpnName"] = tmp.get("vpnName").lower()
config_list.append(exist_cfg)
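                # An exact match against an existing server entry means nothing to do for state "present" and a removal for state "absent".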
if cfg in config_list:
if state == "present":
need_cfg = False
else:
need_cfg = True
else:
if state == "present":
need_cfg = True
else:
need_cfg = False
result["need_cfg"] = need_cfg
return result
    def merge_radius_server_cfg_ipv4(self, **kwargs):
        """ Merge radius server IPv4 configuration """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_ip = module.params['radius_server_ip']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
radius_vpn_name = module.params['radius_vpn_name']
conf_str = CE_MERGE_RADIUS_SERVER_CFG_IPV4 % (
radius_group_name, radius_server_type,
radius_server_ip, radius_server_port,
radius_server_mode, radius_vpn_name)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Merge radius server config ipv4 failed.')
cmds = []
cmd = "radius server group %s" % radius_group_name
cmds.append(cmd)
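        # radius_server_type only allows "Authentication" or "Accounting", so the else branch builds the accounting command.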
if radius_server_type == "Authentication":
cmd = "radius server authentication %s %s" % (
radius_server_ip, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
else:
cmd = "radius server accounting %s %s" % (
radius_server_ip, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
cmds.append(cmd)
return cmds
    def delete_radius_server_cfg_ipv4(self, **kwargs):
        """ Delete radius server IPv4 configuration """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_ip = module.params['radius_server_ip']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
radius_vpn_name = module.params['radius_vpn_name']
conf_str = CE_DELETE_RADIUS_SERVER_CFG_IPV4 % (
radius_group_name, radius_server_type,
radius_server_ip, radius_server_port,
radius_server_mode, radius_vpn_name)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
                msg='Error: Delete radius server config ipv4 failed.')
cmds = []
cmd = "radius server group %s" % radius_group_name
cmds.append(cmd)
if radius_server_type == "Authentication":
cmd = "undo radius server authentication %s %s" % (
radius_server_ip, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
else:
cmd = "undo radius server accounting %s %s" % (
radius_server_ip, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
cmds.append(cmd)
return cmds
    def get_radius_server_cfg_ipv6(self, **kwargs):
        """ Get radius server IPv6 configuration """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_ipv6 = module.params['radius_server_ipv6']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
state = module.params['state']
result = dict()
result["radius_server_ip_v6"] = []
need_cfg = False
conf_str = CE_GET_RADIUS_SERVER_CFG_IPV6 % radius_group_name
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
radius_server_ip_v6 = root.findall(
"radius/rdsTemplates/rdsTemplate/rdsServerIPV6s/rdsServerIPV6")
if radius_server_ip_v6:
for tmp in radius_server_ip_v6:
tmp_dict = dict()
for site in tmp:
if site.tag in ["serverType", "serverIPAddress", "serverPort", "serverMode"]:
tmp_dict[site.tag] = site.text
result["radius_server_ip_v6"].append(tmp_dict)
if result["radius_server_ip_v6"]:
cfg = dict()
config_list = list()
if radius_server_type:
cfg["serverType"] = radius_server_type.lower()
if radius_server_ipv6:
cfg["serverIPAddress"] = radius_server_ipv6.lower()
if radius_server_port:
cfg["serverPort"] = radius_server_port.lower()
if radius_server_mode:
cfg["serverMode"] = radius_server_mode.lower()
for tmp in result["radius_server_ip_v6"]:
exist_cfg = dict()
if radius_server_type:
exist_cfg["serverType"] = tmp.get("serverType").lower()
if radius_server_ipv6:
exist_cfg["serverIPAddress"] = tmp.get("serverIPAddress").lower()
if radius_server_port:
exist_cfg["serverPort"] = tmp.get("serverPort").lower()
if radius_server_mode:
exist_cfg["serverMode"] = tmp.get("serverMode").lower()
config_list.append(exist_cfg)
if cfg in config_list:
if state == "present":
need_cfg = False
else:
need_cfg = True
else:
if state == "present":
need_cfg = True
else:
need_cfg = False
result["need_cfg"] = need_cfg
return result
    def merge_radius_server_cfg_ipv6(self, **kwargs):
        """ Merge radius server IPv6 configuration """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_ipv6 = module.params['radius_server_ipv6']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
conf_str = CE_MERGE_RADIUS_SERVER_CFG_IPV6 % (
radius_group_name, radius_server_type,
radius_server_ipv6, radius_server_port,
radius_server_mode)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Merge radius server config ipv6 failed.')
cmds = []
cmd = "radius server group %s" % radius_group_name
cmds.append(cmd)
if radius_server_type == "Authentication":
cmd = "radius server authentication %s %s" % (
radius_server_ipv6, radius_server_port)
if radius_server_mode == "Secondary-server":
cmd += " secondary"
else:
cmd = "radius server accounting %s %s" % (
radius_server_ipv6, radius_server_port)
if radius_server_mode == "Secondary-server":
cmd += " secondary"
cmds.append(cmd)
return cmds
    def delete_radius_server_cfg_ipv6(self, **kwargs):
        """ Delete radius server IPv6 configuration """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_ipv6 = module.params['radius_server_ipv6']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
conf_str = CE_DELETE_RADIUS_SERVER_CFG_IPV6 % (
radius_group_name, radius_server_type,
radius_server_ipv6, radius_server_port,
radius_server_mode)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
                msg='Error: Delete radius server config ipv6 failed.')
cmds = []
cmd = "radius server group %s" % radius_group_name
cmds.append(cmd)
if radius_server_type == "Authentication":
cmd = "undo radius server authentication %s %s" % (
radius_server_ipv6, radius_server_port)
if radius_server_mode == "Secondary-server":
cmd += " secondary"
else:
cmd = "undo radius server accounting %s %s" % (
radius_server_ipv6, radius_server_port)
if radius_server_mode == "Secondary-server":
cmd += " secondary"
cmds.append(cmd)
return cmds
def get_radius_server_name(self, **kwargs):
""" Get radius server name """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_name = module.params['radius_server_name']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
radius_vpn_name = module.params['radius_vpn_name']
state = module.params['state']
result = dict()
result["radius_server_name_cfg"] = []
need_cfg = False
conf_str = CE_GET_RADIUS_SERVER_NAME % radius_group_name
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
radius_server_name_cfg = root.findall(
"radius/rdsTemplates/rdsTemplate/rdsServerNames/rdsServerName")
if radius_server_name_cfg:
for tmp in radius_server_name_cfg:
tmp_dict = dict()
for site in tmp:
if site.tag in ["serverType", "serverName", "serverPort", "serverMode", "vpnName"]:
tmp_dict[site.tag] = site.text
result["radius_server_name_cfg"].append(tmp_dict)
if result["radius_server_name_cfg"]:
cfg = dict()
config_list = list()
if radius_server_type:
cfg["serverType"] = radius_server_type.lower()
if radius_server_name:
cfg["serverName"] = radius_server_name.lower()
if radius_server_port:
cfg["serverPort"] = radius_server_port.lower()
if radius_server_mode:
cfg["serverMode"] = radius_server_mode.lower()
if radius_vpn_name:
cfg["vpnName"] = radius_vpn_name.lower()
for tmp in result["radius_server_name_cfg"]:
exist_cfg = dict()
if radius_server_type:
exist_cfg["serverType"] = tmp.get("serverType").lower()
if radius_server_name:
exist_cfg["serverName"] = tmp.get("serverName").lower()
if radius_server_port:
exist_cfg["serverPort"] = tmp.get("serverPort").lower()
if radius_server_mode:
exist_cfg["serverMode"] = tmp.get("serverMode").lower()
if radius_vpn_name:
exist_cfg["vpnName"] = tmp.get("vpnName").lower()
config_list.append(exist_cfg)
if cfg in config_list:
if state == "present":
need_cfg = False
else:
need_cfg = True
else:
if state == "present":
need_cfg = True
else:
need_cfg = False
result["need_cfg"] = need_cfg
return result
def merge_radius_server_name(self, **kwargs):
""" Merge radius server name """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_name = module.params['radius_server_name']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
radius_vpn_name = module.params['radius_vpn_name']
conf_str = CE_MERGE_RADIUS_SERVER_NAME % (
radius_group_name, radius_server_type,
radius_server_name, radius_server_port,
radius_server_mode, radius_vpn_name)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge radius server name failed.')
cmds = []
cmd = "radius server group %s" % radius_group_name
cmds.append(cmd)
if radius_server_type == "Authentication":
cmd = "radius server authentication hostname %s %s" % (
radius_server_name, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
else:
cmd = "radius server accounting hostname %s %s" % (
radius_server_name, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
cmds.append(cmd)
return cmds
def delete_radius_server_name(self, **kwargs):
""" Delete radius server name """
module = kwargs["module"]
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_name = module.params['radius_server_name']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
radius_vpn_name = module.params['radius_vpn_name']
conf_str = CE_DELETE_RADIUS_SERVER_NAME % (
radius_group_name, radius_server_type,
radius_server_name, radius_server_port,
radius_server_mode, radius_vpn_name)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
            module.fail_json(msg='Error: Delete radius server name failed.')
cmds = []
cmd = "radius server group %s" % radius_group_name
cmds.append(cmd)
if radius_server_type == "Authentication":
cmd = "undo radius server authentication hostname %s %s" % (
radius_server_name, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
else:
cmd = "undo radius server accounting hostname %s %s" % (
radius_server_name, radius_server_port)
if radius_vpn_name and radius_vpn_name != "_public_":
cmd += " vpn-instance %s" % radius_vpn_name
if radius_server_mode == "Secondary-server":
cmd += " secondary"
cmds.append(cmd)
return cmds
    def get_hwtacacs_server_cfg_ipv4(self, **kwargs):
        """ Get hwtacacs server IPv4 configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_ip = module.params["hwtacacs_server_ip"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"]
state = module.params["state"]
result = dict()
result["hwtacacs_server_cfg_ipv4"] = []
need_cfg = False
conf_str = CE_GET_HWTACACS_SERVER_CFG_IPV4 % hwtacacs_template
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
hwtacacs_server_cfg_ipv4 = root.findall(
"hwtacacs/hwTacTempCfgs/hwTacTempCfg/hwTacSrvCfgs/hwTacSrvCfg")
if hwtacacs_server_cfg_ipv4:
for tmp in hwtacacs_server_cfg_ipv4:
tmp_dict = dict()
for site in tmp:
if site.tag in ["serverIpAddress", "serverType", "isSecondaryServer", "isPublicNet", "vpnName"]:
tmp_dict[site.tag] = site.text
result["hwtacacs_server_cfg_ipv4"].append(tmp_dict)
if result["hwtacacs_server_cfg_ipv4"]:
cfg = dict()
config_list = list()
if hwtacacs_server_ip:
cfg["serverIpAddress"] = hwtacacs_server_ip.lower()
if hwtacacs_server_type:
cfg["serverType"] = hwtacacs_server_type.lower()
if hwtacacs_is_secondary_server:
cfg["isSecondaryServer"] = str(hwtacacs_is_secondary_server).lower()
if hwtacacs_is_public_net:
cfg["isPublicNet"] = str(hwtacacs_is_public_net).lower()
if hwtacacs_vpn_name:
cfg["vpnName"] = hwtacacs_vpn_name.lower()
for tmp in result["hwtacacs_server_cfg_ipv4"]:
exist_cfg = dict()
if hwtacacs_server_ip:
exist_cfg["serverIpAddress"] = tmp.get("serverIpAddress").lower()
if hwtacacs_server_type:
exist_cfg["serverType"] = tmp.get("serverType").lower()
if hwtacacs_is_secondary_server:
exist_cfg["isSecondaryServer"] = tmp.get("isSecondaryServer").lower()
if hwtacacs_is_public_net:
exist_cfg["isPublicNet"] = tmp.get("isPublicNet").lower()
if hwtacacs_vpn_name:
exist_cfg["vpnName"] = tmp.get("vpnName").lower()
config_list.append(exist_cfg)
if cfg in config_list:
if state == "present":
need_cfg = False
else:
need_cfg = True
else:
if state == "present":
need_cfg = True
else:
need_cfg = False
result["need_cfg"] = need_cfg
return result
    def merge_hwtacacs_server_cfg_ipv4(self, **kwargs):
        """ Merge hwtacacs server IPv4 configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_ip = module.params["hwtacacs_server_ip"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"]
conf_str = CE_MERGE_HWTACACS_SERVER_CFG_IPV4 % (
hwtacacs_template, hwtacacs_server_ip,
hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(),
hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower())
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Merge hwtacacs server config ipv4 failed.')
cmds = []
cmd = "hwtacacs server template %s" % hwtacacs_template
cmds.append(cmd)
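        # Map hwtacacs_server_type (Authentication / Authorization / Accounting / Common) to the matching hwtacacs CLI command.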
if hwtacacs_server_type == "Authentication":
cmd = "hwtacacs server authentication %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Authorization":
cmd = "hwtacacs server authorization %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Accounting":
cmd = "hwtacacs server accounting %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Common":
cmd = "hwtacacs server %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
cmds.append(cmd)
return cmds
    def delete_hwtacacs_server_cfg_ipv4(self, **kwargs):
        """ Delete hwtacacs server IPv4 configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_ip = module.params["hwtacacs_server_ip"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"]
conf_str = CE_DELETE_HWTACACS_SERVER_CFG_IPV4 % (
hwtacacs_template, hwtacacs_server_ip,
hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(),
hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower())
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Delete hwtacacs server config ipv4 failed.')
cmds = []
cmd = "hwtacacs server template %s" % hwtacacs_template
cmds.append(cmd)
if hwtacacs_server_type == "Authentication":
cmd = "undo hwtacacs server authentication %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Authorization":
cmd = "undo hwtacacs server authorization %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Accounting":
cmd = "undo hwtacacs server accounting %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Common":
cmd = "undo hwtacacs server %s" % hwtacacs_server_ip
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
cmds.append(cmd)
return cmds
    def get_hwtacacs_server_cfg_ipv6(self, **kwargs):
        """ Get hwtacacs server IPv6 configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_ipv6 = module.params["hwtacacs_server_ipv6"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
state = module.params["state"]
result = dict()
result["hwtacacs_server_cfg_ipv6"] = []
need_cfg = False
conf_str = CE_GET_HWTACACS_SERVER_CFG_IPV6 % hwtacacs_template
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
hwtacacs_server_cfg_ipv6 = root.findall(
"hwtacacs/hwTacTempCfgs/hwTacTempCfg/hwTacIpv6SrvCfgs/hwTacIpv6SrvCfg")
if hwtacacs_server_cfg_ipv6:
for tmp in hwtacacs_server_cfg_ipv6:
tmp_dict = dict()
for site in tmp:
if site.tag in ["serverIpAddress", "serverType", "isSecondaryServer", "vpnName"]:
tmp_dict[site.tag] = site.text
result["hwtacacs_server_cfg_ipv6"].append(tmp_dict)
if result["hwtacacs_server_cfg_ipv6"]:
cfg = dict()
config_list = list()
if hwtacacs_server_ipv6:
cfg["serverIpAddress"] = hwtacacs_server_ipv6.lower()
if hwtacacs_server_type:
cfg["serverType"] = hwtacacs_server_type.lower()
if hwtacacs_is_secondary_server:
cfg["isSecondaryServer"] = str(hwtacacs_is_secondary_server).lower()
if hwtacacs_vpn_name:
cfg["vpnName"] = hwtacacs_vpn_name.lower()
for tmp in result["hwtacacs_server_cfg_ipv6"]:
exist_cfg = dict()
if hwtacacs_server_ipv6:
exist_cfg["serverIpAddress"] = tmp.get("serverIpAddress").lower()
if hwtacacs_server_type:
exist_cfg["serverType"] = tmp.get("serverType").lower()
if hwtacacs_is_secondary_server:
exist_cfg["isSecondaryServer"] = tmp.get("isSecondaryServer").lower()
if hwtacacs_vpn_name:
exist_cfg["vpnName"] = tmp.get("vpnName").lower()
config_list.append(exist_cfg)
if cfg in config_list:
if state == "present":
need_cfg = False
else:
need_cfg = True
else:
if state == "present":
need_cfg = True
else:
need_cfg = False
result["need_cfg"] = need_cfg
return result
    def merge_hwtacacs_server_cfg_ipv6(self, **kwargs):
        """ Merge hwtacacs server IPv6 configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_ipv6 = module.params["hwtacacs_server_ipv6"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
conf_str = CE_MERGE_HWTACACS_SERVER_CFG_IPV6 % (
hwtacacs_template, hwtacacs_server_ipv6,
hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(),
hwtacacs_vpn_name)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Merge hwtacacs server config ipv6 failed.')
cmds = []
cmd = "hwtacacs server template %s" % hwtacacs_template
cmds.append(cmd)
if hwtacacs_server_type == "Authentication":
cmd = "hwtacacs server authentication %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Authorization":
cmd = "hwtacacs server authorization %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Accounting":
cmd = "hwtacacs server accounting %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Common":
cmd = "hwtacacs server %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
cmds.append(cmd)
return cmds
    def delete_hwtacacs_server_cfg_ipv6(self, **kwargs):
        """ Delete hwtacacs server IPv6 configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_ipv6 = module.params["hwtacacs_server_ipv6"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
conf_str = CE_DELETE_HWTACACS_SERVER_CFG_IPV6 % (
hwtacacs_template, hwtacacs_server_ipv6,
hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(),
hwtacacs_vpn_name)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Delete hwtacacs server config ipv6 failed.')
cmds = []
cmd = "hwtacacs server template %s" % hwtacacs_template
cmds.append(cmd)
if hwtacacs_server_type == "Authentication":
cmd = "undo hwtacacs server authentication %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Authorization":
cmd = "undo hwtacacs server authorization %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Accounting":
cmd = "undo hwtacacs server accounting %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Common":
cmd = "undo hwtacacs server %s" % hwtacacs_server_ipv6
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_secondary_server:
cmd += " secondary"
cmds.append(cmd)
return cmds
    def get_hwtacacs_host_server_cfg(self, **kwargs):
        """ Get hwtacacs host server configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_host_name = module.params["hwtacacs_server_host_name"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"]
state = module.params["state"]
result = dict()
result["hwtacacs_server_name_cfg"] = []
need_cfg = False
conf_str = CE_GET_HWTACACS_HOST_SERVER_CFG % hwtacacs_template
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
hwtacacs_server_name_cfg = root.findall(
"hwtacacs/hwTacTempCfgs/hwTacTempCfg/hwTacHostSrvCfgs/hwTacHostSrvCfg")
if hwtacacs_server_name_cfg:
for tmp in hwtacacs_server_name_cfg:
tmp_dict = dict()
for site in tmp:
if site.tag in ["serverHostName", "serverType", "isSecondaryServer", "isPublicNet", "vpnName"]:
tmp_dict[site.tag] = site.text
result["hwtacacs_server_name_cfg"].append(tmp_dict)
if result["hwtacacs_server_name_cfg"]:
cfg = dict()
config_list = list()
if hwtacacs_server_host_name:
cfg["serverHostName"] = hwtacacs_server_host_name.lower()
if hwtacacs_server_type:
cfg["serverType"] = hwtacacs_server_type.lower()
if hwtacacs_is_secondary_server:
cfg["isSecondaryServer"] = str(hwtacacs_is_secondary_server).lower()
if hwtacacs_is_public_net:
cfg["isPublicNet"] = str(hwtacacs_is_public_net).lower()
if hwtacacs_vpn_name:
cfg["vpnName"] = hwtacacs_vpn_name.lower()
for tmp in result["hwtacacs_server_name_cfg"]:
exist_cfg = dict()
if hwtacacs_server_host_name:
exist_cfg["serverHostName"] = tmp.get("serverHostName").lower()
if hwtacacs_server_type:
exist_cfg["serverType"] = tmp.get("serverType").lower()
if hwtacacs_is_secondary_server:
exist_cfg["isSecondaryServer"] = tmp.get("isSecondaryServer").lower()
if hwtacacs_is_public_net:
exist_cfg["isPublicNet"] = tmp.get("isPublicNet").lower()
if hwtacacs_vpn_name:
exist_cfg["vpnName"] = tmp.get("vpnName").lower()
config_list.append(exist_cfg)
if cfg in config_list:
if state == "present":
need_cfg = False
else:
need_cfg = True
else:
if state == "present":
need_cfg = True
else:
need_cfg = False
result["need_cfg"] = need_cfg
return result
    def merge_hwtacacs_host_server_cfg(self, **kwargs):
        """ Merge hwtacacs host server configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_host_name = module.params["hwtacacs_server_host_name"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"]
conf_str = CE_MERGE_HWTACACS_HOST_SERVER_CFG % (
hwtacacs_template, hwtacacs_server_host_name,
hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(),
hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower())
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Merge hwtacacs host server config failed.')
cmds = []
if hwtacacs_server_type == "Authentication":
cmd = "hwtacacs server authentication host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Authorization":
cmd = "hwtacacs server authorization host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Accounting":
cmd = "hwtacacs server accounting host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Common":
cmd = "hwtacacs server host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
cmds.append(cmd)
return cmds
    def delete_hwtacacs_host_server_cfg(self, **kwargs):
        """ Delete hwtacacs host server configuration """
module = kwargs["module"]
hwtacacs_template = module.params["hwtacacs_template"]
hwtacacs_server_host_name = module.params["hwtacacs_server_host_name"]
hwtacacs_server_type = module.params["hwtacacs_server_type"]
hwtacacs_is_secondary_server = module.params[
"hwtacacs_is_secondary_server"]
hwtacacs_vpn_name = module.params["hwtacacs_vpn_name"]
hwtacacs_is_public_net = module.params["hwtacacs_is_public_net"]
conf_str = CE_DELETE_HWTACACS_HOST_SERVER_CFG % (
hwtacacs_template, hwtacacs_server_host_name,
hwtacacs_server_type, str(hwtacacs_is_secondary_server).lower(),
hwtacacs_vpn_name, str(hwtacacs_is_public_net).lower())
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(
msg='Error: Delete hwtacacs host server config failed.')
cmds = []
if hwtacacs_server_type == "Authentication":
cmd = "undo hwtacacs server authentication host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Authorization":
cmd = "undo hwtacacs server authorization host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Accounting":
cmd = "undo hwtacacs server accounting host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
elif hwtacacs_server_type == "Common":
cmd = "undo hwtacacs server host host-name %s" % hwtacacs_server_host_name
if hwtacacs_vpn_name and hwtacacs_vpn_name != "_public_":
cmd += " vpn-instance %s" % hwtacacs_vpn_name
if hwtacacs_is_public_net:
cmd += " public-net"
if hwtacacs_is_secondary_server:
cmd += " secondary"
cmds.append(cmd)
return cmds
def check_name(**kwargs):
""" Check invalid name """
module = kwargs["module"]
name = kwargs["name"]
invalid_char = kwargs["invalid_char"]
for item in invalid_char:
if item in name:
module.fail_json(
                msg='Error: Invalid char %s is in the name %s.' % (item, name))
def check_module_argument(**kwargs):
""" Check module argument """
module = kwargs["module"]
# local para
local_user_name = module.params['local_user_name']
local_password = module.params['local_password']
local_ftp_dir = module.params['local_ftp_dir']
local_user_level = module.params['local_user_level']
local_user_group = module.params['local_user_group']
# radius para
radius_group_name = module.params['radius_group_name']
radius_server_ip = module.params['radius_server_ip']
radius_server_port = module.params['radius_server_port']
radius_vpn_name = module.params['radius_vpn_name']
radius_server_name = module.params['radius_server_name']
# hwtacacs para
hwtacacs_template = module.params['hwtacacs_template']
hwtacacs_server_ip = module.params['hwtacacs_server_ip']
hwtacacs_vpn_name = module.params['hwtacacs_vpn_name']
hwtacacs_server_host_name = module.params['hwtacacs_server_host_name']
    if local_user_name:
        if len(local_user_name) > 253:
            module.fail_json(
                msg='Error: The local_user_name %s is longer than 253 characters.' % local_user_name)
        check_name(module=module, name=local_user_name,
                   invalid_char=INVALID_USER_NAME_CHAR)
    if local_password and len(local_password) > 255:
        module.fail_json(
            msg='Error: The local_password %s is longer than 255 characters.' % local_password)
if local_user_level:
if int(local_user_level) > 15 or int(local_user_level) < 0:
module.fail_json(
msg='Error: The local_user_level %s is out of [0 - 15].' % local_user_level)
    if local_ftp_dir:
        if len(local_ftp_dir) > 255:
            module.fail_json(
                msg='Error: The local_ftp_dir %s is longer than 255 characters.' % local_ftp_dir)
    if local_user_group:
        if len(local_user_group) > 32 or len(local_user_group) < 1:
            module.fail_json(
                msg='Error: The local_user_group %s is out of [1 - 32].' % local_user_group)
    if radius_group_name and len(radius_group_name) > 32:
        module.fail_json(
            msg='Error: The radius_group_name %s is longer than 32 characters.' % radius_group_name)
if radius_server_ip and not check_ip_addr(radius_server_ip):
module.fail_json(
msg='Error: The radius_server_ip %s is invalid.' % radius_server_ip)
if radius_server_port and not radius_server_port.isdigit():
module.fail_json(
msg='Error: The radius_server_port %s is invalid.' % radius_server_port)
    if radius_vpn_name:
        if len(radius_vpn_name) > 31:
            module.fail_json(
                msg='Error: The radius_vpn_name %s is longer than 31 characters.' % radius_vpn_name)
        if ' ' in radius_vpn_name:
            module.fail_json(
                msg='Error: The radius_vpn_name %s includes a space.' % radius_vpn_name)
    if radius_server_name:
        if len(radius_server_name) > 255:
            module.fail_json(
                msg='Error: The radius_server_name %s is longer than 255 characters.' % radius_server_name)
        if ' ' in radius_server_name:
            module.fail_json(
                msg='Error: The radius_server_name %s includes a space.' % radius_server_name)
    if hwtacacs_template and len(hwtacacs_template) > 32:
        module.fail_json(
            msg='Error: The hwtacacs_template %s is longer than 32 characters.' % hwtacacs_template)
if hwtacacs_server_ip and not check_ip_addr(hwtacacs_server_ip):
module.fail_json(
msg='Error: The hwtacacs_server_ip %s is invalid.' % hwtacacs_server_ip)
    if hwtacacs_vpn_name:
        if len(hwtacacs_vpn_name) > 31:
            module.fail_json(
                msg='Error: The hwtacacs_vpn_name %s is longer than 31 characters.' % hwtacacs_vpn_name)
        if ' ' in hwtacacs_vpn_name:
            module.fail_json(
                msg='Error: The hwtacacs_vpn_name %s includes a space.' % hwtacacs_vpn_name)
    if hwtacacs_server_host_name:
        if len(hwtacacs_server_host_name) > 255:
            module.fail_json(
                msg='Error: The hwtacacs_server_host_name %s is longer than 255 characters.' % hwtacacs_server_host_name)
        if ' ' in hwtacacs_server_host_name:
            module.fail_json(
                msg='Error: The hwtacacs_server_host_name %s includes a space.' % hwtacacs_server_host_name)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
local_user_name=dict(type='str'),
local_password=dict(type='str', no_log=True),
local_service_type=dict(type='str'),
local_ftp_dir=dict(type='str'),
local_user_level=dict(type='str'),
local_user_group=dict(type='str'),
radius_group_name=dict(type='str'),
radius_server_type=dict(choices=['Authentication', 'Accounting']),
radius_server_ip=dict(type='str'),
radius_server_ipv6=dict(type='str'),
radius_server_port=dict(type='str'),
radius_server_mode=dict(
choices=['Secondary-server', 'Primary-server']),
radius_vpn_name=dict(type='str'),
radius_server_name=dict(type='str'),
hwtacacs_template=dict(type='str'),
hwtacacs_server_ip=dict(type='str'),
hwtacacs_server_ipv6=dict(type='str'),
hwtacacs_server_type=dict(
choices=['Authentication', 'Authorization', 'Accounting', 'Common']),
hwtacacs_is_secondary_server=dict(
required=False, default=False, type='bool'),
hwtacacs_vpn_name=dict(type='str'),
hwtacacs_is_public_net=dict(
required=False, default=False, type='bool'),
hwtacacs_server_host_name=dict(type='str')
)
argument_spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
check_module_argument(module=module)
changed = False
proposed = dict()
existing = dict()
end_state = dict()
updates = []
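    # Three independent blocks are handled below: local users, RADIUS server groups and HWTACACS templates; each runs only when its key parameter is set.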
# common para
state = module.params['state']
# local para
local_user_name = module.params['local_user_name']
local_password = module.params['local_password']
local_service_type = module.params['local_service_type']
local_ftp_dir = module.params['local_ftp_dir']
local_user_level = module.params['local_user_level']
local_user_group = module.params['local_user_group']
# radius para
radius_group_name = module.params['radius_group_name']
radius_server_type = module.params['radius_server_type']
radius_server_ip = module.params['radius_server_ip']
radius_server_ipv6 = module.params['radius_server_ipv6']
radius_server_port = module.params['radius_server_port']
radius_server_mode = module.params['radius_server_mode']
radius_vpn_name = module.params['radius_vpn_name']
radius_server_name = module.params['radius_server_name']
# hwtacacs para
hwtacacs_template = module.params['hwtacacs_template']
hwtacacs_server_ip = module.params['hwtacacs_server_ip']
hwtacacs_server_ipv6 = module.params['hwtacacs_server_ipv6']
hwtacacs_server_type = module.params['hwtacacs_server_type']
hwtacacs_is_secondary_server = module.params[
'hwtacacs_is_secondary_server']
hwtacacs_vpn_name = module.params['hwtacacs_vpn_name']
hwtacacs_is_public_net = module.params['hwtacacs_is_public_net']
hwtacacs_server_host_name = module.params['hwtacacs_server_host_name']
ce_aaa_server_host = AaaServerHost()
if not ce_aaa_server_host:
module.fail_json(msg='Error: Construct ce_aaa_server failed.')
# get proposed
proposed["state"] = state
if local_user_name:
proposed["local_user_name"] = local_user_name
if local_password:
proposed["local_password"] = "******"
if local_service_type:
proposed["local_service_type"] = local_service_type
if local_ftp_dir:
proposed["local_ftp_dir"] = local_ftp_dir
if local_user_level:
proposed["local_user_level"] = local_user_level
if local_user_group:
proposed["local_user_group"] = local_user_group
if radius_group_name:
proposed["radius_group_name"] = radius_group_name
if radius_server_type:
proposed["radius_server_type"] = radius_server_type
if radius_server_ip:
proposed["radius_server_ip"] = radius_server_ip
if radius_server_ipv6:
proposed["radius_server_ipv6"] = radius_server_ipv6
if radius_server_port:
proposed["radius_server_port"] = radius_server_port
if radius_server_mode:
proposed["radius_server_mode"] = radius_server_mode
if radius_vpn_name:
proposed["radius_vpn_name"] = radius_vpn_name
if radius_server_name:
proposed["radius_server_name"] = radius_server_name
if hwtacacs_template:
proposed["hwtacacs_template"] = hwtacacs_template
if hwtacacs_server_ip:
proposed["hwtacacs_server_ip"] = hwtacacs_server_ip
if hwtacacs_server_ipv6:
proposed["hwtacacs_server_ipv6"] = hwtacacs_server_ipv6
if hwtacacs_server_type:
proposed["hwtacacs_server_type"] = hwtacacs_server_type
proposed["hwtacacs_is_secondary_server"] = hwtacacs_is_secondary_server
if hwtacacs_vpn_name:
proposed["hwtacacs_vpn_name"] = hwtacacs_vpn_name
proposed["hwtacacs_is_public_net"] = hwtacacs_is_public_net
if hwtacacs_server_host_name:
proposed["hwtacacs_server_host_name"] = hwtacacs_server_host_name
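    # Local user block: query the current state, then apply a merge or delete only when a change is actually needed.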
if local_user_name:
if state == "present" and not local_password:
module.fail_json(
                msg='Error: Please input local_password when configuring a local user.')
local_user_result = ce_aaa_server_host.get_local_user_info(
module=module)
existing["local user name"] = local_user_result["local_user_info"]
if state == "present":
# present local user
if local_user_result["need_cfg"]:
cmd = ce_aaa_server_host.merge_local_user_info(module=module)
changed = True
updates.append(cmd)
else:
# absent local user
if local_user_result["need_cfg"]:
if not local_service_type and not local_ftp_dir and not local_user_level and not local_user_group:
cmd = ce_aaa_server_host.delete_local_user_info(
module=module)
else:
cmd = ce_aaa_server_host.merge_local_user_info(
module=module)
changed = True
updates.append(cmd)
local_user_result = ce_aaa_server_host.get_local_user_info(
module=module)
end_state["local user name"] = local_user_result["local_user_info"]
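    # RADIUS block: needs radius_server_ip, radius_server_ipv6 or radius_server_name, plus server type, port, mode and VPN name.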
if radius_group_name:
if not radius_server_ip and not radius_server_ipv6 and not radius_server_name:
module.fail_json(
msg='Error: Please input radius_server_ip or radius_server_ipv6 or radius_server_name.')
if radius_server_ip and radius_server_ipv6:
module.fail_json(
msg='Error: Please do not input radius_server_ip and radius_server_ipv6 at the same time.')
if not radius_server_type or not radius_server_port or not radius_server_mode or not radius_vpn_name:
module.fail_json(
                msg='Error: Please input radius_server_type, radius_server_port, radius_server_mode and radius_vpn_name.')
if radius_server_ip:
rds_server_ipv4_result = ce_aaa_server_host.get_radius_server_cfg_ipv4(
module=module)
if radius_server_ipv6:
rds_server_ipv6_result = ce_aaa_server_host.get_radius_server_cfg_ipv6(
module=module)
if radius_server_name:
rds_server_name_result = ce_aaa_server_host.get_radius_server_name(
module=module)
if radius_server_ip and rds_server_ipv4_result["radius_server_ip_v4"]:
existing["radius server ipv4"] = rds_server_ipv4_result[
"radius_server_ip_v4"]
if radius_server_ipv6 and rds_server_ipv6_result["radius_server_ip_v6"]:
existing["radius server ipv6"] = rds_server_ipv6_result[
"radius_server_ip_v6"]
if radius_server_name and rds_server_name_result["radius_server_name_cfg"]:
existing["radius server name cfg"] = rds_server_name_result[
"radius_server_name_cfg"]
if state == "present":
if radius_server_ip and rds_server_ipv4_result["need_cfg"]:
cmd = ce_aaa_server_host.merge_radius_server_cfg_ipv4(
module=module)
changed = True
updates.append(cmd)
if radius_server_ipv6 and rds_server_ipv6_result["need_cfg"]:
cmd = ce_aaa_server_host.merge_radius_server_cfg_ipv6(
module=module)
changed = True
updates.append(cmd)
if radius_server_name and rds_server_name_result["need_cfg"]:
cmd = ce_aaa_server_host.merge_radius_server_name(
module=module)
changed = True
updates.append(cmd)
else:
if radius_server_ip and rds_server_ipv4_result["need_cfg"]:
cmd = ce_aaa_server_host.delete_radius_server_cfg_ipv4(
module=module)
changed = True
updates.append(cmd)
if radius_server_ipv6 and rds_server_ipv6_result["need_cfg"]:
cmd = ce_aaa_server_host.delete_radius_server_cfg_ipv6(
module=module)
changed = True
updates.append(cmd)
if radius_server_name and rds_server_name_result["need_cfg"]:
cmd = ce_aaa_server_host.delete_radius_server_name(
module=module)
changed = True
updates.append(cmd)
if radius_server_ip:
rds_server_ipv4_result = ce_aaa_server_host.get_radius_server_cfg_ipv4(
module=module)
if radius_server_ipv6:
rds_server_ipv6_result = ce_aaa_server_host.get_radius_server_cfg_ipv6(
module=module)
if radius_server_name:
rds_server_name_result = ce_aaa_server_host.get_radius_server_name(
module=module)
if radius_server_ip and rds_server_ipv4_result["radius_server_ip_v4"]:
end_state["radius server ipv4"] = rds_server_ipv4_result[
"radius_server_ip_v4"]
if radius_server_ipv6 and rds_server_ipv6_result["radius_server_ip_v6"]:
end_state["radius server ipv6"] = rds_server_ipv6_result[
"radius_server_ip_v6"]
if radius_server_name and rds_server_name_result["radius_server_name_cfg"]:
end_state["radius server name cfg"] = rds_server_name_result[
"radius_server_name_cfg"]
if hwtacacs_template:
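        # HWTACACS block: needs hwtacacs_server_ip, hwtacacs_server_ipv6 or hwtacacs_server_host_name, plus server type and VPN name.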
if not hwtacacs_server_ip and not hwtacacs_server_ipv6 and not hwtacacs_server_host_name:
module.fail_json(
msg='Error: Please input hwtacacs_server_ip or hwtacacs_server_ipv6 or hwtacacs_server_host_name.')
if not hwtacacs_server_type or not hwtacacs_vpn_name:
module.fail_json(
                msg='Error: Please input hwtacacs_server_type and hwtacacs_vpn_name.')
if hwtacacs_server_ip and hwtacacs_server_ipv6:
module.fail_json(
msg='Error: Please do not set hwtacacs_server_ip and hwtacacs_server_ipv6 at the same time.')
if hwtacacs_vpn_name and hwtacacs_is_public_net:
module.fail_json(
msg='Error: Please do not set vpn and public net at the same time.')
if hwtacacs_server_ip:
hwtacacs_server_ipv4_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv4(
module=module)
if hwtacacs_server_ipv6:
hwtacacs_server_ipv6_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv6(
module=module)
if hwtacacs_server_host_name:
hwtacacs_host_name_result = ce_aaa_server_host.get_hwtacacs_host_server_cfg(
module=module)
if hwtacacs_server_ip and hwtacacs_server_ipv4_result["hwtacacs_server_cfg_ipv4"]:
existing["hwtacacs server cfg ipv4"] = hwtacacs_server_ipv4_result[
"hwtacacs_server_cfg_ipv4"]
if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["hwtacacs_server_cfg_ipv6"]:
existing["hwtacacs server cfg ipv6"] = hwtacacs_server_ipv6_result[
"hwtacacs_server_cfg_ipv6"]
if hwtacacs_server_host_name and hwtacacs_host_name_result["hwtacacs_server_name_cfg"]:
existing["hwtacacs server name cfg"] = hwtacacs_host_name_result[
"hwtacacs_server_name_cfg"]
if state == "present":
if hwtacacs_server_ip and hwtacacs_server_ipv4_result["need_cfg"]:
cmd = ce_aaa_server_host.merge_hwtacacs_server_cfg_ipv4(
module=module)
changed = True
updates.append(cmd)
if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["need_cfg"]:
cmd = ce_aaa_server_host.merge_hwtacacs_server_cfg_ipv6(
module=module)
changed = True
updates.append(cmd)
if hwtacacs_server_host_name and hwtacacs_host_name_result["need_cfg"]:
cmd = ce_aaa_server_host.merge_hwtacacs_host_server_cfg(
module=module)
changed = True
updates.append(cmd)
else:
if hwtacacs_server_ip and hwtacacs_server_ipv4_result["need_cfg"]:
cmd = ce_aaa_server_host.delete_hwtacacs_server_cfg_ipv4(
module=module)
changed = True
updates.append(cmd)
if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["need_cfg"]:
cmd = ce_aaa_server_host.delete_hwtacacs_server_cfg_ipv6(
module=module)
changed = True
updates.append(cmd)
if hwtacacs_server_host_name and hwtacacs_host_name_result["need_cfg"]:
cmd = ce_aaa_server_host.delete_hwtacacs_host_server_cfg(
module=module)
changed = True
updates.append(cmd)
if hwtacacs_server_ip:
hwtacacs_server_ipv4_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv4(
module=module)
if hwtacacs_server_ipv6:
hwtacacs_server_ipv6_result = ce_aaa_server_host.get_hwtacacs_server_cfg_ipv6(
module=module)
if hwtacacs_server_host_name:
hwtacacs_host_name_result = ce_aaa_server_host.get_hwtacacs_host_server_cfg(
module=module)
if hwtacacs_server_ip and hwtacacs_server_ipv4_result["hwtacacs_server_cfg_ipv4"]:
end_state["hwtacacs server cfg ipv4"] = hwtacacs_server_ipv4_result[
"hwtacacs_server_cfg_ipv4"]
if hwtacacs_server_ipv6 and hwtacacs_server_ipv6_result["hwtacacs_server_cfg_ipv6"]:
end_state["hwtacacs server cfg ipv6"] = hwtacacs_server_ipv6_result[
"hwtacacs_server_cfg_ipv6"]
if hwtacacs_server_host_name and hwtacacs_host_name_result["hwtacacs_server_name_cfg"]:
end_state["hwtacacs server name cfg"] = hwtacacs_host_name_result[
"hwtacacs_server_name_cfg"]
results = dict()
results['proposed'] = proposed
results['existing'] = existing
results['changed'] = changed
results['end_state'] = end_state
results['updates'] = updates
module.exit_json(**results)
if __name__ == '__main__':
main()
|
alxgu/ansible
|
lib/ansible/modules/network/cloudengine/ce_aaa_server_host.py
|
Python
|
gpl-3.0
| 104,262
|
#!/usr/bin/python3
import os
try:
    # probe whether python-dateutil is importable
    from dateutil import parser
    du = 1
except ImportError:
    du = 0
    print("No dateutil!")
if du == 0:
    os.system("sudo pip install python-dateutil")
else:
    print("dateutil already installed")
|
mikehankey/fireball_camera
|
update-install.py
|
Python
|
gpl-3.0
| 231
|
"""Device discovery functions for Zigbee Home Automation."""
from __future__ import annotations
from collections import Counter
import logging
from typing import Callable
from homeassistant import const as ha_const
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers.typing import HomeAssistantType
from . import const as zha_const, registries as zha_regs, typing as zha_typing
from .. import ( # noqa: F401 pylint: disable=unused-import,
binary_sensor,
climate,
cover,
device_tracker,
fan,
light,
lock,
number,
sensor,
switch,
)
from .channels import base
_LOGGER = logging.getLogger(__name__)
@callback
async def async_add_entities(
_async_add_entities: Callable,
entities: list[
tuple[
zha_typing.ZhaEntityType,
tuple[str, zha_typing.ZhaDeviceType, list[zha_typing.ChannelType]],
]
],
update_before_add: bool = True,
) -> None:
"""Add entities helper."""
if not entities:
return
to_add = [ent_cls(*args) for ent_cls, args in entities]
_async_add_entities(to_add, update_before_add=update_before_add)
entities.clear()
class ProbeEndpoint:
"""All discovered channels and entities of an endpoint."""
def __init__(self):
"""Initialize instance."""
self._device_configs = {}
@callback
def discover_entities(self, channel_pool: zha_typing.ChannelPoolType) -> None:
"""Process an endpoint on a zigpy device."""
self.discover_by_device_type(channel_pool)
self.discover_by_cluster_id(channel_pool)
@callback
def discover_by_device_type(self, channel_pool: zha_typing.ChannelPoolType) -> None:
"""Process an endpoint on a zigpy device."""
unique_id = channel_pool.unique_id
component = self._device_configs.get(unique_id, {}).get(ha_const.CONF_TYPE)
if component is None:
ep_profile_id = channel_pool.endpoint.profile_id
ep_device_type = channel_pool.endpoint.device_type
component = zha_regs.DEVICE_CLASS[ep_profile_id].get(ep_device_type)
if component and component in zha_const.PLATFORMS:
channels = channel_pool.unclaimed_channels()
entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
component, channel_pool.manufacturer, channel_pool.model, channels
)
if entity_class is None:
return
channel_pool.claim_channels(claimed)
channel_pool.async_new_entity(component, entity_class, unique_id, claimed)
@callback
def discover_by_cluster_id(self, channel_pool: zha_typing.ChannelPoolType) -> None:
"""Process an endpoint on a zigpy device."""
items = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.items()
single_input_clusters = {
cluster_class: match
for cluster_class, match in items
if not isinstance(cluster_class, int)
}
remaining_channels = channel_pool.unclaimed_channels()
for channel in remaining_channels:
if channel.cluster.cluster_id in zha_regs.CHANNEL_ONLY_CLUSTERS:
channel_pool.claim_channels([channel])
continue
component = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.get(
channel.cluster.cluster_id
)
if component is None:
for cluster_class, match in single_input_clusters.items():
if isinstance(channel.cluster, cluster_class):
component = match
break
self.probe_single_cluster(component, channel, channel_pool)
        # until we can get rid of registries
self.handle_on_off_output_cluster_exception(channel_pool)
@staticmethod
def probe_single_cluster(
component: str,
channel: zha_typing.ChannelType,
ep_channels: zha_typing.ChannelPoolType,
) -> None:
"""Probe specified cluster for specific component."""
if component is None or component not in zha_const.PLATFORMS:
return
channel_list = [channel]
unique_id = f"{ep_channels.unique_id}-{channel.cluster.cluster_id}"
entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
component, ep_channels.manufacturer, ep_channels.model, channel_list
)
if entity_class is None:
return
ep_channels.claim_channels(claimed)
ep_channels.async_new_entity(component, entity_class, unique_id, claimed)
def handle_on_off_output_cluster_exception(
self, ep_channels: zha_typing.ChannelPoolType
) -> None:
"""Process output clusters of the endpoint."""
profile_id = ep_channels.endpoint.profile_id
device_type = ep_channels.endpoint.device_type
if device_type in zha_regs.REMOTE_DEVICE_TYPES.get(profile_id, []):
return
for cluster_id, cluster in ep_channels.endpoint.out_clusters.items():
component = zha_regs.SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS.get(
cluster.cluster_id
)
if component is None:
continue
channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base.ZigbeeChannel
)
channel = channel_class(cluster, ep_channels)
self.probe_single_cluster(component, channel, ep_channels)
def initialize(self, hass: HomeAssistantType) -> None:
"""Update device overrides config."""
zha_config = hass.data[zha_const.DATA_ZHA].get(zha_const.DATA_ZHA_CONFIG, {})
overrides = zha_config.get(zha_const.CONF_DEVICE_CONFIG)
if overrides:
self._device_configs.update(overrides)
class GroupProbe:
"""Determine the appropriate component for a group."""
def __init__(self):
"""Initialize instance."""
self._hass = None
self._unsubs = []
def initialize(self, hass: HomeAssistantType) -> None:
"""Initialize the group probe."""
self._hass = hass
self._unsubs.append(
async_dispatcher_connect(
hass, zha_const.SIGNAL_GROUP_ENTITY_REMOVED, self._reprobe_group
)
)
def cleanup(self):
"""Clean up on when zha shuts down."""
for unsub in self._unsubs[:]:
unsub()
self._unsubs.remove(unsub)
def _reprobe_group(self, group_id: int) -> None:
"""Reprobe a group for entities after its members change."""
zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
zha_group = zha_gateway.groups.get(group_id)
if zha_group is None:
return
self.discover_group_entities(zha_group)
@callback
def discover_group_entities(self, group: zha_typing.ZhaGroupType) -> None:
"""Process a group and create any entities that are needed."""
# only create a group entity if there are 2 or more members in a group
if len(group.members) < 2:
_LOGGER.debug(
"Group: %s:0x%04x has less than 2 members - skipping entity discovery",
group.name,
group.group_id,
)
return
entity_domains = GroupProbe.determine_entity_domains(self._hass, group)
if not entity_domains:
return
zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
for domain in entity_domains:
entity_class = zha_regs.ZHA_ENTITIES.get_group_entity(domain)
if entity_class is None:
continue
self._hass.data[zha_const.DATA_ZHA][domain].append(
(
entity_class,
(
group.get_domain_entity_ids(domain),
f"{domain}_zha_group_0x{group.group_id:04x}",
group.group_id,
zha_gateway.coordinator_zha_device,
),
)
)
async_dispatcher_send(self._hass, zha_const.SIGNAL_ADD_ENTITIES)
@staticmethod
def determine_entity_domains(
hass: HomeAssistantType, group: zha_typing.ZhaGroupType
) -> list[str]:
"""Determine the entity domains for this group."""
entity_domains: list[str] = []
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
all_domain_occurrences = []
for member in group.members:
if member.device.is_coordinator:
continue
entities = async_entries_for_device(
zha_gateway.ha_entity_registry,
member.device.device_id,
include_disabled_entities=True,
)
all_domain_occurrences.extend(
[
entity.domain
for entity in entities
if entity.domain in zha_regs.GROUP_ENTITY_DOMAINS
]
)
if not all_domain_occurrences:
return entity_domains
        # keep only the domains that occur on at least 2 entities across the group
counts = Counter(all_domain_occurrences)
entity_domains = [domain[0] for domain in counts.items() if domain[1] >= 2]
_LOGGER.debug(
"The entity domains are: %s for group: %s:0x%04x",
entity_domains,
group.name,
group.group_id,
)
return entity_domains
PROBE = ProbeEndpoint()
GROUP_PROBE = GroupProbe()
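# --- Hedged sketch (not part of the original module) ---
# GroupProbe.determine_entity_domains above keeps only the entity domains that
# occur on at least two group members. A standalone toy version of that counting
# rule, with made-up domain lists (Counter is already imported at the top of
# this file):
def _toy_group_domains(member_domains):
    """Return the domains that appear at least twice across all members."""
    counts = Counter(domain for domains in member_domains for domain in domains)
    return [domain for domain, count in counts.items() if count >= 2]

# Example: _toy_group_domains([["light"], ["light", "switch"], ["switch"]]) == ["light", "switch"]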
|
w1ll1am23/home-assistant
|
homeassistant/components/zha/core/discovery.py
|
Python
|
apache-2.0
| 9,849
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for observation transformation and base classes for their users."""
import collections
import enum
from typing import Any, Dict, NamedTuple, Optional, Sequence, Tuple
from dm_env import specs
import jax.numpy as jnp
import numpy as np
import tree
from diplomacy.environment import action_utils
from diplomacy.environment import observation_utils as utils
from diplomacy.environment import province_order
from diplomacy.environment import tree_utils
class ObservationTransformState(NamedTuple):
# Board state at the last moves phase.
previous_board_state: np.ndarray
# Most recent board state.
last_action_board_state: np.ndarray
# Actions taken since the last moves phase.
actions_since_previous_moves_phase: np.ndarray
# Records if last phase was a moves phase.
last_phase_was_moves: bool
def update_state(
observation: utils.Observation,
prev_state: Optional[ObservationTransformState]
) -> ObservationTransformState:
"""Returns an updated state for alliance features."""
if prev_state is None:
last_phase_was_moves = False
last_board_state = None
previous_board_state = np.zeros(shape=utils.OBSERVATION_BOARD_SHAPE)
actions_since_previous_moves_phase = np.full((utils.NUM_AREAS, 3),
-1,
dtype=np.int32)
else:
(previous_board_state, last_board_state, actions_since_previous_moves_phase,
last_phase_was_moves) = prev_state
actions_since_previous_moves_phase = actions_since_previous_moves_phase.copy()
if last_phase_was_moves:
actions_since_previous_moves_phase[:] = -1
last_phase_was_moves = False
actions_since_previous_moves_phase[:] = np.roll(
actions_since_previous_moves_phase, axis=1, shift=-1)
actions_since_previous_moves_phase[:, -1] = -1
for action in observation.last_actions:
order_type, (province_id, coast), _, _ = action_utils.action_breakdown(
action)
if order_type == action_utils.WAIVE:
continue
elif order_type == action_utils.BUILD_ARMY:
area = utils.area_from_province_id_and_area_index(province_id, 0)
elif order_type == action_utils.BUILD_FLEET:
if utils.obs_index_start_and_num_areas(province_id)[1] == 3:
area = utils.area_from_province_id_and_area_index(
province_id, coast + 1)
else:
area = utils.area_from_province_id_and_area_index(province_id, 0)
else:
area = utils.area_id_for_unit_in_province_id(province_id,
last_board_state)
assert actions_since_previous_moves_phase[area, -1] == -1
actions_since_previous_moves_phase[area, -1] = action >> 48
if observation.season.is_moves():
previous_board_state = observation.board
# On the next season, update the alliance features.
last_phase_was_moves = True
return ObservationTransformState(previous_board_state,
observation.board,
actions_since_previous_moves_phase,
last_phase_was_moves)
class TopologicalIndexing(enum.Enum):
NONE = 0
MILA = 1
MILA_TOPOLOGICAL_ORDER = [
'YOR', 'EDI', 'LON', 'LVP', 'NTH', 'WAL', 'CLY', 'NWG', 'ECH', 'IRI', 'NAO',
'BEL', 'DEN', 'HEL', 'HOL', 'NWY', 'SKA', 'BAR', 'BRE', 'MAO', 'PIC', 'BUR',
'RUH', 'BAL', 'KIE', 'SWE', 'FIN', 'STP', 'STP/NC', 'GAS', 'PAR', 'NAF',
'POR', 'SPA', 'SPA/NC', 'SPA/SC', 'WES', 'MAR', 'MUN', 'BER', 'GOB', 'LVN',
'PRU', 'STP/SC', 'MOS', 'TUN', 'GOL', 'TYS', 'PIE', 'BOH', 'SIL', 'TYR',
'WAR', 'SEV', 'UKR', 'ION', 'TUS', 'NAP', 'ROM', 'VEN', 'GAL', 'VIE', 'TRI',
'ARM', 'BLA', 'RUM', 'ADR', 'AEG', 'ALB', 'APU', 'EAS', 'GRE', 'BUD', 'SER',
'ANK', 'SMY', 'SYR', 'BUL', 'BUL/EC', 'CON', 'BUL/SC']
mila_topological_index = province_order.topological_index(
province_order.get_mdf_content(province_order.MapMDF.BICOASTAL_MAP),
MILA_TOPOLOGICAL_ORDER)
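# --- Hedged sketch (not part of the original module) ---
# `mila_topological_index` above maps MILA_TOPOLOGICAL_ORDER onto the map's own
# area ordering so that unit-actions can be produced in the MILA (Paquette et
# al.) order. A standalone toy version of that re-indexing idea, with made-up
# area names, is:
def _toy_topological_index(observation_order, preferred_order):
  """Returns observation indices arranged so areas are visited in preferred_order."""
  position = {name: index for index, name in enumerate(observation_order)}
  return [position[name] for name in preferred_order if name in position]
# Example: _toy_topological_index(['LON', 'YOR', 'EDI'], ['YOR', 'EDI', 'LON']) == [1, 2, 0]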
class GeneralObservationTransformer:
"""A general observation transformer class.
Additional fields should default to False to avoid changing existing
configs. Additional arguments to the obs transform functions that support
optional fields must be keyword-only arguments.
"""
def __init__(
self,
*,
rng_key: Optional[jnp.ndarray],
board_state: bool = True,
last_moves_phase_board_state: bool = True,
actions_since_last_moves_phase: bool = True,
season: bool = True,
build_numbers: bool = True,
topological_indexing: TopologicalIndexing = TopologicalIndexing.NONE,
areas: bool = True,
last_action: bool = True,
legal_actions_mask: bool = True,
temperature: bool = True,
) -> None:
"""Constructor which configures the fields the transformer will return.
Each argument represents whether a particular field should be included in
the observation, except for topological indexing.
Args:
rng_key: A Jax random number generator key, for use if an observation
transformation is ever stochastic.
board_state: Flag for whether to include the current board state,
an array containing current unit positions, dislodged units, supply
centre ownership, and where units may be removed or built.
last_moves_phase_board_state: Flag for whether to include the board state
at the start of the last moves phase. If actions_since_last_moves is
True, this board state is necessary to give context to the actions.
actions_since_last_moves_phase: Flag for whether to include the actions
since the last moves phase. These are given by area, with 3 channels
(for moves, retreats and builds phases). If there was no action in the
area, then the field is a 0.
season: Flag for whether to include the current season in the observation.
There are five seasons, as listed in observation_utils.Season.
build_numbers: Flag for whether to include the number of builds/disbands
each player has. Always 0 except in a builds phase.
      topological_indexing: When choosing unit actions in sequence, the order
        they are chosen in is determined by how step_observations sorts the
        areas. This config determines that ordering. NONE orders them
        according to the area index in the observation; MILA uses the same
        ordering as in Paquette et al.
areas: Flag for whether to include a vector of length NUM_AREAS, which is
True in the area that the next unit-action will be chosen for.
last_action: Flag for whether to include the action chosen in the previous
unit-action selection in the input. This is used e.g. by teacher
forcing. When sampling from a network, it can use the sample it drew in
the previous step of the policy head.
legal_actions_mask: Flag for whether to include a mask of which actions
are legal. It will be based on the consecutive action indexes, and have
length constants.MAX_ACTION_INDEX.
temperature: Flag for whether to include a sampling temperature in the
neural network input.
"""
self._rng_key = rng_key
self.board_state = board_state
self.last_moves_phase_board_state = last_moves_phase_board_state
self.actions_since_last_moves_phase = actions_since_last_moves_phase
self.season = season
self.build_numbers = build_numbers
self.areas = areas
self.last_action = last_action
self.legal_actions_mask = legal_actions_mask
self.temperature = temperature
self._topological_indexing = topological_indexing
def initial_observation_spec(
self,
num_players: int
) -> Dict[str, specs.Array]:
"""Returns a spec for the output of initial_observation_transform."""
spec = collections.OrderedDict()
if self.board_state:
spec['board_state'] = specs.Array(
(utils.NUM_AREAS, utils.PROVINCE_VECTOR_LENGTH), dtype=np.float32)
if self.last_moves_phase_board_state:
spec['last_moves_phase_board_state'] = specs.Array(
(utils.NUM_AREAS, utils.PROVINCE_VECTOR_LENGTH), dtype=np.float32)
if self.actions_since_last_moves_phase:
spec['actions_since_last_moves_phase'] = specs.Array(
(utils.NUM_AREAS, 3), dtype=np.int32)
if self.season:
spec['season'] = specs.Array((), dtype=np.int32)
if self.build_numbers:
spec['build_numbers'] = specs.Array((num_players,), dtype=np.int32)
return spec
def initial_observation_transform(
self,
observation: utils.Observation,
prev_state: Optional[ObservationTransformState]
) -> Tuple[Dict[str, jnp.ndarray], ObservationTransformState]:
"""Constructs initial Network observations and state.
See initial_observation_spec for array sizes, and the README for details on
how to construct each field.
Please implement your observation_test to check that these are constructed
properly.
Args:
observation: Parsed observation from environment
prev_state: previous ObservationTransformState
Returns:
      initial observations and initial state.
"""
next_state = update_state(observation, prev_state)
initial_observation = collections.OrderedDict()
if self.board_state:
initial_observation['board_state'] = np.array(observation.board,
dtype=np.float32)
if self.last_moves_phase_board_state:
initial_observation['last_moves_phase_board_state'] = np.array(
prev_state.previous_board_state if prev_state else
observation.board, dtype=np.float32)
if self.actions_since_last_moves_phase:
initial_observation['actions_since_last_moves_phase'] = np.cast[np.int32](
next_state.actions_since_previous_moves_phase)
if self.season:
initial_observation['season'] = np.cast[np.int32](
observation.season.value)
if self.build_numbers:
initial_observation['build_numbers'] = np.array(
observation.build_numbers, dtype=np.int32)
return initial_observation, next_state
def step_observation_spec(
self
) -> Dict[str, specs.Array]:
"""Returns a spec for the output of step_observation_transform."""
spec = collections.OrderedDict()
if self.areas:
spec['areas'] = specs.Array(shape=(utils.NUM_AREAS,), dtype=bool)
if self.last_action:
spec['last_action'] = specs.Array(shape=(), dtype=np.int32)
if self.legal_actions_mask:
spec['legal_actions_mask'] = specs.Array(
shape=(action_utils.MAX_ACTION_INDEX,), dtype=np.uint8)
if self.temperature:
spec['temperature'] = specs.Array(shape=(1,), dtype=np.float32)
return spec
def step_observation_transform(
self,
transformed_initial_observation: Dict[str, jnp.ndarray],
legal_actions: Sequence[jnp.ndarray],
slot: int,
last_action: int,
area: int,
step_count: int,
previous_area: Optional[int],
temperature: float
) -> Dict[str, jnp.ndarray]:
"""Converts raw step obs. from the diplomacy env. to network inputs.
See step_observation_spec for array sizes, and the README for details on
how to construct each field.
Please implement your observation_test to check that these are constructed
properly.
Args:
transformed_initial_observation: Initial observation made with same config
legal_actions: legal actions for all players this turn
slot: the slot/player_id we are creating the obs for
last_action: the player's last action (used for teacher forcing)
area: the area to create an action for
step_count: how many unit actions have been created so far
previous_area: the area for the previous unit action
temperature: the sampling temperature for unit actions
Returns:
The step observation.
"""
del previous_area # Unused
# Areas to sum over.
areas = np.zeros(shape=(utils.NUM_AREAS,), dtype=bool)
if area == utils.INVALID_AREA_FLAG:
raise NotImplementedError('network requires area ordering to be '
'specified')
if area == utils.BUILD_PHASE_AREA_FLAG:
build_numbers = transformed_initial_observation['build_numbers']
board = transformed_initial_observation['board_state']
legal_actions_list = legal_actions[slot]
if build_numbers[slot] > 0:
player_areas = utils.build_areas(slot, board)
else:
player_areas = utils.removable_areas(slot, board)
areas[player_areas] = True
else:
province, _ = utils.province_id_and_area_index(area)
legal_actions_list = action_utils.actions_for_province(
legal_actions[slot], province)
areas[area] = True
if not legal_actions_list:
raise ValueError('No legal actions found for area {}'.format(area))
legal_actions_mask = np.full(action_utils.MAX_ACTION_INDEX, False)
legal_actions_mask[action_utils.action_index(
np.array(legal_actions_list))] = True
step_obs = collections.OrderedDict()
if self.areas:
step_obs['areas'] = areas
if self.last_action:
step_obs['last_action'] = np.array(
action_utils.shrink_actions(last_action if step_count else -1),
dtype=np.int32)
if self.legal_actions_mask:
step_obs['legal_actions_mask'] = legal_actions_mask
if self.temperature:
step_obs['temperature'] = np.array([temperature], dtype=np.float32)
return step_obs
def observation_spec(
self,
num_players: int
) -> Tuple[Dict[str, specs.Array], Dict[str, specs.Array], specs.Array]:
"""Returns a spec for the output of observation_transform."""
return (
self.initial_observation_spec(num_players), # Initial
tree.map_structure(
lambda x: x.replace( # pylint: disable=g-long-lambda
shape=(num_players, action_utils.MAX_ORDERS) + x.shape),
self.step_observation_spec()), # Step Observations
specs.Array((num_players,), dtype=np.int32)) # Sequence Lengths
def zero_observation(self, num_players):
return tree.map_structure(lambda spec: spec.generate_value(),
self.observation_spec(num_players))
def observation_transform(
self,
*,
observation: utils.Observation,
legal_actions: Sequence[np.ndarray],
slots_list: Sequence[int],
prev_state: Any,
temperature: float,
area_lists: Optional[Sequence[Sequence[int]]] = None,
forced_actions: Optional[Sequence[Sequence[int]]] = None,
) -> Tuple[Tuple[Dict[str, jnp.ndarray],
Dict[str, jnp.ndarray], Sequence[int]],
ObservationTransformState]:
"""Transform the observation into the format required by Network policies.
Args:
observation: Observation from environment
legal_actions: legal actions for all players this turn
slots_list: the slots/player_ids we are creating obs for
prev_state: previous ObservationTransformState
temperature: the sampling temperature for unit actions
area_lists: Order to process areas in. None for a default ordering.
forced_actions: actions from teacher forcing. None when sampling.
Returns:
(initial_observation, stacked_step_observations,
step_observation_sequence_lengths), next_obs_transform_state
"""
if area_lists is None:
area_lists = []
for player in slots_list:
topo_index = self._topological_index()
area_lists.append(
utils.order_relevant_areas(observation, player, topo_index))
initial_observation, next_state = self.initial_observation_transform(
observation, prev_state)
num_players = len(legal_actions)
sequence_lengths = np.zeros(shape=(num_players,), dtype=np.int32)
zero_step_obs = tree.map_structure(
specs.Array.generate_value,
self.step_observation_spec()
)
step_observations = [[zero_step_obs] * action_utils.MAX_ORDERS
for _ in range(num_players)]
if len(slots_list) != len(area_lists):
raise ValueError('area_lists and slots_list different lengths')
for player, area_list in zip(
slots_list, area_lists):
sequence_lengths[player] = len(area_list)
previous_area = utils.INVALID_AREA_FLAG # No last action on 1st iteration
for i, area in enumerate(area_list):
last_action = 0
if forced_actions is not None and i > 0:
# Find the right last action, in case the forced actions are not in
# the order this network produces actions.
if area in (utils.INVALID_AREA_FLAG, utils.BUILD_PHASE_AREA_FLAG):
last_action = forced_actions[player][i - 1]
else:
# Find the action with the right area.
last_action = action_utils.find_action_with_area(
forced_actions[player], previous_area)
step_observations[player][i] = self.step_observation_transform(
initial_observation, legal_actions, player, last_action, area, i,
previous_area, temperature)
previous_area = area
stacked_step_obs_per_player = []
for player in range(num_players):
stacked_step_obs_per_player.append(
tree_utils.tree_stack(step_observations[player]))
stacked_step_obs = tree_utils.tree_stack(stacked_step_obs_per_player)
return (initial_observation, stacked_step_obs, sequence_lengths), next_state
def _topological_index(self):
"""Returns the order in which to produce orders from different areas.
If None, the order in the observation will be used.
Returns:
A list of areas
Raises:
RuntimeError: on hitting unexpected branch
"""
if self._topological_indexing == TopologicalIndexing.NONE:
return None
elif self._topological_indexing == TopologicalIndexing.MILA:
return mila_topological_index
else:
raise RuntimeError('Unexpected Branch')
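# --- Hedged sketch (not part of the original module) ---
# update_state above keeps a (NUM_AREAS, 3) rolling buffer of recent actions:
# each phase shifts the columns left with np.roll, clears the newest column to
# -1, and wipes the whole buffer on the phase after a moves phase. A standalone
# toy version of that rolling history for a single area, without numpy:
def _toy_roll_history(history, new_action, reset=False):
  """Shifts a fixed-length action history left and appends the newest action."""
  if reset:
    history = [-1] * len(history)
  history = history[1:] + [-1]  # mirrors np.roll(..., shift=-1) plus clearing the last slot
  history[-1] = new_action
  return history
# Example: _toy_roll_history([-1, 5, 9], 12) == [5, 9, 12]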
|
deepmind/diplomacy
|
environment/observation_transformation.py
|
Python
|
apache-2.0
| 18,827
|
#!/usr/bin/env python3
import stegpng
from sys import argv
img = stegpng.open(argv[1], ignore_signature=False)
#for chunk in img.chunks:
#print(chunk.type)
#print(chunk.is_supported())
#print('='*50)
def png_chunk_summary(chunk):
print(chunk.type)
if not chunk.is_supported():
        print('Unsupported chunk: {}------------------------------------------------------'.format(chunk.type))
return
print("Valid: {}".format(chunk.is_valid()))
if chunk.type not in ('PLTE', 'IDAT'):
for key, val in chunk.get_payload().items():
print("{}: {}".format(key, val))
idats = []
for indice, chunk in enumerate(img.chunks):
if chunk.type == 'IDAT':
idats.append(chunk)
continue
else:
if len(idats) == 1:
print("Report for chunk {}:".format(indice - 1))
png_chunk_summary(idats[0])
print('='*50)
elif len(idats) > 1:
print("Hidding {} IDAT chunks".format(len(idats)))
idats = []
print("Report for chunk {}".format(indice))
png_chunk_summary(chunk)
print('='*50)
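# --- Hedged sketch (not part of the original script) ---
# The summary above relies on stegpng for chunk objects. The raw layout being
# walked is the standard PNG one: an 8-byte signature followed by chunks of a
# 4-byte big-endian length, a 4-byte type, the payload, and a 4-byte CRC. A
# standard-library sketch of that walk over raw bytes:
import struct

def iter_png_chunks(data):
    """Yield (type, payload) pairs from raw PNG bytes, stopping at IEND."""
    assert data[:8] == b'\x89PNG\r\n\x1a\n', "not a PNG signature"
    offset = 8
    while offset + 8 <= len(data):
        (length,) = struct.unpack('>I', data[offset:offset + 4])
        chunk_type = data[offset + 4:offset + 8].decode('ascii')
        payload = data[offset + 8:offset + 8 + length]
        yield chunk_type, payload
        offset += 12 + length  # length + type + payload + CRC
        if chunk_type == 'IEND':
            break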
|
WHGhost/stegPNG
|
tests/summarize.py
|
Python
|
gpl-3.0
| 1,122
|
from spydht.spydht import DHT
import nacl.signing
import time
import hashlib
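# This script acts as a second DHT node: it boots from the node at port 31000,
# stores `content` under a key derived from sha256(id + content), reads it back,
# and then re-stores new content under a new key while referencing the old key.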
host1, port1 = 'localhost', 31000
key2 = nacl.signing.SigningKey.generate()
host2, port2 = 'localhost', 3101
dht2 = DHT(host2, port2, key2, boot_host=host1, boot_port=port1, wan_ip="127.0.0.1")
time.sleep(5)
content = "x"
id = "test"
dht2[id] = content
time.sleep(2)
print(dht2.buckets.buckets)
key = hashlib.sha256(id.encode("ascii") + content.encode("ascii")).hexdigest()
print("hererere")
print(dht2.data)
try:
print(dht2[key])
except:
pass
print("After .")
print("what key should be")
print(int(key, 16))
content = "new"
id = "new content"
dht2[{"old_key": int(key, 16), "id": "new content"}] = content
key = hashlib.sha256(id.encode("ascii") + content.encode("ascii")).hexdigest()
print("new key: ")
print(int(key, 16))
print(dht2[key])
while True:
time.sleep(1)
|
robertsdotpm/spydht
|
node_2.py
|
Python
|
bsd-2-clause
| 889
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def metric_accessors():
cars = h2o.import_file(path=tests.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
# regression
response_col = "economy"
distribution = "gaussian"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# r2
r21 = gbm.r2(train=True, valid=False, xval=False)
assert isinstance(r21, float)
r22 = gbm.r2(train=False, valid=True, xval=False)
assert isinstance(r22, float)
r23 = gbm.r2(train=False, valid=False, xval=True)
assert isinstance(r23, float)
r2 = gbm.r2(train=True, valid=True, xval=False)
assert "train" in r2.keys() and "valid" in r2.keys(), "expected training and validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["valid"]))
assert r2["valid"] == r22
r2 = gbm.r2(train=True, valid=False, xval=True)
assert "train" in r2.keys() and "xval" in r2.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["xval"]))
assert r2["xval"] == r23
r2 = gbm.r2(train=True, valid=True, xval=True)
assert "train" in r2.keys() and "valid" in r2.keys() and "xval" in r2.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(r2["train"]), type(r2["valid"]), type(r2["xval"]))
r2 = gbm.r2(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(r2, float)
assert r2 == r21
r2 = gbm.r2(train=False, valid=True, xval=True)
assert "valid" in r2.keys() and "xval" in r2.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["valid"]), type(r2["xval"]))
# mean_residual_deviance
mean_residual_deviance1 = gbm.mean_residual_deviance(train=True, valid=False, xval=False)
assert isinstance(mean_residual_deviance1, float)
mean_residual_deviance2 = gbm.mean_residual_deviance(train=False, valid=True, xval=False)
assert isinstance(mean_residual_deviance2, float)
mean_residual_deviance3 = gbm.mean_residual_deviance(train=False, valid=False, xval=True)
assert isinstance(mean_residual_deviance3, float)
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=False)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys(), "expected training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]))
assert mean_residual_deviance["valid"] == mean_residual_deviance2
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=False, xval=True)
assert "train" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["xval"]))
assert mean_residual_deviance["xval"] == mean_residual_deviance3
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=True)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mean_residual_deviance, float)
assert mean_residual_deviance == mean_residual_deviance1
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=True, xval=True)
assert "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
# binomial
cars = h2o.import_file(path=tests.locate("smalldata/junit/cars_20mpg.csv"))
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
distribution = "bernoulli"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col], x=train[predictors], validation_y=valid[response_col], validation_x=valid[predictors], nfolds=3, distribution=distribution, fold_assignment="Random")
# auc
auc1 = gbm.auc(train=True, valid=False, xval=False)
assert isinstance(auc1, float)
auc2 = gbm.auc(train=False, valid=True, xval=False)
assert isinstance(auc2, float)
auc3 = gbm.auc(train=False, valid=False, xval=True)
assert isinstance(auc3, float)
auc = gbm.auc(train=True, valid=True, xval=False)
assert "train" in auc.keys() and "valid" in auc.keys(), "expected training and validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["valid"]))
assert auc["valid"] == auc2
auc = gbm.auc(train=True, valid=False, xval=True)
assert "train" in auc.keys() and "xval" in auc.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["xval"]))
assert auc["xval"] == auc3
auc = gbm.auc(train=True, valid=True, xval=True)
assert "train" in auc.keys() and "valid" in auc.keys() and "xval" in auc.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(auc["train"]), type(auc["valid"]), type(auc["xval"]))
auc = gbm.auc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(auc, float)
assert auc == auc1
auc = gbm.auc(train=False, valid=True, xval=True)
assert "valid" in auc.keys() and "xval" in auc.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["valid"]), type(auc["xval"]))
# roc
(fprs1, tprs1) = gbm.roc(train=True, valid=False, xval=False)
assert isinstance(fprs1, list)
assert isinstance(tprs1, list)
(fprs2, tprs2) = gbm.roc(train=False, valid=True, xval=False)
assert isinstance(fprs2, list)
assert isinstance(tprs2, list)
(fprs3, tprs3) = gbm.roc(train=False, valid=False, xval=True)
assert isinstance(fprs3, list)
assert isinstance(tprs3, list)
roc = gbm.roc(train=True, valid=True, xval=False)
assert "train" in roc.keys() and "valid" in roc.keys(), "expected training and validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple), "expected training and validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["valid"]))
assert roc["valid"][0] == fprs2
assert roc["valid"][1] == tprs2
roc = gbm.roc(train=True, valid=False, xval=True)
assert "train" in roc.keys() and "xval" in roc.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["xval"], tuple), "expected training and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["xval"]))
assert roc["xval"][0] == fprs3
assert roc["xval"][1] == tprs3
roc = gbm.roc(train=True, valid=True, xval=True)
assert "train" in roc.keys() and "valid" in roc.keys() and "xval" in roc.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "expected training, validation, and cross validation metrics to be tuples, but got {0}, {1}, and {2}".format(type(roc["train"]), type(roc["valid"]), type(roc["xval"]))
(fprs, tprs) = gbm.roc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(fprs, list)
assert isinstance(tprs, list)
assert fprs == fprs1
assert tprs == tprs1
roc = gbm.roc(train=False, valid=True, xval=True)
assert "valid" in roc.keys() and "xval" in roc.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert len(roc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(roc.keys())
assert isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "validation and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["valid"]), type(roc["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# giniCoef
giniCoef1 = gbm.giniCoef(train=True, valid=False, xval=False)
assert isinstance(giniCoef1, float)
giniCoef2 = gbm.giniCoef(train=False, valid=True, xval=False)
assert isinstance(giniCoef2, float)
giniCoef3 = gbm.giniCoef(train=False, valid=False, xval=True)
assert isinstance(giniCoef3, float)
giniCoef = gbm.giniCoef(train=True, valid=True, xval=False)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys(), "expected training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["valid"]))
assert giniCoef["valid"] == giniCoef2
giniCoef = gbm.giniCoef(train=True, valid=False, xval=True)
assert "train" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["xval"]))
assert giniCoef["xval"] == giniCoef3
giniCoef = gbm.giniCoef(train=True, valid=True, xval=True)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(giniCoef["train"]), type(giniCoef["valid"]), type(giniCoef["xval"]))
giniCoef = gbm.giniCoef(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(giniCoef, float)
assert giniCoef == giniCoef1
giniCoef = gbm.giniCoef(train=False, valid=True, xval=True)
assert "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["valid"]), type(giniCoef["xval"]))
# F1
F11 = gbm.F1(train=True, valid=False, xval=False)
F12 = gbm.F1(train=False, valid=True, xval=False)
F13 = gbm.F1(train=False, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=False)
F1 = gbm.F1(train=True, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=True)
F1 = gbm.F1(train=False, valid=False, xval=False) # default: return training metrics
F1 = gbm.F1(train=False, valid=True, xval=True)
# F0point5
F0point51 = gbm.F0point5(train=True, valid=False, xval=False)
F0point52 = gbm.F0point5(train=False, valid=True, xval=False)
F0point53 = gbm.F0point5(train=False, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=False)
F0point5 = gbm.F0point5(train=True, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=True)
F0point5 = gbm.F0point5(train=False, valid=False, xval=False) # default: return training metrics
F0point5 = gbm.F0point5(train=False, valid=True, xval=True)
# F2
F21 = gbm.F2(train=True, valid=False, xval=False)
F22 = gbm.F2(train=False, valid=True, xval=False)
F23 = gbm.F2(train=False, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=False)
F2 = gbm.F2(train=True, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=True)
F2 = gbm.F2(train=False, valid=False, xval=False) # default: return training metrics
F2 = gbm.F2(train=False, valid=True, xval=True)
# accuracy
accuracy1 = gbm.accuracy(train=True, valid=False, xval=False)
accuracy2 = gbm.accuracy(train=False, valid=True, xval=False)
accuracy3 = gbm.accuracy(train=False, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=False)
accuracy = gbm.accuracy(train=True, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=True)
accuracy = gbm.accuracy(train=False, valid=False, xval=False) # default: return training metrics
accuracy = gbm.accuracy(train=False, valid=True, xval=True)
# error
error1 = gbm.error(train=True, valid=False, xval=False)
error2 = gbm.error(train=False, valid=True, xval=False)
error3 = gbm.error(train=False, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=False)
error = gbm.error(train=True, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=True)
error = gbm.error(train=False, valid=False, xval=False) # default: return training metrics
error = gbm.error(train=False, valid=True, xval=True)
# precision
precision1 = gbm.precision(train=True, valid=False, xval=False)
precision2 = gbm.precision(train=False, valid=True, xval=False)
precision3 = gbm.precision(train=False, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=False)
precision = gbm.precision(train=True, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=True)
precision = gbm.precision(train=False, valid=False, xval=False) # default: return training metrics
precision = gbm.precision(train=False, valid=True, xval=True)
# mcc
mcc1 = gbm.mcc(train=True, valid=False, xval=False)
mcc2 = gbm.mcc(train=False, valid=True, xval=False)
mcc3 = gbm.mcc(train=False, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=False)
mcc = gbm.mcc(train=True, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=True)
mcc = gbm.mcc(train=False, valid=False, xval=False) # default: return training metrics
mcc = gbm.mcc(train=False, valid=True, xval=True)
# max_per_class_error
max_per_class_error1 = gbm.max_per_class_error(train=True, valid=False, xval=False)
max_per_class_error2 = gbm.max_per_class_error(train=False, valid=True, xval=False)
max_per_class_error3 = gbm.max_per_class_error(train=False, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=False)
max_per_class_error = gbm.max_per_class_error(train=True, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=True)
max_per_class_error = gbm.max_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
max_per_class_error = gbm.max_per_class_error(train=False, valid=True, xval=True)
# confusion_matrix
confusion_matrix1 = gbm.confusion_matrix(train=True, valid=False, xval=False)
confusion_matrix2 = gbm.confusion_matrix(train=False, valid=True, xval=False)
confusion_matrix3 = gbm.confusion_matrix(train=False, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=False)
confusion_matrix = gbm.confusion_matrix(train=True, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=True)
confusion_matrix = gbm.confusion_matrix(train=False, valid=False, xval=False) # default: return training metrics
confusion_matrix = gbm.confusion_matrix(train=False, valid=True, xval=True)
# # plot
# plot1 = gbm.plot(train=True, valid=False, xval=False)
# plot2 = gbm.plot(train=False, valid=True, xval=False)
# plot3 = gbm.plot(train=False, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=False)
# plot = gbm.plot(train=True, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=True)
# plot = gbm.plot(train=False, valid=False, xval=False) # default: return training metrics
# plot = gbm.plot(train=False, valid=True, xval=True)
# # tpr
# tpr1 = gbm.tpr(train=True, valid=False, xval=False)
# tpr2 = gbm.tpr(train=False, valid=True, xval=False)
# tpr3 = gbm.tpr(train=False, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=False)
# tpr = gbm.tpr(train=True, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=True)
# tpr = gbm.tpr(train=False, valid=False, xval=False) # default: return training metrics
# tpr = gbm.tpr(train=False, valid=True, xval=True)
#
# # tnr
# tnr1 = gbm.tnr(train=True, valid=False, xval=False)
# tnr2 = gbm.tnr(train=False, valid=True, xval=False)
# tnr3 = gbm.tnr(train=False, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=False)
# tnr = gbm.tnr(train=True, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=True)
# tnr = gbm.tnr(train=False, valid=False, xval=False) # default: return training metrics
# tnr = gbm.tnr(train=False, valid=True, xval=True)
#
# # fnr
# fnr1 = gbm.fnr(train=True, valid=False, xval=False)
# fnr2 = gbm.fnr(train=False, valid=True, xval=False)
# fnr3 = gbm.fnr(train=False, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=False)
# fnr = gbm.fnr(train=True, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=True)
# fnr = gbm.fnr(train=False, valid=False, xval=False) # default: return training metrics
# fnr = gbm.fnr(train=False, valid=True, xval=True)
#
# # fpr
# fpr1 = gbm.fpr(train=True, valid=False, xval=False)
# fpr2 = gbm.fpr(train=False, valid=True, xval=False)
# fpr3 = gbm.fpr(train=False, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=False)
# fpr = gbm.fpr(train=True, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=True)
# fpr = gbm.fpr(train=False, valid=False, xval=False) # default: return training metrics
# fpr = gbm.fpr(train=False, valid=True, xval=True)
# multinomial
cars = h2o.import_file(path=tests.locate("smalldata/junit/cars_20mpg.csv"))
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
distribution = "multinomial"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# hit_ratio_table
hit_ratio_table1 = gbm.hit_ratio_table(train=True, valid=False, xval=False)
hit_ratio_table2 = gbm.hit_ratio_table(train=False, valid=True, xval=False)
hit_ratio_table3 = gbm.hit_ratio_table(train=False, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=False)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=False, xval=False) # default: return training metrics
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=True, xval=True)
# clustering
iris = h2o.import_file(path=tests.locate("smalldata/iris/iris.csv"))
km = h2o.kmeans(x=iris[0:4],
nfolds=3,
k=3)
# betweenss
betweenss1 = km.betweenss(train=True, valid=False, xval=False)
assert isinstance(betweenss1, float)
betweenss3 = km.betweenss(train=False, valid=False, xval=True)
assert isinstance(betweenss3, float)
betweenss = km.betweenss(train=True, valid=False, xval=True)
assert "train" in betweenss.keys() and "xval" in betweenss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert len(betweenss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert isinstance(betweenss["train"], float) and isinstance(betweenss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(betweenss["train"]), type(betweenss["xval"]))
assert betweenss["xval"] == betweenss3
betweenss = km.betweenss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(betweenss, float)
assert betweenss == betweenss1
# totss
totss1 = km.totss(train=True, valid=False, xval=False)
assert isinstance(totss1, float)
totss3 = km.totss(train=False, valid=False, xval=True)
assert isinstance(totss3, float)
totss = km.totss(train=True, valid=False, xval=True)
assert "train" in totss.keys() and "xval" in totss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert len(totss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert isinstance(totss["train"], float) and isinstance(totss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(totss["train"]), type(totss["xval"]))
assert totss["xval"] == totss3
totss = km.totss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(totss, float)
assert totss == totss1
# tot_withinss
tot_withinss1 = km.tot_withinss(train=True, valid=False, xval=False)
assert isinstance(tot_withinss1, float)
tot_withinss3 = km.tot_withinss(train=False, valid=False, xval=True)
assert isinstance(tot_withinss3, float)
tot_withinss = km.tot_withinss(train=True, valid=False, xval=True)
assert "train" in tot_withinss.keys() and "xval" in tot_withinss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert len(tot_withinss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert isinstance(tot_withinss["train"], float) and isinstance(tot_withinss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(tot_withinss["train"]), type(tot_withinss["xval"]))
assert tot_withinss["xval"] == tot_withinss3
tot_withinss = km.tot_withinss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(tot_withinss, float)
assert tot_withinss == tot_withinss1
# withinss
withinss1 = km.withinss(train=True, valid=False, xval=False)
withinss3 = km.withinss(train=False, valid=False, xval=True)
withinss = km.withinss(train=True, valid=False, xval=True)
withinss = km.withinss(train=False, valid=False, xval=False) # default: return training metrics
# centroid_stats
centroid_stats1 = km.centroid_stats(train=True, valid=False, xval=False)
centroid_stats3 = km.centroid_stats(train=False, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=True, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=False, valid=False, xval=False) # default: return training metrics
# size
size1 = km.size(train=True, valid=False, xval=False)
size3 = km.size(train=False, valid=False, xval=True)
size = km.size(train=True, valid=False, xval=True)
size = km.size(train=False, valid=False, xval=False) # default: return training metrics
if __name__ == "__main__":
tests.run_test(sys.argv, metric_accessors)
|
kyoren/https-github.com-h2oai-h2o-3
|
h2o-py/tests/testdir_misc/pyunit_metric_accessors.py
|
Python
|
apache-2.0
| 42,219
|
import logging
from django.core.management.base import BaseCommand
from waldur_rancher.utils import SyncUser
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = """Sync users from Waldur to Rancher."""
def handle(self, *args, **options):
def print_message(count, action, name='user'):
if count == 1:
self.stdout.write(
self.style.SUCCESS('%s %s has been %s.' % (count, name, action))
)
else:
self.stdout.write(
self.style.SUCCESS('%s %ss have been %s.' % (count, name, action))
)
result = SyncUser.run()
for action in ['blocked', 'created', 'activated', 'updated']:
print_message(result.get(action, 0), action)
print_message(result.get('project roles deleted', 0), 'deleted', 'project role')
        print_message(result.get('project roles created', 0), 'created', 'project role')
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_rancher/management/commands/sync_users.py
|
Python
|
mit
| 986
|
# Author: Yiheng Zhu
# Date: 31/08/2017
# Description:
# The bandit class
# import libraries
import numpy
class Bandit:
def __init__(self, true_mean):
self.true_mean = true_mean
self.sample_number = 0
self.estimated_mean = 0
# pull the lever of the slot machine
def pull_lever(self):
# return true mean + sample from standard normal distribution (mean = 0, variance = 1)
return (self.true_mean + numpy.random.randn())
# update the estimated mean
def update_estimated_mean(self, new_sample):
self.sample_number += 1
# mean(x_N) = (1/N)*sum_{i=1,...,N}(x_i)
# = (1/N)*sum_{i=1,...,N-1}(x_i) + (1/N)*x_N
# = (1/N)*(N-1)*mean(x_{N-1}) + (1/N)*x_N
# = ((N-1)/N)*mean(x_{N-1}) + (1/N)*x_N
# = (1 - 1/N)*mean(x_{N-1}) + x_N/N
self.estimated_mean = (1.0 - 1.0/self.sample_number)*self.estimated_mean + new_sample/self.sample_number
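# A hedged usage sketch (added for illustration, not part of the original file):
# it exercises the incremental mean update derived in the comments above; the
# true mean of 1.5 and the 1000 pulls are arbitrary demo values.
if __name__ == "__main__":
    bandit = Bandit(true_mean=1.5)
    for _ in range(1000):
        bandit.update_estimated_mean(bandit.pull_lever())
    # after many samples the running estimate should be close to the true mean
    print(bandit.estimated_mean)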
|
GitYiheng/reinforcement_learning_test
|
test00_previous_files/Bandit.py
|
Python
|
mit
| 901
|
from datetime import datetime
from datetime import timedelta
class timecycle:
# describes the type
typeDescription = 'Creates a timeable instance we can use for timing cycles of things'
def __init__(self):
self.wait_time = 15
self.alarm_start = datetime.now()
self.wait_time_delta = timedelta(minutes=self.wait_time)
self.alarm_time = self.alarm_start + self.wait_time_delta
self.force_alarm = False
def reset_alarm(self):
self.force_alarm = False
self.alarm_start = datetime.now()
self.wait_time_delta = timedelta(minutes=self.wait_time)
self.alarm_time = self.alarm_start + self.wait_time_delta
def is_alarming(self):
now = datetime.now().strftime("%s")
alarm_time = self.alarm_time.strftime("%s")
if now > alarm_time or self.force_alarm:
self.reset_alarm()
return True
else:
return False
def is_alarming_manually_reset(self):
now = datetime.now().strftime("%s")
alarm_time = self.alarm_time.strftime("%s")
if now > alarm_time or self.force_alarm:
return True
else:
return False
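# A hedged usage sketch (added for illustration, not part of the original file):
# it shows the intended polling pattern, where is_alarming() re-arms the window
# itself while is_alarming_manually_reset() leaves the reset to the caller.
if __name__ == "__main__":
    cycle = timecycle()
    cycle.force_alarm = True  # force an immediate alarm for the demo
    if cycle.is_alarming():   # returns True and re-arms the 15-minute window
        print("alarm fired; next alarm at %s" % cycle.alarm_time)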
|
headstrongsolutions/Jarvis_Screen
|
timecycle.py
|
Python
|
mit
| 1,206
|
#!/usr/bin/env python2.7
import signal
import sys
import threading
from time import sleep
from uthportal import UthPortal
from uthportal.logger import get_logger
from uthportal.networking import ThreadedSocketServer
uth_portal = None
socket_server = None
def signal_handler(signal, frame):
if uth_portal:
uth_portal.logger.info('User interrupt! Exiting....')
uth_portal.stop()
if socket_server:
socket_server.shutdown()
sys.exit(0)
def auth_function(info):
#info should be a means of authentication
#TODO:implement this
if info[0] == 'uthportal' and info[1] == "HardPass123":
return True
else:
return False
def handle_command(command):
return ("Command handled by uthportal :" + command[0], "info")
def main():
#Handle SIGINT
signal.signal(signal.SIGINT, signal_handler)
global uth_portal, socket_server
uth_portal = UthPortal()
uth_portal.start()
uth_portal.logger.info('Uthportal started successfully!')
#uth_portal._force_update()
get_logger('py.warnings', uth_portal.settings)
socket_server = ThreadedSocketServer(uth_portal.settings)
socket_server.set_handle_function(handle_command)
socket_server.set_auth_function(auth_function)
socket_server.listen()
while True:
sleep(2)
if __name__ == '__main__' :
main()
|
kkanellis/uthportal-server
|
uth-portal.py
|
Python
|
gpl-3.0
| 1,371
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
# Shortest Word - problem URL: https://www.codewars.com/kata/57cebe1dc6fdc20c57000ac9/train/python
'''
import unittest
class TestCases(unittest.TestCase):
def setUp(self):
pass
def test1(self):self.assertEqual(find_short("bitcoin take over the world maybe who knows perhaps"), 3)
def test2(self):self.assertEqual(find_short("turns out random test cases are easier than writing out basic ones"), 3)
def test3(self):self.assertEqual(find_short("lets talk about javascript the best language"), 3)
def test4(self):self.assertEqual(find_short("i want to travel the world writing code one day"), 1)
def test5(self):self.assertEqual(find_short("Lets all go on holiday somewhere very cold"), 2)
def find_short(s):
return min([len(word) for word in s.split(" ")])
if __name__ == '__main__':
unittest.main()
'''
Reference solutions:
'''
|
karchi/codewars_kata
|
已完成/Shortest Word.py
|
Python
|
mit
| 936
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
#----------------------------------------------------------------------------
# User-extensible nodes
#----------------------------------------------------------------------------
class UserNodeMeta(type):
def __init__(cls, what, bases=None, dict=None):
super(UserNodeMeta, cls).__init__(what, bases, dict)
cls.actual_name = cls.__name__
cls.__name__ = "UserNode"
def __repr__(cls):
return "<class %s>" % cls.actual_name
class UserNode(ExprNode):
"""
Node that users can subclass and insert in the AST without using mixins
to provide user-specified functionality.
"""
__metaclass__ = UserNodeMeta
_fields = []
def infer_types(self, type_inferer):
"""
Infer the type of this node and set it self.type.
The return value will replace this node in the AST.
"""
raise NotImplementedError
def specialize(self, specializer):
"""
Just before code generation. Useful to rewrite this node in terms
of other existing fundamental operations.
Implementing this method is optional.
"""
specializer.visitchildren(self)
return self
def codegen(self, codegen):
"""
Generate code for this node.
Must return an LLVM Value.
"""
raise NotImplementedError
def __repr__(self):
return "<%s object>" % self.actual_name
class dont_infer(UserNode):
"""
Support delayed type inference of the body. E.g. if you want a portion
<blob> to be inferred elsewhere:
print x
<blob>
print y
If we want to infer <blob> after the last print, but evaluate it before,
we can replace these statements with:
[print x, dont_infer(<blob>), print y, infer_now(<blob>)]
"""
_fields = ["arg"]
def __init__(self, arg):
self.arg = arg
def infer_types(self, type_inferer):
return self
def specialize(self, specializer):
return specializer.visit(self.arg)
class infer_now(UserNode):
"See dont_infer above"
_fields = []
def __init__(self, arg, dont_infer_node):
self.arg = arg
self.dont_infer_node = dont_infer_node
def infer_types(self, type_inferer):
self.dont_infer_node.arg = type_inferer.visit(self.arg)
return None
|
shiquanwang/numba
|
numba/nodes/usernode.py
|
Python
|
bsd-2-clause
| 2,475
|
import json
import os
import numpy as np
import pytest
from hyperspy import signals
from hyperspy.io import load
from hyperspy.misc.test_utils import assert_deep_almost_equal
test_files = ['30x30_instructively_packed_16bit_compressed.bcf',
'16x16_12bit_packed_8bit.bcf',
'P45_the_default_job.bcf',
'test_TEM.bcf',
'Hitachi_TM3030Plus.bcf',
'over16bit.bcf',
'bcf_v2_50x50px.bcf',
'bcf-edx-ebsd.bcf']
np_file = ['30x30_16bit.npy', '30x30_16bit_ds.npy']
spx_files = ['extracted_from_bcf.spx',
'bruker_nano.spx']
my_path = os.path.dirname(__file__)
def test_load_16bit():
# test bcf from hyperspy load function level
# some of functions can be not covered
# it cant use cython parsing implementation, as it is not compiled
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing bcf instructively packed 16bit...')
s = load(filename)
bse, hype = s
# Bruker saves all images in true 16bit:
assert bse.data.dtype == np.uint16
assert bse.data.shape == (30, 30)
np_filename = os.path.join(my_path, 'bruker_data', np_file[0])
np.testing.assert_array_equal(hype.data[:, :, 222:224],
np.load(np_filename))
assert hype.data.shape == (30, 30, 2048)
def test_load_16bit_reduced():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing downsampled 16bit bcf...')
s = load(filename, downsample=4, cutoff_at_kV=10)
bse, hype = s
# sem images are never downsampled
assert bse.data.shape == (30, 30)
np_filename = os.path.join(my_path, 'bruker_data', np_file[1])
np.testing.assert_array_equal(hype.data[:, :, 222:224],
np.load(np_filename))
assert hype.data.shape == (8, 8, 1047)
# Bruker saves all images in true 16bit:
assert bse.data.dtype == np.uint16
# hypermaps should always return unsigned integers:
assert str(hype.data.dtype)[0] == 'u'
def test_load_8bit():
for bcffile in test_files[1:3]:
filename = os.path.join(my_path, 'bruker_data', bcffile)
print('testing simple 8bit bcf...')
s = load(filename)
bse, hype = s[0], s[-1]
# Bruker saves all images in true 16bit:
assert bse.data.dtype == np.uint16
# hypermaps should always return unsigned integers:
assert str(hype.data.dtype)[0] == 'u'
def test_hyperspy_wrap():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing bcf wrap to hyperspy signal...')
from hyperspy.exceptions import VisibleDeprecationWarning
with pytest.warns(VisibleDeprecationWarning):
hype = load(filename, select_type='spectrum')
hype = load(filename, select_type='spectrum_image')
np.testing.assert_allclose(
hype.axes_manager[0].scale,
1.66740910949362,
atol=1E-12)
np.testing.assert_allclose(
hype.axes_manager[1].scale,
1.66740910949362,
atol=1E-12)
assert hype.axes_manager[1].units == 'µm'
np.testing.assert_allclose(hype.axes_manager[2].scale, 0.009999)
np.testing.assert_allclose(hype.axes_manager[2].offset, -0.47225277)
assert hype.axes_manager[2].units == 'keV'
assert hype.axes_manager[2].is_binned == True
md_ref = {
'Acquisition_instrument': {
'SEM': {
'beam_energy': 20,
'magnification': 1819.22595,
'Detector': {
'EDS': {
'elevation_angle': 35.0,
'detector_type': 'XFlash 6|10',
'azimuth_angle': 90.0,
'real_time': 70.07298,
'energy_resolution_MnKa': 130.0}},
'Stage': {
'tilt_alpha': 0.0,
'rotation': 326.10089,
'x': 66940.81,
'y': 54233.16,
'z': 39194.77}}},
'General': {
'original_filename':
'30x30_instructively_packed_16bit_compressed.bcf',
'title': 'EDX',
'date': '2018-10-04',
'time': '13:02:07'},
'Sample': {
'name': 'chevkinite',
'elements': ['Al', 'C', 'Ca', 'Ce', 'Fe', 'Gd', 'K', 'Mg', 'Na',
'Nd', 'O', 'P', 'Si', 'Sm', 'Th', 'Ti'],
'xray_lines': ['Al_Ka', 'C_Ka', 'Ca_Ka', 'Ce_La', 'Fe_Ka',
'Gd_La', 'K_Ka', 'Mg_Ka', 'Na_Ka', 'Nd_La',
'O_Ka', 'P_Ka', 'Si_Ka', 'Sm_La', 'Th_Ma',
'Ti_Ka']},
'Signal': {
'quantity': 'X-rays (Counts)',
'signal_type': 'EDS_SEM'},
'_HyperSpy': {
'Folding': {'original_axes_manager': None,
'original_shape': None,
'signal_unfolded': False,
'unfolded': False}}}
filename_omd = os.path.join(my_path,
'bruker_data',
'30x30_original_metadata.json')
with open(filename_omd) as fn:
# original_metadata:
omd_ref = json.load(fn)
assert_deep_almost_equal(hype.metadata.as_dictionary(), md_ref)
assert_deep_almost_equal(hype.original_metadata.as_dictionary(), omd_ref)
assert hype.metadata.General.date == "2018-10-04"
assert hype.metadata.General.time == "13:02:07"
assert hype.metadata.Signal.quantity == "X-rays (Counts)"
def test_hyperspy_wrap_downsampled():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing bcf wrap to hyperspy signal...')
hype = load(filename, select_type='spectrum_image', downsample=5)
np.testing.assert_allclose(
hype.axes_manager[0].scale,
8.337045547468101,
atol=1E-12)
np.testing.assert_allclose(
hype.axes_manager[1].scale,
8.337045547468101,
atol=1E-12)
assert hype.axes_manager[1].units == 'µm'
def test_get_mode():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
s = load(filename, select_type='spectrum_image', instrument='SEM')
assert s.metadata.Signal.signal_type == "EDS_SEM"
assert isinstance(s, signals.EDSSEMSpectrum)
filename = os.path.join(my_path, 'bruker_data', test_files[0])
s = load(filename, select_type='spectrum_image', instrument='TEM')
assert s.metadata.Signal.signal_type == "EDS_TEM"
assert isinstance(s, signals.EDSTEMSpectrum)
filename = os.path.join(my_path, 'bruker_data', test_files[0])
s = load(filename, select_type='spectrum_image')
assert s.metadata.Signal.signal_type == "EDS_SEM"
assert isinstance(s, signals.EDSSEMSpectrum)
filename = os.path.join(my_path, 'bruker_data', test_files[3])
s = load(filename, select_type='spectrum_image')
assert s.metadata.Signal.signal_type == "EDS_TEM"
assert isinstance(s, signals.EDSTEMSpectrum)
def test_wrong_file():
filename = os.path.join(my_path, 'bruker_data', 'Nope.bcf')
with pytest.raises(TypeError):
load(filename)
def test_fast_bcf():
thingy = pytest.importorskip("hyperspy.io_plugins.unbcf_fast")
from hyperspy.io_plugins import bruker
for bcffile in test_files:
filename = os.path.join(my_path, 'bruker_data', bcffile)
thingy = bruker.BCF_reader(filename)
for j in range(2, 5, 1):
print('downsampling:', j)
bruker.fast_unbcf = True # manually enabling fast parsing
hmap1 = thingy.parse_hypermap(downsample=j) # using cython
bruker.fast_unbcf = False # manually disabling fast parsing
hmap2 = thingy.parse_hypermap(downsample=j) # py implementation
np.testing.assert_array_equal(hmap1, hmap2)
def test_decimal_regex():
from hyperspy.io_plugins.bruker import fix_dec_patterns
dummy_xml_positive = [b'<dummy_tag>85,658</dummy_tag>',
b'<dummy_tag>85,658E-8</dummy_tag>',
b'<dummy_tag>-85,658E-8</dummy_tag>',
b'<dum_tag>-85.658</dum_tag>', # negative check
b'<dum_tag>85.658E-8</dum_tag>'] # negative check
dummy_xml_negative = [b'<dum_tag>12,25,23,45,56,12,45</dum_tag>',
b'<dum_tag>12e1,23,-24E-5</dum_tag>']
for i in dummy_xml_positive:
assert b'85.658' in fix_dec_patterns.sub(b'\\1.\\2', i)
for j in dummy_xml_negative:
assert b'.' not in fix_dec_patterns.sub(b'\\1.\\2', j)
def test_all_spx_loads():
for spxfile in spx_files:
filename = os.path.join(my_path, 'bruker_data', spxfile)
s = load(filename)
assert s.data.dtype == np.uint64
assert s.metadata.Signal.signal_type == 'EDS_SEM'
def test_stand_alone_spx():
filename = os.path.join(my_path, 'bruker_data', 'bruker_nano.spx')
s = load(filename)
assert s.metadata.Sample.elements == ['Fe', 'S', 'Cu']
assert s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == 7.385
def test_bruker_XRF():
# See https://github.com/hyperspy/hyperspy/issues/2689
# Bruker M6 Jetstream SPX
filename = os.path.join(my_path, 'bruker_data',
'bruker_m6_jetstream_file_example.spx')
s = load(filename)
assert s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == 28.046
assert s.metadata.Acquisition_instrument.TEM.beam_energy == 50
|
erh3cq/hyperspy
|
hyperspy/tests/io/test_bruker.py
|
Python
|
gpl-3.0
| 9,549
|
#-*- coding: utf-8 -*-
# stino/serial.py
import os
import threading
import time
from . import constant
from . import pyserial
class SerialListener:
def __init__(self, menu):
self.menu = menu
self.serial_list = []
self.is_alive = False
def start(self):
if not self.is_alive:
self.is_alive = True
listener_thread = threading.Thread(target=self.update)
listener_thread.start()
def update(self):
while self.is_alive:
pre_serial_list = self.serial_list
self.serial_list = getSerialPortList()
if self.serial_list != pre_serial_list:
self.menu.refresh()
time.sleep(1)
def stop(self):
self.is_alive = False
def getSerialPortList():
serial_port_list = []
has_ports = False
if constant.sys_platform == "windows":
if constant.sys_version < 3:
import _winreg as winreg
else:
import winreg
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
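        # SERIALCOMM maps internal device names to their COM port names (e.g. COM3),
        # so enumerating its values below yields the available serial ports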
try:
reg = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path,)
has_ports = True
except WindowsError:
pass
if has_ports:
for i in range(128):
try:
name,value,type = winreg.EnumValue(reg,i)
except WindowsError:
pass
else:
serial_port_list.append(value)
else:
if constant.sys_platform == 'osx':
dev_names = ['tty.', 'cu.']
else:
dev_names = ['ttyACM', 'ttyUSB']
serial_port_list = []
dev_path = '/dev'
dev_file_list = os.listdir(dev_path)
for dev_file in dev_file_list:
for dev_name in dev_names:
if dev_name in dev_file:
dev_file_path = os.path.join(dev_path, dev_file)
serial_port_list.append(dev_file_path)
return serial_port_list
def isSerialAvailable(serial_port):
state = False
serial = pyserial.Serial()
serial.port = serial_port
try:
serial.open()
except pyserial.serialutil.SerialException:
pass
except UnicodeDecodeError:
pass
else:
if serial.isOpen():
state = True
serial.close()
return state
def getSelectedSerialPort():
serial_list = getSerialPortList()
serial_port_id = constant.sketch_settings.get('serial_port', -1)
serial_port = 'no_serial_port'
if serial_list:
try:
serial_port = serial_list[serial_port_id]
except IndexError:
serial_port = serial_list[0]
return serial_port
|
kierangraham/dotfiles
|
Sublime/Packages/Arduino-like IDE/app/serial.py
|
Python
|
bsd-2-clause
| 2,193
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin
from socket import gethostname
class Ceph(Plugin, RedHatPlugin, UbuntuPlugin):
short_desc = 'CEPH distributed storage'
plugin_name = 'ceph'
profiles = ('storage', 'virt')
ceph_hostname = gethostname()
packages = (
'ceph',
'ceph-mds',
'ceph-common',
'libcephfs1',
'ceph-fs-common',
'calamari-server',
'librados2'
)
services = (
'ceph-nfs@pacemaker',
'ceph-mds@%s' % ceph_hostname,
'ceph-mon@%s' % ceph_hostname,
'ceph-mgr@%s' % ceph_hostname,
'ceph-radosgw@*',
'ceph-osd@*'
)
def setup(self):
all_logs = self.get_option("all_logs")
if not all_logs:
self.add_copy_spec([
"/var/log/ceph/*.log",
"/var/log/radosgw/*.log",
"/var/log/calamari/*.log"
])
else:
self.add_copy_spec([
"/var/log/ceph/",
"/var/log/calamari",
"/var/log/radosgw"
])
self.add_copy_spec([
"/etc/ceph/",
"/etc/calamari/",
"/var/lib/ceph/",
"/run/ceph/"
])
self.add_cmd_output([
"ceph mon stat",
"ceph mon_status",
"ceph quorum_status",
"ceph mgr module ls",
"ceph mgr metadata",
"ceph balancer status",
"ceph osd metadata",
"ceph osd erasure-code-profile ls",
"ceph report",
"ceph osd crush show-tunables",
"ceph-disk list",
"ceph versions",
"ceph features",
"ceph insights",
"ceph osd crush dump",
"ceph -v",
"ceph-volume lvm list",
"ceph crash stat",
"ceph crash ls",
"ceph config log",
"ceph config generate-minimal-conf",
"ceph config-key dump",
])
ceph_cmds = [
"status",
"health detail",
"osd tree",
"osd stat",
"osd df tree",
"osd dump",
"osd df",
"osd perf",
"osd blocked-by",
"osd pool ls detail",
"osd pool autoscale-status",
"osd numa-status",
"device ls",
"mon dump",
"mgr dump",
"mds stat",
"df",
"df detail",
"fs ls",
"fs dump",
"pg dump",
"pg stat",
]
self.add_cmd_output([
"ceph %s" % s for s in ceph_cmds
])
self.add_cmd_output([
"ceph %s --format json-pretty" % s for s in ceph_cmds
], subdir="json_output")
for service in self.services:
self.add_journal(units=service)
self.add_forbidden_path([
"/etc/ceph/*keyring*",
"/var/lib/ceph/*keyring*",
"/var/lib/ceph/*/*keyring*",
"/var/lib/ceph/*/*/*keyring*",
"/var/lib/ceph/osd",
"/var/lib/ceph/mon",
# Excludes temporary ceph-osd mount location like
# /var/lib/ceph/tmp/mnt.XXXX from sos collection.
"/var/lib/ceph/tmp/*mnt*",
"/etc/ceph/*bindpass*"
])
# vim: set et ts=4 sw=4 :
|
BryanQuigley/sos
|
sos/report/plugins/ceph.py
|
Python
|
gpl-2.0
| 3,783
|
def __load():
import imp, os, sys
ext = 'pygame/font.so'
for path in sys.path:
if not path.endswith('lib-dynload'):
continue
ext_path = os.path.join(path, ext)
if os.path.exists(ext_path):
mod = imp.load_dynamic(__name__, ext_path)
break
else:
raise ImportError(repr(ext) + " not found")
__load()
del __load
|
mokuki082/EggDrop
|
code/build/bdist.macosx-10.6-intel/python3.4-standalone/app/temp/pygame/font.py
|
Python
|
gpl-3.0
| 393
|
#
# Broker peering simulation (part 2) in Python
# Prototypes the request-reply flow
#
# While this example runs in a single process, that is just to make
# it easier to start and stop the example. Each thread has its own
# context and conceptually acts as a separate process.
#
# Author : Min RK
# Contact: benjaminrk(at)gmail(dot)com
#
import random
import sys
import threading
import time
import zmq
NBR_CLIENTS = 10
NBR_WORKERS = 3
def client_task(name, i):
"""Request-reply client using REQ socket"""
ctx = zmq.Context()
client = ctx.socket(zmq.REQ)
client.identity = "Client-%s-%s" % (name, i)
client.connect("ipc://%s-localfe.ipc" % name)
while True:
client.send("HELLO")
try:
reply = client.recv()
except zmq.ZMQError:
# interrupted
return
print "Client-%s: %s\n" % (i, reply),
time.sleep(1)
def worker_task(name, i):
"""Worker using REQ socket to do LRU routing"""
ctx = zmq.Context()
worker = ctx.socket(zmq.REQ)
worker.identity = "Worker-%s-%s" % (name, i)
worker.connect("ipc://%s-localbe.ipc" % name)
# Tell broker we're ready for work
worker.send("READY")
# Process messages as they arrive
while True:
try:
msg = worker.recv_multipart()
except zmq.ZMQError:
# interrupted
return
print "Worker-%s: %s\n" % (i, msg),
msg[-1] = "OK"
worker.send_multipart(msg)
def main(myself, peers):
print "I: preparing broker at %s..." % myself
# Prepare our context and sockets
ctx = zmq.Context()
# Bind cloud frontend to endpoint
cloudfe = ctx.socket(zmq.ROUTER)
cloudfe.setsockopt(zmq.IDENTITY, myself)
cloudfe.bind("ipc://%s-cloud.ipc" % myself)
# Connect cloud backend to all peers
cloudbe = ctx.socket(zmq.ROUTER)
cloudbe.setsockopt(zmq.IDENTITY, myself)
for peer in peers:
print "I: connecting to cloud frontend at", peer
cloudbe.connect("ipc://%s-cloud.ipc" % peer)
# Prepare local frontend and backend
localfe = ctx.socket(zmq.ROUTER)
localfe.bind("ipc://%s-localfe.ipc" % myself)
localbe = ctx.socket(zmq.ROUTER)
localbe.bind("ipc://%s-localbe.ipc" % myself)
# Get user to tell us when we can start...
raw_input("Press Enter when all brokers are started: ")
# create workers and clients threads
for i in range(NBR_WORKERS):
thread = threading.Thread(target=worker_task, args=(myself, i))
thread.daemon = True
thread.start()
for i in range(NBR_CLIENTS):
thread_c = threading.Thread(target=client_task, args=(myself, i))
thread_c.daemon = True
thread_c.start()
# Interesting part
# -------------------------------------------------------------
# Request-reply flow
# - Poll backends and process local/cloud replies
# - While worker available, route localfe to local or cloud
workers = []
# setup pollers
pollerbe = zmq.Poller()
pollerbe.register(localbe, zmq.POLLIN)
pollerbe.register(cloudbe, zmq.POLLIN)
pollerfe = zmq.Poller()
pollerfe.register(localfe, zmq.POLLIN)
pollerfe.register(cloudfe, zmq.POLLIN)
while True:
# If we have no workers anyhow, wait indefinitely
try:
events = dict(pollerbe.poll(1000 if workers else None))
except zmq.ZMQError:
break # interrupted
# Handle reply from local worker
msg = None
if localbe in events:
msg = localbe.recv_multipart()
(address, empty), msg = msg[:2], msg[2:]
workers.append(address)
# If it's READY, don't route the message any further
if msg[-1] == 'READY':
msg = None
elif cloudbe in events:
msg = cloudbe.recv_multipart()
(address, empty), msg = msg[:2], msg[2:]
# We don't use peer broker address for anything
if msg is not None:
address = msg[0]
if address in peers:
# Route reply to cloud if it's addressed to a broker
cloudfe.send_multipart(msg)
else:
# Route reply to client if we still need to
localfe.send_multipart(msg)
# Now route as many clients requests as we can handle
while workers:
events = dict(pollerfe.poll(0))
reroutable = False
# We'll do peer brokers first, to prevent starvation
if cloudfe in events:
msg = cloudfe.recv_multipart()
reroutable = False
elif localfe in events:
msg = localfe.recv_multipart()
reroutable = True
else:
break # No work, go back to backends
# If reroutable, send to cloud 20% of the time
# Here we'd normally use cloud status information
if reroutable and peers and random.randint(0, 4) == 0:
# Route to random broker peer
msg = [random.choice(peers), ''] + msg
cloudbe.send_multipart(msg)
else:
msg = [workers.pop(0), ''] + msg
localbe.send_multipart(msg)
if __name__ == '__main__':
if len(sys.argv) >= 2:
main(myself=sys.argv[1], peers=sys.argv[2:])
else:
print "Usage: peering2.py <me> [<peer_1> [... <peer_N>]]"
sys.exit(1)
|
krattai/noo-ebs
|
docs/zeroMQ-guide2/examples/Python/peering2.py
|
Python
|
bsd-2-clause
| 5,525
|
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestMcrouterRoutingPrefixAscii(McrouterTestCase):
config = './mcrouter/test/routing_prefix_test_ascii.json'
extra_args = []
def setUp(self):
# The order here must corresponds to the order of hosts in the .json
self.allhosts = []
for i in range(0, 4):
self.allhosts.append(self.add_server(Memcached()))
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_routing_prefix(self):
mcr = self.get_mcrouter()
nclusters = len(self.allhosts)
# first try setting a key to the local cluster
mcr.set("testkeylocal", "testvalue")
self.assertEqual(self.allhosts[0].get("testkeylocal"), "testvalue")
for i in range(1, nclusters):
self.assertIsNone(self.allhosts[i].get("testkeylocal"))
mcr.set("/*/*/testkey-routing", "testvalue")
# /*/*/ is all-fastest, and some requests might complete asynchronously.
# As a workaround, just wait
time.sleep(1)
local = self.allhosts[0].get("testkey-routing", True)
self.assertEqual(local["value"], "testvalue")
# make sure the key got set as "/*/*/key"
for i in range(1, nclusters):
local = self.allhosts[i].get("/*/*/testkey-routing", True)
self.assertEqual(local["value"], "testvalue")
class TestMcrouterRoutingPrefixUmbrella(TestMcrouterRoutingPrefixAscii):
config = './mcrouter/test/routing_prefix_test_umbrella.json'
class TestMcrouterRoutingPrefixOldNaming(TestMcrouterRoutingPrefixAscii):
config = './mcrouter/test/routing_prefix_test_old_naming.json'
class TestCustomRoutingPrefixes(McrouterTestCase):
config = './mcrouter/test/routing_prefix_test_custom.json'
extra_args = []
def setUp(self):
self.aa = self.add_server(Memcached())
self.ab = self.add_server(Memcached())
self.ba = self.add_server(Memcached())
self.bb = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_custom_routing_prefix(self):
mcr = self.get_mcrouter()
key = "/*/a/key"
value = "value"
mcr.set(key, value)
time.sleep(1)
self.assertEqual(self.aa.get('key'), value)
self.assertEqual(self.ba.get('key'), value)
key = "/b*/*/key"
value = "value2"
mcr.set(key, value)
time.sleep(1)
self.assertEqual(self.ba.get('key'), value)
self.assertEqual(self.bb.get('key'), value)
key = "/b/*b*/key"
value = "value3"
mcr.set(key, value)
self.assertEqual(self.bb.get('key'), value)
|
is00hcw/mcrouter
|
mcrouter/test/test_routing_prefixes.py
|
Python
|
bsd-3-clause
| 3,332
|
#!/usr/bin/env python
import shutil
import os
import glob
import textwrap
from Bio import SeqIO
import fasta_statter
"""
Generic assembler template for writing assembly bindings. Assembly Dispatch provides
a number of relevant arguments (mostly geared around the requirements of Velvet, but
this has proved to be robust for most complex de Bruijin assemblers on MiSeq data)
including the name of a function ("statter") in the fasta_statter module (invoke using
getattr(fasta_statter, statter)(<fasta_file>)
if desired) and a pair of functions for informatory reporting ("callback(string)") and
optionally updating the job's information in the database ("update_callback(dict)") by
dictionary key/value pairs.
Reducing the value of core_load will cause the Dispatch to invoke more jobs, if
possible, to maximize CPU usage. The assembler will re-order jobs, if necessary, to
avoid dispatching more than 8 cores' worth of jobs at a time.
You may assume that your reads have already been trimmed according to the specified
trimming parameters in the job database; no need to implement trimming here unless
its assembler-specific. If so, you should call update_callback to reflect this
trimming.
"""
description = "WORST: Win On Regular Sequencing Tallies. Cheating assembler. Appends reads to form one big contig until it hits 4.5mb. Worse-case scenario for traditional N50/contig number assembly assessment tools. DO NOT USE"
core_load = 1 #number of cores this assembler will max out
supports = ('MiSeq','IonTorrent')
def assemble(reads1, path, callback=lambda s: None, update_callback=lambda d: None, **kwargs):
d = {'assembly_version':'NEGATIVE ASSEMBLY CONTROL',
         'average_coverage':'MISASSEMBLY DO NOT USE',
'num_contigs':'1',
'n50':'',
'num_bases':'',
'fasta_file':'purposeful_misassembly.fasta',
'lib_insert_length':'MISASSEMBLY DO NOT USE'
}
try:
size = 0
callback("Starting WORST: The Assessment-Testing Purposeful Missassembly.")
try:
with open(glob.glob("{}/*.fasta".format(path))[0], 'r') as another_fasta:
for c in SeqIO.parse(another_fasta, "fasta"):
size += len(c)
except IndexError:
size = 4500000
callback("Target assembly size: {} bases.".format(size))
#assemble
a = ''
reads_grabbed = 0
with open(reads1, 'r') as reads_file:
reads = SeqIO.parse(reads_file, "fastq")
while len(a) < size:
try:
a += reads.next().seq
except StopIteration:
break
reads_grabbed += 1
if reads_grabbed % 5000 == 0:
callback("Appended {} reads.".format(reads_grabbed))
d['num_bases'] = len(a)
with open('{}/purposeful_misassembly.fasta'.format(path), 'w') as assembly:
assembly.write(">PURPOSEFUL_MISASSEMBLY_DO_NOT_USE\n")
for i in range(0, len(a), 80):
assembly.write(str(a[i:i+80]))
assembly.write("\n")
d.update(fasta_statter.stat_fasta('{}/purposeful_misassembly.fasta'.format(path)))
finally:
pass
return d
if __name__ == "__main__":
#debug
import datetime
def cb(d):
print "[{}] {}".format(datetime.datetime.today().ctime(), d)
def bcb(d):
for (k, v) in d.items():
cb("{} : {}".format(k, v))
print assemble(path='/home/justin.payne',
reads1='/shared/gn2/CFSANgenomes/CFSAN001659/CFSAN001659_01/CFSAN001659_S11_L001_R1_001.fastq',
reads2='/shared/gn2/CFSANgenomes/CFSAN001659/CFSAN001659_01/CFSAN001659_S11_L001_R2_001.fastq',
accession='CFSAN001659_01',
callback=cb,
update_callback=bcb,
k_value=144)
|
crashfrog/Dispatch
|
worst_assembler.py
|
Python
|
unlicense
| 3,511
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_scc(conf):
v=conf.env
cc=conf.find_program('cc',var='CC')
try:
conf.cmd_and_log(cc+['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler'%cc)
v.CC_NAME='sun'
conf.get_suncc_version(cc)
@conf
def scc_common_flags(conf):
v=conf.env
v.CC_SRC_F=[]
v.CC_TGT_F=['-c','-o','']
if not v.LINK_CC:
v.LINK_CC=v.CC
v.CCLNK_SRC_F=''
v.CCLNK_TGT_F=['-o','']
v.CPPPATH_ST='-I%s'
v.DEFINES_ST='-D%s'
v.LIB_ST='-l%s'
v.LIBPATH_ST='-L%s'
v.STLIB_ST='-l%s'
v.STLIBPATH_ST='-L%s'
v.SONAME_ST='-Wl,-h,%s'
v.SHLIB_MARKER='-Bdynamic'
v.STLIB_MARKER='-Bstatic'
v.cprogram_PATTERN='%s'
v.CFLAGS_cshlib=['-xcode=pic32','-DPIC']
v.LINKFLAGS_cshlib=['-G']
v.cshlib_PATTERN='lib%s.so'
v.LINKFLAGS_cstlib=['-Bstatic']
v.cstlib_PATTERN='lib%s.a'
def configure(conf):
conf.find_scc()
conf.find_ar()
conf.scc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
|
Gnurou/glmark2
|
waflib/Tools/suncc.py
|
Python
|
gpl-3.0
| 1,111
|
import os
import sys
import numpy as np
sys.path.insert(0, os.getcwd() + '/../../tools/')
import wb
import lstm
def data(tskdir):
train = tskdir + 'data/train.txt'
valid = tskdir + 'data/valid.txt'
test = tskdir + 'data/test.txt'
return data_verfy([train, valid, test]) + data_wsj92nbest()
def data_verfy(paths):
for w in paths:
if not os.path.isfile(w):
print('[ERROR] no such file: ' + w)
return paths
def data_wsj92nbest():
root = './data/WSJ92-test-data/'
nbest = root + '1000best.sent'
trans = root + 'transcript.txt'
ac = root + '1000best.acscore'
lm = root + '1000best.lmscore'
return data_verfy([nbest, trans, ac, lm])
if __name__ == '__main__':
print(sys.argv)
if len(sys.argv) == 1:
print(' \"python run_lstm.py -train\" train lstm\n '
' \"python run_lstm.py -rescore\" rescore nbest\n'
' \"python run_lstm.py -wer\" compute WER'
)
fres = wb.FRes('result.txt')
for tsize in [1, 2, 4]:
tskdir = '{}/'.format(tsize)
print(tskdir)
workdir = tskdir + 'lstmlm/'
bindir = '../../tools/lstm'
model = lstm.model(bindir, workdir)
datas = data(tskdir)
hidden = 250
dropout = 0
epoch = 10
gpu = 1
write_model = workdir + 'h{}_dropout{}_epoch{}.lstm'.format(hidden, dropout, epoch)
write_name = '{}:LSTM:h{}d{}epoch{}'.format(tsize, hidden, dropout, epoch)
config = '-hidden {} -dropout {} -epoch {} -gpu {}'.format(hidden, dropout, epoch, gpu)
if '-train' in sys.argv or '-all' in sys.argv:
if os.path.exists(write_model):
print('exist lstm: ' + write_model);
else:
model.prepare(datas[0], datas[1], datas[2])
model.train(write_model, config)
if '-test' in sys.argv or '-all' in sys.argv:
PPL = [0]*3
PPL[0] = model.ppl(write_model, datas[0], config)
PPL[1] = model.ppl(write_model, datas[1], config)
PPL[2] = model.ppl(write_model, datas[2], config)
fres.AddPPL(write_name, PPL, datas[0:3])
if '-rescore' in sys.argv or '-all' in sys.argv:
write_lmscore = write_model[0:-5] + '.lmscore'
model.rescore(write_model, data(tskdir)[3], write_lmscore, config)
if '-wer' in sys.argv or '-all' in sys.argv:
[read_nbest, read_templ, read_acscore, read_lmscore] = data(tskdir)[3:7]
read_lmscore = write_model[0:-5] + '.lmscore'
[wer, lmscale, acscale] = wb.TuneWER(read_nbest, read_templ,
read_lmscore, read_acscore, np.linspace(0.1, 0.9, 9))
print('wer={:.4f} lmscale={:.2f} acscale={:.2f}'.format(wer, lmscale, acscale))
fres.AddWER(write_name, wer)
write_templ_txt = workdir + os.path.split(read_templ)[1] + '.w'
lstm.rmlabel(read_templ, write_templ_txt)
PPL_templ = model.ppl(write_model, write_templ_txt)
LL_templ = -wb.PPL2LL(PPL_templ, write_templ_txt)
fres.Add(write_name, ['LL-wsj', 'PPL-wsj'], [LL_templ, PPL_templ])
|
wbengine/SPMILM
|
egs/1-billion/run_lstm.py
|
Python
|
apache-2.0
| 3,239
|
#!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import memcached_workload_common
# TODO: This readline function is copied and pasted from big_values.py.
def readline(s):
buf = ""
while not buf.endswith("\r\n"):
buf += s.recv(1)
return buf
# TODO: This expect function is copied and pasted from big_values.py.
def expect(s, string):
msg = ""
while len(msg) < len(string):
msg += s.recv(len(string) - len(msg))
if msg != string:
raise ValueError("Didn't get what we expected: expected %s, got %s" % (string, msg));
def test_sizes(s, cmd, lo, hi):
print ("testing un%s with %d .. %d" % (cmd, lo, hi))
s.send("set x 0 0 %d\r\n" % lo + "a" * lo + "\r\n")
msg = readline(s)
if msg != "STORED\r\n":
print ("Server responded with '%s', should have been STORED." % msg)
raise ValueError("Initial large value of size %d not set. Weird." % lo)
# We send a malformed request, with an extra char!
s.send("%s x 0 0 %d\r\n" % (cmd, (hi - lo)) + "b" * (hi - lo) + "b" + "\r\n")
expect(s, "CLIENT_ERROR bad data chunk\r\n")
s.send("get x\r\n")
expect(s, "VALUE x 0 %d\r\n" % lo)
expect(s, "a" * lo + "\r\n")
expect(s, "END\r\n")
op = memcached_workload_common.option_parser_for_socket()
opts = op.parse(sys.argv)
with memcached_workload_common.make_socket_connection(opts) as s:
sizes = [1, 100, 300, 1000, 8000, 700000]
for (lo, hi) in [(x, y) for x in sizes for y in sizes if x < y]:
for cmd in ["append", "prepend"]:
test_sizes(s, cmd, lo, hi)
|
krahman/rethinkdb
|
test/memcached_workloads/unappend_unprepend.py
|
Python
|
agpl-3.0
| 1,720
|
import os
import sys
import yaml
import pivotaltracker
from pivotaltracker._termcolor import styled
from pivotaltracker._helpers import command, choose_command
__CONFPATH = os.path.join(os.path.expanduser("~"), ".pivotaltracker")
def required_style(msg):
return styled(" %s " % msg, attrs=["reverse"])
def optional_style(msg):
return styled(" %s " % msg, attrs=["reverse"])
def result_style(msg):
return styled(" %s " % msg, attrs=["bgmagenta", "white"])
def _load_config():
if not os.path.exists(__CONFPATH):
# collect config information
print required_style("What is your Pivotal Token?")
token = raw_input("(go to https://www.pivotaltracker.com/profile to generate one): ")
print required_style("What Project do you want to track by default?")
project_id = raw_input("(visit the project in your browser and the Project ID will be in the URL, e.g. https://www.pivotaltracker.com/projects/<PROJECT-ID>): ")
# print required_style("What is your full name in that project?")
# username = raw_input("(visit https://www.pivotaltracker.com/projects/%s/overview to see the names): " % project_id)
# dump the config
data = yaml.dump(dict(token=token, project_id=int(project_id)), default_flow_style=False)
# save the file
fd = open(__CONFPATH, "w+")
print "you can update your configuration at any time by editing %s" % __CONFPATH
fd.write(data);
fd.close()
# load the config and return the values
config = yaml.load(open(__CONFPATH, "r").read())
token = config["token"]
project_id = config["project_id"]
return token, project_id
def header(msg, attrs):
attrs += ["bold"]
padding = styled(" "*8 + " "*len(msg), attrs=attrs)
padded_message = styled(" "*4 + msg + " "*4, attrs=attrs)
return "%s\n%s\n%s" % (padding, padded_message, padding)
def run(argv=sys.argv):
"""commandline client"""
@command
def chore(parser):
"""creates a chore in pivotal"""
print header("CHORE", attrs=["bgblue", "white"])
print
# get config values
token, project_id = _load_config()
# get other inputs
print required_style("chore name")
name = raw_input("> ")
print
print optional_style("extra description for the chore"), "(optional)"
description = raw_input("> ")
# create the client
client = pivotaltracker.Client(token=token)
response = client.add_story(
project_id=project_id,
name=name,
description=description,
story_type="chore",
)
# print the url of the story
print result_style(response["story"]["url"])
@command
def bug(parser):
"""creates a bug in pivotal"""
print header("BUG", attrs=["bgred", "white"])
print
# get config values
token, project_id = _load_config()
# get other inputs
print required_style("bug name")
name = raw_input("> ")
print
# input the steps
step_idx = 1
keep_going = True
description = ""
while keep_going:
print optional_style("step %s" % step_idx), "(just leave a blank entry to stop giving steps)"
new_step = raw_input("> ")
if new_step.strip():
description += "%s. %s\n" % (step_idx, new_step)
keep_going = True
step_idx += 1
else:
keep_going = False
# get any extra description
print
print optional_style("extra description for the bug %s" % step_idx), "(optional)"
extra_description = raw_input("> ")
description += "\n" + extra_description
# create the client
client = pivotaltracker.Client(token=token)
response = client.add_story(
project_id=project_id,
name=name,
description=description,
story_type="bug",
)
# print the url of the story
print result_style(response["story"]["url"])
@command
def feature(parser):
"""creates a feature in pivotal"""
print header("FEATURE", attrs=["bgyellow", "black"])
print
# get config values
token, project_id = _load_config()
# get other inputs
print required_style("feature name")
name = raw_input("> ")
print
# walk through feature
description = ""
print required_style("As a...")
description += "As a " + raw_input("> ")
print required_style("I want to...")
description += "\n\nI want to " + raw_input("> ")
print required_style("So that...")
description += "\n\nSo that " + raw_input("> ")
description += "\n\n"
# optional extra description
print
print optional_style("extra description for the feature"), "(optional)"
extra_description = raw_input("> ")
description += extra_description
# create the client
client = pivotaltracker.Client(token=token)
response = client.add_story(
project_id=project_id,
name=name,
description=description,
story_type="feature",
)
# print the url of the story
print result_style(response["story"]["url"])
choose_command(argv=argv)
|
harperreed/pivotaltracker
|
pivotaltracker/tool.py
|
Python
|
bsd-3-clause
| 5,691
|
"""utils.
Usage:
cvrminer.utils make-data-directory
"""
from __future__ import absolute_import, division, print_function
import errno
from os import makedirs
from os.path import join
from .config import data_directory
def make_data_directory(*args):
"""Make data directory.
The data_directory is by default `cvrminer_data`. If `directory` is None
then this directory is created if it does not already exist.
Parameters
----------
directory : str or None
Name of directory
"""
if len(args) == 0:
make_data_directory(data_directory())
else:
try:
makedirs(join(*args))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def main():
"""Handle command-line interface."""
from docopt import docopt
arguments = docopt(__doc__)
if arguments['make-data-directory']:
make_data_directory()
if __name__ == '__main__':
main()
|
fnielsen/cvrminer
|
cvrminer/utils.py
|
Python
|
apache-2.0
| 988
|
import sys
import numpy as np
# compute sigmoid nonlinearity
def sigmoid(x):
output = 1/(1+np.exp(-x))
return output
# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
return output*(1-output)
# input dataset
X = np.array([ [0,1],
[0,1],
[1,0],
[1,0] ])
# output dataset
y = np.array([[0,0,1,1]]).T
# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
synapse_0 = 2*np.random.random((2,1)) - 1
for iter in xrange(10000):
# forward propagation
layer_0 = X
layer_1 = sigmoid(np.dot(layer_0,synapse_0))
# how much did we miss?
layer_1_error = layer_1 - y
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
layer_1_delta = layer_1_error * sigmoid_output_to_derivative(layer_1)
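    # chain rule: the weight gradient is layer_0.T dotted with layer_1_delta,
    # i.e. the input activations scaled by the error times the sigmoid slope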
synapse_0_derivative = np.dot(layer_0.T,layer_1_delta)
# update weights
synapse_0 -= synapse_0_derivative
print "Output After Training:"
print layer_1
|
jatinmistry13/StochasticGradientDescentNeuralNetwork
|
two_layer_neural_network.py
|
Python
|
mit
| 1,126
|
"""
Queue class
"""
class Queue:
"""
A simple implementation of a FIFO queue.
"""
def __init__(self):
"""
Initialize the queue.
"""
self._items = []
def __len__(self):
"""
Return the number of items in the queue.
"""
return len(self._items)
def __iter__(self):
"""
Create an iterator for the queue.
"""
for item in self._items:
yield item
def __str__(self):
"""
Return a string representation of the queue.
"""
return str(self._items)
def enqueue(self, item):
"""
Add item to the queue.
"""
self._items.append(item)
def dequeue(self):
"""
Remove and return the least recently inserted item.
"""
return self._items.pop(0)
def clear(self):
"""
Remove all items from the queue.
"""
self._items = []
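# Hedged usage example (added for illustration): demonstrates the FIFO behaviour
# of the queue; the items used here are arbitrary.
if __name__ == "__main__":
    queue = Queue()
    for item in "abc":
        queue.enqueue(item)
    assert queue.dequeue() == "a"  # the least recently inserted item comes out first
    assert len(queue) == 2
    print(queue)  # -> ['b', 'c']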
|
maistrovas/My-Courses-Solutions
|
Coursera Principles of Computing (Part 1, 2)/week_6/Queue_Class.py
|
Python
|
mit
| 1,054
|
# Generated from tools/antlr/Fcl.g4 by ANTLR 4.5.3
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2")
buf.write(u"_\u0306\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4")
buf.write(u"\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r")
buf.write(u"\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22")
buf.write(u"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4")
buf.write(u"\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35")
buf.write(u"\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4")
buf.write(u"$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t")
buf.write(u",\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63")
buf.write(u"\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\4")
buf.write(u"9\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA")
buf.write(u"\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\t")
buf.write(u"J\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S")
buf.write(u"\tS\4T\tT\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4")
buf.write(u"\\\t\\\4]\t]\4^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\t")
buf.write(u"d\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4")
buf.write(u"\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7")
buf.write(u"\3\7\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n\3\n")
buf.write(u"\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f")
buf.write(u"\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3")
buf.write(u"\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21")
buf.write(u"\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3")
buf.write(u"\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24")
buf.write(u"\3\24\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3")
buf.write(u"\26\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30")
buf.write(u"\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3")
buf.write(u"\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32")
buf.write(u"\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3")
buf.write(u"\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33")
buf.write(u"\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3")
buf.write(u"\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34")
buf.write(u"\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3")
buf.write(u"\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write(u"\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3")
buf.write(u"\"\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3$\3$\3")
buf.write(u"$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%")
buf.write(u"\3%\3%\3%\3&\3&\3&\3\'\3\'\3\'\3(\3(\3(\3)\3)\3)\3*\3")
buf.write(u"*\3*\3*\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-")
buf.write(u"\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3\60\3\60")
buf.write(u"\3\60\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\63\3\63\3")
buf.write(u"\63\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3\65")
buf.write(u"\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\67\3\67\3\67\3")
buf.write(u"\67\3\67\3\67\38\38\38\39\39\39\39\39\3:\3:\3:\3:\3:")
buf.write(u"\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3<\3<\3<\3<\3=\3=\3=\3")
buf.write(u"=\3=\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3?\3?\3?\3?\3@\3@")
buf.write(u"\3@\3@\3@\3A\3A\3A\3A\3A\3B\3B\3B\3B\3B\3B\3C\3C\3C\3")
buf.write(u"C\3C\3C\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3E\3E\3E\3E")
buf.write(u"\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3G\3G\3H\6")
buf.write(u"H\u027a\nH\rH\16H\u027b\3H\3H\3I\5I\u0281\nI\3I\6I\u0284")
buf.write(u"\nI\rI\16I\u0285\3I\3I\3J\3J\3J\3K\3K\3L\3L\3M\3M\3N")
buf.write(u"\3N\3N\3O\3O\3P\3P\3Q\3Q\3R\3R\3S\3S\3T\3T\3U\3U\3V\3")
buf.write(u"V\3W\3W\3X\3X\3Y\3Y\3Z\6Z\u02ad\nZ\rZ\16Z\u02ae\3[\3")
buf.write(u"[\3\\\3\\\5\\\u02b5\n\\\3]\3]\3^\3^\3_\3_\5_\u02bd\n")
buf.write(u"_\3`\3`\5`\u02c1\n`\3`\3`\3`\5`\u02c6\n`\3`\3`\3`\5`")
buf.write(u"\u02cb\n`\3`\5`\u02ce\n`\3a\3a\3a\3a\7a\u02d4\na\fa\16")
buf.write(u"a\u02d7\13a\3a\3a\3a\3a\5a\u02dd\na\3a\3a\3b\3b\3b\3")
buf.write(u"b\7b\u02e5\nb\fb\16b\u02e8\13b\3b\3b\3b\3b\5b\u02ee\n")
buf.write(u"b\3b\3b\3c\3c\3c\3c\7c\u02f6\nc\fc\16c\u02f9\13c\3c\3")
buf.write(u"c\3c\3c\3d\3d\3d\7d\u0302\nd\fd\16d\u0305\13d\4\u02d5")
buf.write(u"\u02e6\2e\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25")
buf.write(u"\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26")
buf.write(u"+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C")
buf.write(u"#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66")
buf.write(u"k\67m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085D\u0087")
buf.write(u"E\u0089F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097")
buf.write(u"M\u0099N\u009bO\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7")
buf.write(u"U\u00a9V\u00abW\u00adX\u00afY\u00b1Z\u00b3\2\u00b5\2")
buf.write(u"\u00b7\2\u00b9\2\u00bb\2\u00bd\2\u00bf[\u00c1\\\u00c3")
buf.write(u"]\u00c5^\u00c7_\3\2\34\4\2CCcc\4\2DDdd\4\2UUuu\4\2EE")
buf.write(u"ee\4\2WWww\4\2VVvv\4\2PPpp\4\2FFff\4\2OOoo\4\2KKkk\4")
buf.write(u"\2HHhh\4\2QQqq\4\2GGgg\4\2IIii\4\2NNnn\4\2\\\\||\4\2")
buf.write(u"[[{{\4\2ZZzz\4\2MMmm\4\2TTtt\4\2XXxx\4\2RRrr\4\2JJjj")
buf.write(u"\4\2YYyy\4\2\13\13\"\"\4\2\f\f\17\17\u0312\2\3\3\2\2")
buf.write(u"\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2")
buf.write(u"\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25")
buf.write(u"\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35")
buf.write(u"\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2")
buf.write(u"\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3")
buf.write(u"\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3")
buf.write(u"\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2")
buf.write(u"A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2")
buf.write(u"\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2")
buf.write(u"\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2")
buf.write(u"\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3")
buf.write(u"\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2")
buf.write(u"q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2")
buf.write(u"\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2")
buf.write(u"\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089")
buf.write(u"\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2")
buf.write(u"\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2")
buf.write(u"\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d")
buf.write(u"\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2")
buf.write(u"\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2")
buf.write(u"\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2\2\2\u00b1")
buf.write(u"\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2")
buf.write(u"\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\3\u00c9\3\2\2\2")
buf.write(u"\5\u00cd\3\2\2\2\7\u00d2\3\2\2\2\t\u00d6\3\2\2\2\13\u00da")
buf.write(u"\3\2\2\2\r\u00df\3\2\2\2\17\u00e4\3\2\2\2\21\u00e9\3")
buf.write(u"\2\2\2\23\u00ed\3\2\2\2\25\u00f4\3\2\2\2\27\u00f8\3\2")
buf.write(u"\2\2\31\u00fc\3\2\2\2\33\u0100\3\2\2\2\35\u0104\3\2\2")
buf.write(u"\2\37\u0108\3\2\2\2!\u010d\3\2\2\2#\u0112\3\2\2\2%\u0116")
buf.write(u"\3\2\2\2\'\u011e\3\2\2\2)\u0128\3\2\2\2+\u012d\3\2\2")
buf.write(u"\2-\u0132\3\2\2\2/\u0138\3\2\2\2\61\u0141\3\2\2\2\63")
buf.write(u"\u014f\3\2\2\2\65\u0162\3\2\2\2\67\u016e\3\2\2\29\u017c")
buf.write(u"\3\2\2\2;\u0184\3\2\2\2=\u0188\3\2\2\2?\u0191\3\2\2\2")
buf.write(u"A\u019a\3\2\2\2C\u01a0\3\2\2\2E\u01a7\3\2\2\2G\u01ad")
buf.write(u"\3\2\2\2I\u01bc\3\2\2\2K\u01c4\3\2\2\2M\u01c7\3\2\2\2")
buf.write(u"O\u01ca\3\2\2\2Q\u01cd\3\2\2\2S\u01d0\3\2\2\2U\u01d4")
buf.write(u"\3\2\2\2W\u01d8\3\2\2\2Y\u01df\3\2\2\2[\u01e3\3\2\2\2")
buf.write(u"]\u01ea\3\2\2\2_\u01f1\3\2\2\2a\u01f4\3\2\2\2c\u01f7")
buf.write(u"\3\2\2\2e\u01fb\3\2\2\2g\u0200\3\2\2\2i\u0203\3\2\2\2")
buf.write(u"k\u020a\3\2\2\2m\u020f\3\2\2\2o\u0215\3\2\2\2q\u0218")
buf.write(u"\3\2\2\2s\u021d\3\2\2\2u\u0227\3\2\2\2w\u022c\3\2\2\2")
buf.write(u"y\u0230\3\2\2\2{\u023b\3\2\2\2}\u023f\3\2\2\2\177\u0243")
buf.write(u"\3\2\2\2\u0081\u0248\3\2\2\2\u0083\u024d\3\2\2\2\u0085")
buf.write(u"\u0253\3\2\2\2\u0087\u0259\3\2\2\2\u0089\u025e\3\2\2")
buf.write(u"\2\u008b\u0268\3\2\2\2\u008d\u0273\3\2\2\2\u008f\u0279")
buf.write(u"\3\2\2\2\u0091\u0283\3\2\2\2\u0093\u0289\3\2\2\2\u0095")
buf.write(u"\u028c\3\2\2\2\u0097\u028e\3\2\2\2\u0099\u0290\3\2\2")
buf.write(u"\2\u009b\u0292\3\2\2\2\u009d\u0295\3\2\2\2\u009f\u0297")
buf.write(u"\3\2\2\2\u00a1\u0299\3\2\2\2\u00a3\u029b\3\2\2\2\u00a5")
buf.write(u"\u029d\3\2\2\2\u00a7\u029f\3\2\2\2\u00a9\u02a1\3\2\2")
buf.write(u"\2\u00ab\u02a3\3\2\2\2\u00ad\u02a5\3\2\2\2\u00af\u02a7")
buf.write(u"\3\2\2\2\u00b1\u02a9\3\2\2\2\u00b3\u02ac\3\2\2\2\u00b5")
buf.write(u"\u02b0\3\2\2\2\u00b7\u02b4\3\2\2\2\u00b9\u02b6\3\2\2")
buf.write(u"\2\u00bb\u02b8\3\2\2\2\u00bd\u02bc\3\2\2\2\u00bf\u02c0")
buf.write(u"\3\2\2\2\u00c1\u02cf\3\2\2\2\u00c3\u02e0\3\2\2\2\u00c5")
buf.write(u"\u02f1\3\2\2\2\u00c7\u02fe\3\2\2\2\u00c9\u00ca\t\2\2")
buf.write(u"\2\u00ca\u00cb\t\3\2\2\u00cb\u00cc\t\4\2\2\u00cc\4\3")
buf.write(u"\2\2\2\u00cd\u00ce\t\2\2\2\u00ce\u00cf\t\5\2\2\u00cf")
buf.write(u"\u00d0\t\5\2\2\u00d0\u00d1\t\6\2\2\u00d1\6\3\2\2\2\u00d2")
buf.write(u"\u00d3\t\2\2\2\u00d3\u00d4\t\5\2\2\u00d4\u00d5\t\7\2")
buf.write(u"\2\u00d5\b\3\2\2\2\u00d6\u00d7\t\2\2\2\u00d7\u00d8\t")
buf.write(u"\b\2\2\u00d8\u00d9\t\t\2\2\u00d9\n\3\2\2\2\u00da\u00db")
buf.write(u"\t\2\2\2\u00db\u00dc\t\4\2\2\u00dc\u00dd\t\6\2\2\u00dd")
buf.write(u"\u00de\t\n\2\2\u00de\f\3\2\2\2\u00df\u00e0\t\3\2\2\u00e0")
buf.write(u"\u00e1\t\t\2\2\u00e1\u00e2\t\13\2\2\u00e2\u00e3\t\f\2")
buf.write(u"\2\u00e3\16\3\2\2\2\u00e4\u00e5\t\3\2\2\u00e5\u00e6\t")
buf.write(u"\4\2\2\u00e6\u00e7\t\6\2\2\u00e7\u00e8\t\n\2\2\u00e8")
buf.write(u"\20\3\2\2\2\u00e9\u00ea\t\5\2\2\u00ea\u00eb\t\r\2\2\u00eb")
buf.write(u"\u00ec\t\2\2\2\u00ec\22\3\2\2\2\u00ed\u00ee\t\5\2\2\u00ee")
buf.write(u"\u00ef\t\r\2\2\u00ef\u00f0\t\4\2\2\u00f0\u00f1\t\13\2")
buf.write(u"\2\u00f1\u00f2\t\b\2\2\u00f2\u00f3\t\16\2\2\u00f3\24")
buf.write(u"\3\2\2\2\u00f4\u00f5\t\5\2\2\u00f5\u00f6\t\r\2\2\u00f6")
buf.write(u"\u00f7\t\17\2\2\u00f7\26\3\2\2\2\u00f8\u00f9\t\3\2\2")
buf.write(u"\u00f9\u00fa\t\r\2\2\u00fa\u00fb\t\2\2\2\u00fb\30\3\2")
buf.write(u"\2\2\u00fc\u00fd\t\n\2\2\u00fd\u00fe\t\r\2\2\u00fe\u00ff")
buf.write(u"\t\n\2\2\u00ff\32\3\2\2\2\u0100\u0101\t\20\2\2\u0101")
buf.write(u"\u0102\t\r\2\2\u0102\u0103\t\n\2\2\u0103\34\3\2\2\2\u0104")
buf.write(u"\u0105\t\4\2\2\u0105\u0106\t\r\2\2\u0106\u0107\t\n\2")
buf.write(u"\2\u0107\36\3\2\2\2\u0108\u0109\t\5\2\2\u0109\u010a\t")
buf.write(u"\r\2\2\u010a\u010b\t\17\2\2\u010b\u010c\t\4\2\2\u010c")
buf.write(u" \3\2\2\2\u010d\u010e\t\5\2\2\u010e\u010f\t\r\2\2\u010f")
buf.write(u"\u0110\t\17\2\2\u0110\u0111\t\f\2\2\u0111\"\3\2\2\2\u0112")
buf.write(u"\u0113\t\5\2\2\u0113\u0114\t\r\2\2\u0114\u0115\t\4\2")
buf.write(u"\2\u0115$\3\2\2\2\u0116\u0117\t\t\2\2\u0117\u0118\t\16")
buf.write(u"\2\2\u0118\u0119\t\f\2\2\u0119\u011a\t\2\2\2\u011a\u011b")
buf.write(u"\t\6\2\2\u011b\u011c\t\20\2\2\u011c\u011d\t\7\2\2\u011d")
buf.write(u"&\3\2\2\2\u011e\u011f\t\t\2\2\u011f\u0120\t\16\2\2\u0120")
buf.write(u"\u0121\t\f\2\2\u0121\u0122\t\6\2\2\u0122\u0123\t\21\2")
buf.write(u"\2\u0123\u0124\t\21\2\2\u0124\u0125\t\13\2\2\u0125\u0126")
buf.write(u"\t\f\2\2\u0126\u0127\t\22\2\2\u0127(\3\2\2\2\u0128\u0129")
buf.write(u"\t\t\2\2\u0129\u012a\t\n\2\2\u012a\u012b\t\2\2\2\u012b")
buf.write(u"\u012c\t\23\2\2\u012c*\3\2\2\2\u012d\u012e\t\t\2\2\u012e")
buf.write(u"\u012f\t\n\2\2\u012f\u0130\t\13\2\2\u0130\u0131\t\b\2")
buf.write(u"\2\u0131,\3\2\2\2\u0132\u0133\t\t\2\2\u0133\u0134\t\4")
buf.write(u"\2\2\u0134\u0135\t\13\2\2\u0135\u0136\t\17\2\2\u0136")
buf.write(u"\u0137\t\n\2\2\u0137.\3\2\2\2\u0138\u0139\t\16\2\2\u0139")
buf.write(u"\u013a\t\13\2\2\u013a\u013b\t\b\2\2\u013b\u013c\t\4\2")
buf.write(u"\2\u013c\u013d\t\7\2\2\u013d\u013e\t\16\2\2\u013e\u013f")
buf.write(u"\t\13\2\2\u013f\u0140\t\b\2\2\u0140\60\3\2\2\2\u0141")
buf.write(u"\u0142\t\16\2\2\u0142\u0143\t\b\2\2\u0143\u0144\t\t\2")
buf.write(u"\2\u0144\u0145\7a\2\2\u0145\u0146\t\t\2\2\u0146\u0147")
buf.write(u"\t\16\2\2\u0147\u0148\t\f\2\2\u0148\u0149\t\6\2\2\u0149")
buf.write(u"\u014a\t\21\2\2\u014a\u014b\t\21\2\2\u014b\u014c\t\13")
buf.write(u"\2\2\u014c\u014d\t\f\2\2\u014d\u014e\t\22\2\2\u014e\62")
buf.write(u"\3\2\2\2\u014f\u0150\t\16\2\2\u0150\u0151\t\b\2\2\u0151")
buf.write(u"\u0152\t\t\2\2\u0152\u0153\7a\2\2\u0153\u0154\t\f\2\2")
buf.write(u"\u0154\u0155\t\6\2\2\u0155\u0156\t\b\2\2\u0156\u0157")
buf.write(u"\t\5\2\2\u0157\u0158\t\7\2\2\u0158\u0159\t\13\2\2\u0159")
buf.write(u"\u015a\t\r\2\2\u015a\u015b\t\b\2\2\u015b\u015c\7a\2\2")
buf.write(u"\u015c\u015d\t\3\2\2\u015d\u015e\t\20\2\2\u015e\u015f")
buf.write(u"\t\r\2\2\u015f\u0160\t\5\2\2\u0160\u0161\t\24\2\2\u0161")
buf.write(u"\64\3\2\2\2\u0162\u0163\t\16\2\2\u0163\u0164\t\b\2\2")
buf.write(u"\u0164\u0165\t\t\2\2\u0165\u0166\7a\2\2\u0166\u0167\t")
buf.write(u"\f\2\2\u0167\u0168\t\6\2\2\u0168\u0169\t\21\2\2\u0169")
buf.write(u"\u016a\t\21\2\2\u016a\u016b\t\13\2\2\u016b\u016c\t\f")
buf.write(u"\2\2\u016c\u016d\t\22\2\2\u016d\66\3\2\2\2\u016e\u016f")
buf.write(u"\t\16\2\2\u016f\u0170\t\b\2\2\u0170\u0171\t\t\2\2\u0171")
buf.write(u"\u0172\7a\2\2\u0172\u0173\t\25\2\2\u0173\u0174\t\6\2")
buf.write(u"\2\u0174\u0175\t\20\2\2\u0175\u0176\t\16\2\2\u0176\u0177")
buf.write(u"\t\3\2\2\u0177\u0178\t\20\2\2\u0178\u0179\t\r\2\2\u0179")
buf.write(u"\u017a\t\5\2\2\u017a\u017b\t\24\2\2\u017b8\3\2\2\2\u017c")
buf.write(u"\u017d\t\16\2\2\u017d\u017e\t\b\2\2\u017e\u017f\t\t\2")
buf.write(u"\2\u017f\u0180\7a\2\2\u0180\u0181\t\26\2\2\u0181\u0182")
buf.write(u"\t\2\2\2\u0182\u0183\t\25\2\2\u0183:\3\2\2\2\u0184\u0185")
buf.write(u"\t\16\2\2\u0185\u0186\t\23\2\2\u0186\u0187\t\27\2\2\u0187")
buf.write(u"<\3\2\2\2\u0188\u0189\t\30\2\2\u0189\u018a\t\2\2\2\u018a")
buf.write(u"\u018b\t\n\2\2\u018b\u018c\t\2\2\2\u018c\u018d\t\5\2")
buf.write(u"\2\u018d\u018e\t\30\2\2\u018e\u018f\t\16\2\2\u018f\u0190")
buf.write(u"\t\25\2\2\u0190>\3\2\2\2\u0191\u0192\t\f\2\2\u0192\u0193")
buf.write(u"\t\6\2\2\u0193\u0194\t\b\2\2\u0194\u0195\t\5\2\2\u0195")
buf.write(u"\u0196\t\7\2\2\u0196\u0197\t\13\2\2\u0197\u0198\t\r\2")
buf.write(u"\2\u0198\u0199\t\b\2\2\u0199@\3\2\2\2\u019a\u019b\t\17")
buf.write(u"\2\2\u019b\u019c\t\2\2\2\u019c\u019d\t\6\2\2\u019d\u019e")
buf.write(u"\t\4\2\2\u019e\u019f\t\4\2\2\u019fB\3\2\2\2\u01a0\u01a1")
buf.write(u"\t\17\2\2\u01a1\u01a2\t\2\2\2\u01a2\u01a3\t\6\2\2\u01a3")
buf.write(u"\u01a4\t\4\2\2\u01a4\u01a5\t\4\2\2\u01a5\u01a6\7\64\2")
buf.write(u"\2\u01a6D\3\2\2\2\u01a7\u01a8\t\17\2\2\u01a8\u01a9\t")
buf.write(u"\3\2\2\u01a9\u01aa\t\16\2\2\u01aa\u01ab\t\20\2\2\u01ab")
buf.write(u"\u01ac\t\20\2\2\u01acF\3\2\2\2\u01ad\u01ae\t\f\2\2\u01ae")
buf.write(u"\u01af\t\6\2\2\u01af\u01b0\t\b\2\2\u01b0\u01b1\t\5\2")
buf.write(u"\2\u01b1\u01b2\t\7\2\2\u01b2\u01b3\t\13\2\2\u01b3\u01b4")
buf.write(u"\t\r\2\2\u01b4\u01b5\t\b\2\2\u01b5\u01b6\7a\2\2\u01b6")
buf.write(u"\u01b7\t\3\2\2\u01b7\u01b8\t\20\2\2\u01b8\u01b9\t\r\2")
buf.write(u"\2\u01b9\u01ba\t\5\2\2\u01ba\u01bb\t\24\2\2\u01bbH\3")
buf.write(u"\2\2\2\u01bc\u01bd\t\f\2\2\u01bd\u01be\t\6\2\2\u01be")
buf.write(u"\u01bf\t\21\2\2\u01bf\u01c0\t\21\2\2\u01c0\u01c1\t\13")
buf.write(u"\2\2\u01c1\u01c2\t\f\2\2\u01c2\u01c3\t\22\2\2\u01c3J")
buf.write(u"\3\2\2\2\u01c4\u01c5\t\13\2\2\u01c5\u01c6\t\f\2\2\u01c6")
buf.write(u"L\3\2\2\2\u01c7\u01c8\t\13\2\2\u01c8\u01c9\t\4\2\2\u01c9")
buf.write(u"N\3\2\2\2\u01ca\u01cb\t\20\2\2\u01cb\u01cc\t\n\2\2\u01cc")
buf.write(u"P\3\2\2\2\u01cd\u01ce\t\20\2\2\u01ce\u01cf\t\b\2\2\u01cf")
buf.write(u"R\3\2\2\2\u01d0\u01d1\t\20\2\2\u01d1\u01d2\t\r\2\2\u01d2")
buf.write(u"\u01d3\t\17\2\2\u01d3T\3\2\2\2\u01d4\u01d5\t\n\2\2\u01d5")
buf.write(u"\u01d6\t\2\2\2\u01d6\u01d7\t\23\2\2\u01d7V\3\2\2\2\u01d8")
buf.write(u"\u01d9\t\n\2\2\u01d9\u01da\t\16\2\2\u01da\u01db\t\7\2")
buf.write(u"\2\u01db\u01dc\t\30\2\2\u01dc\u01dd\t\r\2\2\u01dd\u01de")
buf.write(u"\t\t\2\2\u01deX\3\2\2\2\u01df\u01e0\t\n\2\2\u01e0\u01e1")
buf.write(u"\t\13\2\2\u01e1\u01e2\t\b\2\2\u01e2Z\3\2\2\2\u01e3\u01e4")
buf.write(u"\t\b\2\2\u01e4\u01e5\t\13\2\2\u01e5\u01e6\t\27\2\2\u01e6")
buf.write(u"\u01e7\t\n\2\2\u01e7\u01e8\t\13\2\2\u01e8\u01e9\t\b\2")
buf.write(u"\2\u01e9\\\3\2\2\2\u01ea\u01eb\t\b\2\2\u01eb\u01ec\t")
buf.write(u"\13\2\2\u01ec\u01ed\t\27\2\2\u01ed\u01ee\t\n\2\2\u01ee")
buf.write(u"\u01ef\t\2\2\2\u01ef\u01f0\t\23\2\2\u01f0^\3\2\2\2\u01f1")
buf.write(u"\u01f2\t\n\2\2\u01f2\u01f3\t\n\2\2\u01f3`\3\2\2\2\u01f4")
buf.write(u"\u01f5\t\b\2\2\u01f5\u01f6\t\5\2\2\u01f6b\3\2\2\2\u01f7")
buf.write(u"\u01f8\t\b\2\2\u01f8\u01f9\t\r\2\2\u01f9\u01fa\t\7\2")
buf.write(u"\2\u01fad\3\2\2\2\u01fb\u01fc\t\b\2\2\u01fc\u01fd\t\4")
buf.write(u"\2\2\u01fd\u01fe\t\6\2\2\u01fe\u01ff\t\n\2\2\u01fff\3")
buf.write(u"\2\2\2\u0200\u0201\t\r\2\2\u0201\u0202\t\25\2\2\u0202")
buf.write(u"h\3\2\2\2\u0203\u0204\t\27\2\2\u0204\u0205\t\25\2\2\u0205")
buf.write(u"\u0206\t\r\2\2\u0206\u0207\t\3\2\2\u0207\u0208\t\r\2")
buf.write(u"\2\u0208\u0209\t\25\2\2\u0209j\3\2\2\2\u020a\u020b\t")
buf.write(u"\27\2\2\u020b\u020c\t\25\2\2\u020c\u020d\t\r\2\2\u020d")
buf.write(u"\u020e\t\t\2\2\u020el\3\2\2\2\u020f\u0210\t\25\2\2\u0210")
buf.write(u"\u0211\t\2\2\2\u0211\u0212\t\b\2\2\u0212\u0213\t\17\2")
buf.write(u"\2\u0213\u0214\t\16\2\2\u0214n\3\2\2\2\u0215\u0216\t")
buf.write(u"\25\2\2\u0216\u0217\t\n\2\2\u0217p\3\2\2\2\u0218\u0219")
buf.write(u"\t\25\2\2\u0219\u021a\t\6\2\2\u021a\u021b\t\20\2\2\u021b")
buf.write(u"\u021c\t\16\2\2\u021cr\3\2\2\2\u021d\u021e\t\25\2\2\u021e")
buf.write(u"\u021f\t\6\2\2\u021f\u0220\t\20\2\2\u0220\u0221\t\16")
buf.write(u"\2\2\u0221\u0222\t\3\2\2\u0222\u0223\t\20\2\2\u0223\u0224")
buf.write(u"\t\r\2\2\u0224\u0225\t\5\2\2\u0225\u0226\t\24\2\2\u0226")
buf.write(u"t\3\2\2\2\u0227\u0228\t\4\2\2\u0228\u0229\t\13\2\2\u0229")
buf.write(u"\u022a\t\17\2\2\u022a\u022b\t\n\2\2\u022bv\3\2\2\2\u022c")
buf.write(u"\u022d\t\4\2\2\u022d\u022e\t\13\2\2\u022e\u022f\t\b\2")
buf.write(u"\2\u022fx\3\2\2\2\u0230\u0231\t\4\2\2\u0231\u0232\t\13")
buf.write(u"\2\2\u0232\u0233\t\b\2\2\u0233\u0234\t\17\2\2\u0234\u0235")
buf.write(u"\t\20\2\2\u0235\u0236\t\16\2\2\u0236\u0237\t\7\2\2\u0237")
buf.write(u"\u0238\t\r\2\2\u0238\u0239\t\b\2\2\u0239\u023a\t\4\2")
buf.write(u"\2\u023az\3\2\2\2\u023b\u023c\t\4\2\2\u023c\u023d\t\6")
buf.write(u"\2\2\u023d\u023e\t\n\2\2\u023e|\3\2\2\2\u023f\u0240\t")
buf.write(u"\7\2\2\u0240\u0241\t\2\2\2\u0241\u0242\t\b\2\2\u0242")
buf.write(u"~\3\2\2\2\u0243\u0244\t\7\2\2\u0244\u0245\t\16\2\2\u0245")
buf.write(u"\u0246\t\25\2\2\u0246\u0247\t\n\2\2\u0247\u0080\3\2\2")
buf.write(u"\2\u0248\u0249\t\7\2\2\u0249\u024a\t\30\2\2\u024a\u024b")
buf.write(u"\t\16\2\2\u024b\u024c\t\b\2\2\u024c\u0082\3\2\2\2\u024d")
buf.write(u"\u024e\t\7\2\2\u024e\u024f\t\25\2\2\u024f\u0250\t\2\2")
buf.write(u"\2\u0250\u0251\t\27\2\2\u0251\u0252\t\16\2\2\u0252\u0084")
buf.write(u"\3\2\2\2\u0253\u0254\t\7\2\2\u0254\u0255\t\25\2\2\u0255")
buf.write(u"\u0256\t\13\2\2\u0256\u0257\t\2\2\2\u0257\u0258\t\b\2")
buf.write(u"\2\u0258\u0086\3\2\2\2\u0259\u025a\t\25\2\2\u025a\u025b")
buf.write(u"\t\16\2\2\u025b\u025c\t\2\2\2\u025c\u025d\t\20\2\2\u025d")
buf.write(u"\u0088\3\2\2\2\u025e\u025f\t\26\2\2\u025f\u0260\t\2\2")
buf.write(u"\2\u0260\u0261\t\25\2\2\u0261\u0262\7a\2\2\u0262\u0263")
buf.write(u"\t\13\2\2\u0263\u0264\t\b\2\2\u0264\u0265\t\27\2\2\u0265")
buf.write(u"\u0266\t\6\2\2\u0266\u0267\t\7\2\2\u0267\u008a\3\2\2")
buf.write(u"\2\u0268\u0269\t\26\2\2\u0269\u026a\t\2\2\2\u026a\u026b")
buf.write(u"\t\25\2\2\u026b\u026c\7a\2\2\u026c\u026d\t\r\2\2\u026d")
buf.write(u"\u026e\t\6\2\2\u026e\u026f\t\7\2\2\u026f\u0270\t\27\2")
buf.write(u"\2\u0270\u0271\t\6\2\2\u0271\u0272\t\7\2\2\u0272\u008c")
buf.write(u"\3\2\2\2\u0273\u0274\t\31\2\2\u0274\u0275\t\13\2\2\u0275")
buf.write(u"\u0276\t\7\2\2\u0276\u0277\t\30\2\2\u0277\u008e\3\2\2")
buf.write(u"\2\u0278\u027a\t\32\2\2\u0279\u0278\3\2\2\2\u027a\u027b")
buf.write(u"\3\2\2\2\u027b\u0279\3\2\2\2\u027b\u027c\3\2\2\2\u027c")
buf.write(u"\u027d\3\2\2\2\u027d\u027e\bH\2\2\u027e\u0090\3\2\2\2")
buf.write(u"\u027f\u0281\7\17\2\2\u0280\u027f\3\2\2\2\u0280\u0281")
buf.write(u"\3\2\2\2\u0281\u0282\3\2\2\2\u0282\u0284\7\f\2\2\u0283")
buf.write(u"\u0280\3\2\2\2\u0284\u0285\3\2\2\2\u0285\u0283\3\2\2")
buf.write(u"\2\u0285\u0286\3\2\2\2\u0286\u0287\3\2\2\2\u0287\u0288")
buf.write(u"\bI\2\2\u0288\u0092\3\2\2\2\u0289\u028a\7<\2\2\u028a")
buf.write(u"\u028b\7?\2\2\u028b\u0094\3\2\2\2\u028c\u028d\7<\2\2")
buf.write(u"\u028d\u0096\3\2\2\2\u028e\u028f\7.\2\2\u028f\u0098\3")
buf.write(u"\2\2\2\u0290\u0291\7\60\2\2\u0291\u009a\3\2\2\2\u0292")
buf.write(u"\u0293\7\60\2\2\u0293\u0294\7\60\2\2\u0294\u009c\3\2")
buf.write(u"\2\2\u0295\u0296\7`\2\2\u0296\u009e\3\2\2\2\u0297\u0298")
buf.write(u"\7}\2\2\u0298\u00a0\3\2\2\2\u0299\u029a\7*\2\2\u029a")
buf.write(u"\u00a2\3\2\2\2\u029b\u029c\7/\2\2\u029c\u00a4\3\2\2\2")
buf.write(u"\u029d\u029e\7\'\2\2\u029e\u00a6\3\2\2\2\u029f\u02a0")
buf.write(u"\7-\2\2\u02a0\u00a8\3\2\2\2\u02a1\u02a2\7\177\2\2\u02a2")
buf.write(u"\u00aa\3\2\2\2\u02a3\u02a4\7+\2\2\u02a4\u00ac\3\2\2\2")
buf.write(u"\u02a5\u02a6\7=\2\2\u02a6\u00ae\3\2\2\2\u02a7\u02a8\7")
buf.write(u"\61\2\2\u02a8\u00b0\3\2\2\2\u02a9\u02aa\7,\2\2\u02aa")
buf.write(u"\u00b2\3\2\2\2\u02ab\u02ad\5\u00b5[\2\u02ac\u02ab\3\2")
buf.write(u"\2\2\u02ad\u02ae\3\2\2\2\u02ae\u02ac\3\2\2\2\u02ae\u02af")
buf.write(u"\3\2\2\2\u02af\u00b4\3\2\2\2\u02b0\u02b1\4\62;\2\u02b1")
buf.write(u"\u00b6\3\2\2\2\u02b2\u02b5\5\u00b9]\2\u02b3\u02b5\5\u00bb")
buf.write(u"^\2\u02b4\u02b2\3\2\2\2\u02b4\u02b3\3\2\2\2\u02b5\u00b8")
buf.write(u"\3\2\2\2\u02b6\u02b7\4c|\2\u02b7\u00ba\3\2\2\2\u02b8")
buf.write(u"\u02b9\4C\\\2\u02b9\u00bc\3\2\2\2\u02ba\u02bd\5\u00b7")
buf.write(u"\\\2\u02bb\u02bd\5\u00b5[\2\u02bc\u02ba\3\2\2\2\u02bc")
buf.write(u"\u02bb\3\2\2\2\u02bd\u00be\3\2\2\2\u02be\u02c1\5\u00a7")
buf.write(u"T\2\u02bf\u02c1\5\u00a3R\2\u02c0\u02be\3\2\2\2\u02c0")
buf.write(u"\u02bf\3\2\2\2\u02c0\u02c1\3\2\2\2\u02c1\u02c2\3\2\2")
buf.write(u"\2\u02c2\u02c5\5\u00b3Z\2\u02c3\u02c4\7\60\2\2\u02c4")
buf.write(u"\u02c6\5\u00b3Z\2\u02c5\u02c3\3\2\2\2\u02c5\u02c6\3\2")
buf.write(u"\2\2\u02c6\u02cd\3\2\2\2\u02c7\u02ca\t\16\2\2\u02c8\u02cb")
buf.write(u"\5\u00a7T\2\u02c9\u02cb\5\u00a3R\2\u02ca\u02c8\3\2\2")
buf.write(u"\2\u02ca\u02c9\3\2\2\2\u02ca\u02cb\3\2\2\2\u02cb\u02cc")
buf.write(u"\3\2\2\2\u02cc\u02ce\5\u00b3Z\2\u02cd\u02c7\3\2\2\2\u02cd")
buf.write(u"\u02ce\3\2\2\2\u02ce\u00c0\3\2\2\2\u02cf\u02d0\7*\2\2")
buf.write(u"\u02d0\u02d1\7,\2\2\u02d1\u02d5\3\2\2\2\u02d2\u02d4\13")
buf.write(u"\2\2\2\u02d3\u02d2\3\2\2\2\u02d4\u02d7\3\2\2\2\u02d5")
buf.write(u"\u02d6\3\2\2\2\u02d5\u02d3\3\2\2\2\u02d6\u02d8\3\2\2")
buf.write(u"\2\u02d7\u02d5\3\2\2\2\u02d8\u02d9\7,\2\2\u02d9\u02da")
buf.write(u"\7+\2\2\u02da\u02dc\3\2\2\2\u02db\u02dd\5\u0091I\2\u02dc")
buf.write(u"\u02db\3\2\2\2\u02dc\u02dd\3\2\2\2\u02dd\u02de\3\2\2")
buf.write(u"\2\u02de\u02df\ba\2\2\u02df\u00c2\3\2\2\2\u02e0\u02e1")
buf.write(u"\7\61\2\2\u02e1\u02e2\7,\2\2\u02e2\u02e6\3\2\2\2\u02e3")
buf.write(u"\u02e5\13\2\2\2\u02e4\u02e3\3\2\2\2\u02e5\u02e8\3\2\2")
buf.write(u"\2\u02e6\u02e7\3\2\2\2\u02e6\u02e4\3\2\2\2\u02e7\u02e9")
buf.write(u"\3\2\2\2\u02e8\u02e6\3\2\2\2\u02e9\u02ea\7,\2\2\u02ea")
buf.write(u"\u02eb\7\61\2\2\u02eb\u02ed\3\2\2\2\u02ec\u02ee\5\u0091")
buf.write(u"I\2\u02ed\u02ec\3\2\2\2\u02ed\u02ee\3\2\2\2\u02ee\u02ef")
buf.write(u"\3\2\2\2\u02ef\u02f0\bb\2\2\u02f0\u00c4\3\2\2\2\u02f1")
buf.write(u"\u02f2\7\61\2\2\u02f2\u02f3\7\61\2\2\u02f3\u02f7\3\2")
buf.write(u"\2\2\u02f4\u02f6\n\33\2\2\u02f5\u02f4\3\2\2\2\u02f6\u02f9")
buf.write(u"\3\2\2\2\u02f7\u02f5\3\2\2\2\u02f7\u02f8\3\2\2\2\u02f8")
buf.write(u"\u02fa\3\2\2\2\u02f9\u02f7\3\2\2\2\u02fa\u02fb\5\u0091")
buf.write(u"I\2\u02fb\u02fc\3\2\2\2\u02fc\u02fd\bc\2\2\u02fd\u00c6")
buf.write(u"\3\2\2\2\u02fe\u0303\5\u00b7\\\2\u02ff\u0302\5\u00bd")
buf.write(u"_\2\u0300\u0302\7a\2\2\u0301\u02ff\3\2\2\2\u0301\u0300")
buf.write(u"\3\2\2\2\u0302\u0305\3\2\2\2\u0303\u0301\3\2\2\2\u0303")
buf.write(u"\u0304\3\2\2\2\u0304\u00c8\3\2\2\2\u0305\u0303\3\2\2")
buf.write(u"\2\24\2\u027b\u0280\u0285\u02ae\u02b4\u02bc\u02c0\u02c5")
buf.write(u"\u02ca\u02cd\u02d5\u02dc\u02e6\u02ed\u02f7\u0301\u0303")
buf.write(u"\3\b\2\2")
return buf.getvalue()
class FclLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
ABS = 1
ACCU = 2
ACT = 3
AND = 4
ASUM = 5
BDIF = 6
BSUM = 7
COA = 8
COSINE = 9
COG = 10
BOA = 11
MOM = 12
SOM = 13
LOM = 14
COGS = 15
COGF = 16
COS = 17
DEFAULT = 18
DEFUZZIFY = 19
DMAX = 20
DMIN = 21
DSIGM = 22
EINSTEIN = 23
END_DEFUZZIFY = 24
END_FUNCTION_BLOCK = 25
END_FUZZIFY = 26
END_RULEBLOCK = 27
END_VAR = 28
EXP = 29
HAMACHER = 30
FUNCTION = 31
GAUSS = 32
GAUSS2 = 33
GBELL = 34
FUNCTION_BLOCK = 35
FUZZIFY = 36
IF = 37
IS = 38
LM = 39
LN = 40
LOG = 41
MAX = 42
METHOD = 43
MIN = 44
NIPMIN = 45
NIPMAX = 46
MM = 47
NC = 48
NOT = 49
NSUM = 50
OR = 51
PROBOR = 52
PROD = 53
RANGE = 54
RM = 55
RULE = 56
RULEBLOCK = 57
SIGM = 58
SIN = 59
SINGLETONS = 60
SUM = 61
TAN = 62
TERM = 63
THEN = 64
TRAPE = 65
TRIAN = 66
TYPE_REAL = 67
VAR_INPUT = 68
VAR_OUTPUT = 69
WITH = 70
WS = 71
NEWLINE = 72
ASSIGN_OPERATOR = 73
COLON = 74
COMMA = 75
DOT = 76
DOTS = 77
HAT = 78
LEFT_CURLY = 79
LEFT_PARENTHESIS = 80
MINUS = 81
PERCENT = 82
PLUS = 83
RIGHT_CURLY = 84
RIGHT_PARENTHESIS = 85
SEMICOLON = 86
SLASH = 87
STAR = 88
REAL = 89
COMMENT = 90
COMMENT_C = 91
COMMENT_SL = 92
ID = 93
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"':'", u"','", u"'.'", u"'..'", u"'^'", u"'{'", u"'('", u"'-'",
u"'%'", u"'+'", u"'}'", u"')'", u"';'", u"'/'", u"'*'" ]
symbolicNames = [ u"<INVALID>",
u"ABS", u"ACCU", u"ACT", u"AND", u"ASUM", u"BDIF", u"BSUM",
u"COA", u"COSINE", u"COG", u"BOA", u"MOM", u"SOM", u"LOM", u"COGS",
u"COGF", u"COS", u"DEFAULT", u"DEFUZZIFY", u"DMAX", u"DMIN",
u"DSIGM", u"EINSTEIN", u"END_DEFUZZIFY", u"END_FUNCTION_BLOCK",
u"END_FUZZIFY", u"END_RULEBLOCK", u"END_VAR", u"EXP", u"HAMACHER",
u"FUNCTION", u"GAUSS", u"GAUSS2", u"GBELL", u"FUNCTION_BLOCK",
u"FUZZIFY", u"IF", u"IS", u"LM", u"LN", u"LOG", u"MAX", u"METHOD",
u"MIN", u"NIPMIN", u"NIPMAX", u"MM", u"NC", u"NOT", u"NSUM",
u"OR", u"PROBOR", u"PROD", u"RANGE", u"RM", u"RULE", u"RULEBLOCK",
u"SIGM", u"SIN", u"SINGLETONS", u"SUM", u"TAN", u"TERM", u"THEN",
u"TRAPE", u"TRIAN", u"TYPE_REAL", u"VAR_INPUT", u"VAR_OUTPUT",
u"WITH", u"WS", u"NEWLINE", u"ASSIGN_OPERATOR", u"COLON", u"COMMA",
u"DOT", u"DOTS", u"HAT", u"LEFT_CURLY", u"LEFT_PARENTHESIS",
u"MINUS", u"PERCENT", u"PLUS", u"RIGHT_CURLY", u"RIGHT_PARENTHESIS",
u"SEMICOLON", u"SLASH", u"STAR", u"REAL", u"COMMENT", u"COMMENT_C",
u"COMMENT_SL", u"ID" ]
ruleNames = [ u"ABS", u"ACCU", u"ACT", u"AND", u"ASUM", u"BDIF", u"BSUM",
u"COA", u"COSINE", u"COG", u"BOA", u"MOM", u"SOM", u"LOM",
u"COGS", u"COGF", u"COS", u"DEFAULT", u"DEFUZZIFY", u"DMAX",
u"DMIN", u"DSIGM", u"EINSTEIN", u"END_DEFUZZIFY", u"END_FUNCTION_BLOCK",
u"END_FUZZIFY", u"END_RULEBLOCK", u"END_VAR", u"EXP",
u"HAMACHER", u"FUNCTION", u"GAUSS", u"GAUSS2", u"GBELL",
u"FUNCTION_BLOCK", u"FUZZIFY", u"IF", u"IS", u"LM", u"LN",
u"LOG", u"MAX", u"METHOD", u"MIN", u"NIPMIN", u"NIPMAX",
u"MM", u"NC", u"NOT", u"NSUM", u"OR", u"PROBOR", u"PROD",
u"RANGE", u"RM", u"RULE", u"RULEBLOCK", u"SIGM", u"SIN",
u"SINGLETONS", u"SUM", u"TAN", u"TERM", u"THEN", u"TRAPE",
u"TRIAN", u"TYPE_REAL", u"VAR_INPUT", u"VAR_OUTPUT", u"WITH",
u"WS", u"NEWLINE", u"ASSIGN_OPERATOR", u"COLON", u"COMMA",
u"DOT", u"DOTS", u"HAT", u"LEFT_CURLY", u"LEFT_PARENTHESIS",
u"MINUS", u"PERCENT", u"PLUS", u"RIGHT_CURLY", u"RIGHT_PARENTHESIS",
u"SEMICOLON", u"SLASH", u"STAR", u"NUMBER", u"DIGIT",
u"LETTER", u"LOWER", u"UPPER", u"ALPHANUM", u"REAL", u"COMMENT",
u"COMMENT_C", u"COMMENT_SL", u"ID" ]
grammarFileName = u"Fcl.g4"
def __init__(self, input=None):
super(FclLexer, self).__init__(input)
self.checkVersion("4.5.3")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
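# --- Hedged usage sketch (assumed; not part of the generated lexer) ----------
# The generated lexer is normally driven through the ANTLR Python runtime.
# The snippet below sketches how a caller might tokenize a fragment of FCL
# text; the FCL input string is purely illustrative.
def _example_tokenize():  # hypothetical helper, never called by the lexer itself
    from antlr4 import InputStream, CommonTokenStream
    lexer = FclLexer(InputStream(u"FUNCTION_BLOCK demo\nEND_FUNCTION_BLOCK\n"))
    tokens = CommonTokenStream(lexer)
    tokens.fill()  # run the lexer over the whole input
    # Map numeric token types back to their symbolic names (EOF has type -1).
    return [(t.text, FclLexer.symbolicNames[t.type] if t.type > 0 else u"EOF")
            for t in tokens.tokens]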
|
arruda/scikit_fuzzy_fcl
|
scikit_fuzzy_fcl/py2_parser/FclLexer.py
|
Python
|
mit
| 31,626
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models, utils
from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
try:
for player in orm.PlayerDB.objects.all():
if not player.db_cmdset_storage:
player.db_cmdset_storage = settings.CMDSET_OOC
player.save()
except utils.DatabaseError:
# this will happen if we start db from scratch (ignore in that case)
pass
def backwards(self, orm):
"Write your backwards methods here."
raise RuntimeError("This migration cannot be reverted.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'objects.objectdb': {
'Meta': {'object_name': 'ObjectDB'},
'db_cmdset_storage': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destinations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_home': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'homes_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'db_location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'db_player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']", 'null': 'True', 'blank': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'players.playerattribute': {
'Meta': {'object_name': 'PlayerAttribute'},
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']"}),
'db_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'players.playerdb': {
'Meta': {'object_name': 'PlayerDB'},
'db_cmdset_storage': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']", 'null': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'players.playernick': {
'Meta': {'unique_together': "(('db_nick', 'db_type', 'db_obj'),)", 'object_name': 'PlayerNick'},
'db_nick': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']"}),
'db_real': ('django.db.models.fields.TextField', [], {}),
'db_type': ('django.db.models.fields.CharField', [], {'default': "'inputline'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['players']
|
YourCyborg/Sun-RPI
|
src/players/migrations/0005_adding_player_cmdset.py
|
Python
|
bsd-3-clause
| 7,810
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, layer_name, block_index, \
layer_input, layer_kernel, layer_stride, layer_padding, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
layer_kernel['layer%s.%s.conv1' %(layer_name, block_index)] = 3
layer_stride['layer%s.%s.conv1' %(layer_name, block_index)] = stride
layer_padding['layer%s.%s.conv1' %(layer_name, block_index)] = 1
layer_kernel['layer%s.%s.conv2' %(layer_name, block_index)] = 3
        layer_stride['layer%s.%s.conv2' %(layer_name, block_index)] = 1  # conv2 always uses stride 1 (conv3x3 default)
layer_padding['layer%s.%s.conv2' %(layer_name, block_index)] = 1
self.layer_input = layer_input
self.layer_name = layer_name
self.block_index = block_index
# self.exist_downsample = False
def forward(self, x):
residual = x
self.layer_input['layer%s.%s.conv1' %(self.layer_name, self.block_index)] = x.data
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
self.layer_input['layer%s.%s.conv2' %(self.layer_name, self.block_index)] = out.data
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
self.layer_input['layer%s.%s.downsample.0' %(self.layer_name, self.block_index)] = x.data
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, layer_name, block_index, \
layer_input, layer_kernel, layer_stride, layer_padding, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
layer_kernel['layer%s.%s.conv1' %(layer_name, block_index)] = 1
layer_stride['layer%s.%s.conv1' %(layer_name, block_index)] = 1
        layer_padding['layer%s.%s.conv1' %(layer_name, block_index)] = 0  # 1x1 conv uses no padding
layer_kernel['layer%s.%s.conv2' %(layer_name, block_index)] = 3
layer_stride['layer%s.%s.conv2' %(layer_name, block_index)] = stride
layer_padding['layer%s.%s.conv2' %(layer_name, block_index)] = 1
layer_kernel['layer%s.%s.conv3' %(layer_name, block_index)] = 1
layer_stride['layer%s.%s.conv3' %(layer_name, block_index)] = 1
        layer_padding['layer%s.%s.conv3' %(layer_name, block_index)] = 0  # 1x1 conv uses no padding
self.layer_input = layer_input
self.layer_name = layer_name
self.block_index = block_index
def forward(self, x):
residual = x
self.layer_input['layer%s.%s.conv1' %(self.layer_name, self.block_index)] = x.data
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
self.layer_input['layer%s.%s.conv2' %(self.layer_name, self.block_index)] = out.data
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
self.layer_input['layer%s.%s.conv3' %(self.layer_name, self.block_index)] = out.data
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
self.layer_input['layer%s.%s.downsample.0' %(self.layer_name, self.block_index)] = x.data
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
# Modified by Chen Shangyu to get layer inputs
self.layer_input = dict()
self.layer_kernel = {'conv1': 7}
self.layer_stride = {'conv1': 2}
self.layer_padding = {'conv1': 3}
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], layer_name='1')
self.layer2 = self._make_layer(block, 128, layers[1], layer_name='2', stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], layer_name='3', stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], layer_name='4', stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, layer_name, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
self.layer_kernel['layer%s.0.downsample.0' %layer_name] = 1
self.layer_stride['layer%s.0.downsample.0' %layer_name] = stride
self.layer_padding['layer%s.0.downsample.0' %layer_name] = 0
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
# def __init__(self, inplanes, planes, layer_name, block_index, \
# layer_input, layer_kernel, layer_stride, layer_padding, stride=1, downsample=None):
layers = []
layers.append(block(self.inplanes, planes, layer_name, block_index = 0,
layer_input = self.layer_input,
layer_kernel = self.layer_kernel,
layer_stride = self.layer_stride,
layer_padding = self.layer_padding,
stride = stride,
downsample = downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, layer_name, block_index = i,
layer_input = self.layer_input,
layer_kernel = self.layer_kernel,
layer_stride = self.layer_stride,
layer_padding = self.layer_padding))
return nn.Sequential(*layers)
def forward(self, x):
self.layer_input['conv1'] = x.data
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
self.layer_input['fc'] = x.data
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
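# --- Hedged usage sketch (assumed; not part of the original module) ----------
# The forward pass above records every convolution's input tensor in
# ``layer_input`` alongside its kernel size, stride and padding.  A minimal
# way to inspect those records might look like this; the input shape and the
# pretrained=False choice are illustrative assumptions only.
def _example_inspect_layer_inputs():  # hypothetical helper, not called anywhere
    import torch
    model = resnet18(pretrained=False)
    model.eval()
    x = torch.randn(1, 3, 224, 224)  # ImageNet-sized dummy batch
    with torch.no_grad():  # newer PyTorch; very old versions wrapped x in a Variable instead
        model(x)
    # layer_input maps e.g. 'layer1.0.conv1' to the tensor fed into that conv.
    return {name: tuple(t.size()) for name, t in model.layer_input.items()}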
|
csyhhu/L-OBS
|
PyTorch/ImageNet/models/resnet_layer_input.py
|
Python
|
mit
| 10,119
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/mte90/Desktop/Prog/LearnHotkeys/defdialog.ui'
#
# Created: Tue Dec 18 17:51:54 2012
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_DefDialog(object):
def setupUi(self, DefDialog):
DefDialog.setObjectName(_fromUtf8("DefDialog"))
DefDialog.resize(392, 309)
DefDialog.setModal(True)
self.pushApply = QtGui.QPushButton(DefDialog)
self.pushApply.setGeometry(QtCore.QRect(280, 280, 97, 25))
self.pushApply.setObjectName(_fromUtf8("pushApply"))
self.groupBox = QtGui.QGroupBox(DefDialog)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 371, 121))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.comboDef = QtGui.QComboBox(self.groupBox)
self.comboDef.setObjectName(_fromUtf8("comboDef"))
self.gridLayout.addWidget(self.comboDef, 0, 0, 1, 1)
self.labelDef = QtGui.QLabel(self.groupBox)
self.labelDef.setText(_fromUtf8(""))
self.labelDef.setObjectName(_fromUtf8("labelDef"))
self.gridLayout.addWidget(self.labelDef, 1, 0, 1, 1)
self.pushUpdate = QtGui.QPushButton(DefDialog)
self.pushUpdate.setGeometry(QtCore.QRect(180, 280, 94, 25))
self.pushUpdate.setObjectName(_fromUtf8("pushUpdate"))
self.pushDownload = QtGui.QPushButton(DefDialog)
self.pushDownload.setGeometry(QtCore.QRect(80, 280, 94, 25))
self.pushDownload.setObjectName(_fromUtf8("pushDownload"))
self.groupBox_2 = QtGui.QGroupBox(DefDialog)
self.groupBox_2.setGeometry(QtCore.QRect(10, 130, 371, 141))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.listUpdate = QtGui.QListView(self.groupBox_2)
self.listUpdate.setProperty("showDropIndicator", False)
self.listUpdate.setObjectName(_fromUtf8("listUpdate"))
self.gridLayout_2.addWidget(self.listUpdate, 0, 0, 1, 1)
self.retranslateUi(DefDialog)
QtCore.QMetaObject.connectSlotsByName(DefDialog)
def retranslateUi(self, DefDialog):
DefDialog.setWindowTitle(QtGui.QApplication.translate("DefDialog", "Choose Definition", None, QtGui.QApplication.UnicodeUTF8))
self.pushApply.setText(QtGui.QApplication.translate("DefDialog", "Apply", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("DefDialog", "Choose Definition File", None, QtGui.QApplication.UnicodeUTF8))
self.pushUpdate.setText(QtGui.QApplication.translate("DefDialog", "Update List", None, QtGui.QApplication.UnicodeUTF8))
self.pushDownload.setText(QtGui.QApplication.translate("DefDialog", "Download", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("DefDialog", "Update", None, QtGui.QApplication.UnicodeUTF8))
|
Mte90/LearnHotkeys
|
ui_defdialog.py
|
Python
|
gpl-3.0
| 3,312
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: simple_recon
:platform: Unix
:synopsis: A simple implementation of a reconstruction routine for testing
purposes
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
from savu.plugins.base_recon import BaseRecon
from savu.data.plugin_list import CitationInformation
from savu.plugins.driver.cpu_plugin import CpuPlugin
import numpy as np
from savu.plugins.utils import register_plugin
@register_plugin
class SimpleRecon(BaseRecon, CpuPlugin):
"""
    A Plugin to apply a simple reconstruction with no dependencies
"""
def __init__(self):
super(SimpleRecon, self).__init__("SimpleRecon")
def _filter(self, sinogram):
ff = np.arange(sinogram.shape[0])
ff -= sinogram.shape[0]/2
ff = np.abs(ff)
fs = np.fft.fft(sinogram)
ffs = fs*ff
return np.fft.ifft(ffs).real
def _back_project(self, mapping, sino_element, centre):
mapping_array = mapping+centre
return sino_element[mapping_array.astype('int')]
def _mapping_array(self, shape, center, theta):
x, y = np.meshgrid(np.arange(-center[0], shape[0] - center[0]),
np.arange(-center[1], shape[1] - center[1]))
return x*np.cos(theta) - y*np.sin(theta)
def reconstruct(self, sino, centre_of_rotations, angles, vol_shape, init):
sinogram = sino[:, np.newaxis, :]
try:
centre = self.kwargs['centre']
except:
centre = (vol_shape[0]/2, vol_shape[1]/2)
results = []
for j in range(sinogram.shape[1]):
result = np.zeros(vol_shape, dtype=np.float32)
for i in range(sinogram.shape[0]):
theta = i*(np.pi/sinogram.shape[0])
mapping_array = self._mapping_array(vol_shape, centre, theta)
filt = np.zeros(sinogram.shape[2]*3, dtype=np.float32)
filt[sinogram.shape[2]:sinogram.shape[2]*2] = \
self._filter(np.log(np.nan_to_num(sinogram[i, j, :])+1))
result += \
self._back_project(mapping_array, filt,
(centre_of_rotations +
sinogram.shape[2]))
results.append(result[:, np.newaxis, :])
result = np.hstack(results)
return result
def get_max_frames(self):
return 1
def get_citation_information(self):
cite_info = CitationInformation()
cite_info.description = \
("The Tomographic reconstruction performed in this processing " +
"chain is derived from this work.")
cite_info.bibtex = \
("@book{avinash2001principles,\n" +
" title={Principles of computerized tomographic imaging},\n" +
" author={Avinash C.. Kak and Slaney, Malcolm},\n" +
" year={2001},\n" +
" publisher={Society for Industrial and Applied Mathematics}\n" +
"}")
cite_info.endnote = \
("%0 Book\n" +
"%T Principles of computerized tomographic imaging\n" +
"%A Avinash C.. Kak\n" +
"%A Slaney, Malcolm\n" +
"%@ 089871494X\n" +
"%D 2001\n" +
"%I Society for Industrial and Applied Mathematics")
cite_info.doi = "http://dx.doi.org/10.1137/1.9780898719277"
return cite_info
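# --- Hedged illustration (assumed; not part of the plugin) --------------------
# The reconstruction above is a plain filtered back-projection: each sinogram
# row is ramp-filtered and then smeared back along its projection angle via
# _mapping_array / _back_project.  The standalone numpy sketch below mirrors
# that back-projection step on a tiny synthetic example; all sizes are
# illustrative assumptions and nothing here goes through the Savu framework.
def _example_back_project_step():  # hypothetical helper, not used by Savu
    import numpy as np
    vol_shape = (8, 8)
    centre = (vol_shape[0] / 2, vol_shape[1] / 2)
    theta = np.pi / 4.0
    x, y = np.meshgrid(np.arange(-centre[0], vol_shape[0] - centre[0]),
                       np.arange(-centre[1], vol_shape[1] - centre[1]))
    mapping = x * np.cos(theta) - y * np.sin(theta)  # same maths as _mapping_array
    sino_row = np.arange(24, dtype=np.float32)       # stands in for a padded, filtered row
    centre_of_rotation = 12                          # index of the rotation axis in that row
    return sino_row[(mapping + centre_of_rotation).astype('int')]  # one back-projected slice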
|
FedeMPouzols/Savu
|
savu/plugins/reconstructions/simple_recon.py
|
Python
|
gpl-3.0
| 4,028
|
# -*- coding: utf-8 -*-
import logging
import json
from copy import deepcopy
import time
import uuid
from .queue import factory
class Client(object):
job_tpl = {'id': None,
'msg': None,
'create_time': None,
'timeout': None,
'finish_time': None,
'status': None,
'result': None,
'errors': None}
def __init__(self, rediscon, qname, qtype, sleep_func,
max_poll_time=120, poll_freq=1):
self.log = logging.getLogger(self.__class__.__name__)
self._rediscon = rediscon
self._queue = factory(qtype, rediscon, qname)
self.sleep_func = sleep_func
self.max_poll_time = max_poll_time
self.poll_freq = poll_freq
def qsize(self):
return len(self._queue)
def send(self, msg, timeout=None):
"""
        Creates a job, submits it to the queue,
        and polls until the job is done or it times out.
"""
job = self._create_job(msg, timeout)
status = self._poll(job)
ret = self._rediscon.hgetall(job['id'])
self._rediscon.delete(job['id'])
return ret
def _create_job(self, msg, timeout):
job = deepcopy(self.job_tpl)
job['id'] = uuid.uuid4().hex
job['msg'] = json.dumps(msg)
job['create_time'] = int(time.time())
job['timeout'] = timeout
job['status'] = 'new'
self._rediscon.hmset(job['id'], job)
self._queue.push(job['id'])
return job
def _poll(self, job):
timeout = job['timeout'] if job['timeout'] else self.max_poll_time
poll_until = int(time.time()) + timeout
status = None
while True:
if time.time() > poll_until:
self._rediscon.hset(job['id'], 'status', 'expire')
break
status = self._get_status(job)
if status in ('complete', 'fail', 'expire' ):
break
self.sleep_func(self.poll_freq)
return self._get_status(job)
def _is_completed(self, job):
status = self._get_status(job)
return status in ('complete', 'fail')
def _get_status(self, job):
return self._rediscon.hget(job['id'], 'status')
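# --- Hedged usage sketch (assumed; not part of the original module) ----------
# ``send`` stores a job hash in redis, pushes its id onto the work queue and
# polls the job's ``status`` field until a worker marks it complete (or it
# expires), then returns the job hash.  A minimal caller might look like the
# sketch below; the queue type string 'fifo' and the redis connection details
# are illustrative assumptions, since the valid qtype values live in
# ``.queue.factory``.
def _example_send(msg):  # hypothetical helper, not part of the library
    import redis
    rediscon = redis.StrictRedis(host='localhost', port=6379, db=0)
    client = Client(rediscon, qname='jobs', qtype='fifo',  # 'fifo' is assumed
                    sleep_func=time.sleep, max_poll_time=30, poll_freq=1)
    return client.send(msg, timeout=10)  # blocks until completion, failure or expiry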
|
gosom/proteraios-queue
|
proteraios/client.py
|
Python
|
mit
| 2,327
|
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import io
import itertools
import re
import sys
class BadInput(Exception):
"""Unsupported input has been found."""
class SwitchCase(object):
"""Represents a CASE block."""
def __init__(self, identifier, block):
self.identifier = identifier
self.block = block
class Optimizer(object):
"""Generates optimized identifier matching code."""
def __init__(self, output_file, array_variable, length_variable):
self.output_file = output_file
self.array_variable = array_variable
self.length_variable = length_variable
def inspect(self, cases):
lengths = list(set([len(c.identifier) for c in cases]))
lengths.sort()
def response(length):
self.inspect_array([c for c in cases if len(c.identifier) == length], range(length))
self.write_selection(self.length_variable, lengths, str, response)
def score(self, alternatives):
return -sum([len(list(count)) ** 2 for _, count in itertools.groupby(sorted(alternatives))])
def choose_selection_pos(self, cases, pending):
candidates = [pos for pos in pending if all(alternative.isalpha() for alternative in [c.identifier[pos] for c in cases])]
if not candidates:
raise BadInput('Case-insensitive switching on non-alphabetic characters not yet implemented')
return sorted(candidates, key=lambda pos: self.score([c.identifier[pos] for c in cases]))[0]
def inspect_array(self, cases, pending):
assert len(cases) >= 1
if pending:
common = [pos for pos in pending
if len(set([c.identifier[pos] for c in cases])) == 1]
if common:
identifier = cases[0].identifier
for index in xrange(len(common)):
if index == 0:
self.output_file.write(u'if (LIKELY(')
else:
self.output_file.write(u' && ')
pos = common[index]
if identifier[pos].isalpha():
self.output_file.write("(%s[%d] | 0x20) == '%s'" %
(self.array_variable, pos, identifier[pos]))
else:
self.output_file.write("%s[%d] == '%s'" %
(self.array_variable, pos, identifier[pos]))
self.output_file.write(u')) {\n')
next_pending = list(set(pending) - set(common))
next_pending.sort()
self.inspect_array(cases, next_pending)
self.output_file.write(u'}\n')
else:
pos = self.choose_selection_pos(cases, pending)
next_pending = filter(lambda p: p != pos, pending)
alternatives = list(set([c.identifier[pos] for c in cases]))
alternatives.sort()
def literal(alternative):
if isinstance(alternative, int):
return str(alternative)
else:
return "'%s'" % alternative
def response(alternative):
self.inspect_array([c for c in cases if c.identifier[pos] == alternative],
next_pending)
expression = '(%s[%d] | 0x20)' % (self.array_variable, pos)
self.write_selection(expression, alternatives, literal, response)
else:
assert len(cases) == 1
for block_line in cases[0].block:
self.output_file.write(block_line)
def write_selection(self, expression, alternatives, literal, response):
if len(alternatives) == 1:
self.output_file.write(u'if (LIKELY(%s == %s)) {\n' % (expression, literal(alternatives[0])))
response(alternatives[0])
self.output_file.write(u'}\n')
elif len(alternatives) == 2:
self.output_file.write(u'if (%s == %s) {\n' % (expression, literal(alternatives[0])))
response(alternatives[0])
self.output_file.write(u'} else if (LIKELY(%s == %s)) {\n' % (expression, literal(alternatives[1])))
response(alternatives[1])
self.output_file.write(u'}\n')
else:
self.output_file.write('switch (%s) {\n' % expression)
for alternative in alternatives:
self.output_file.write(u'case %s: {\n' % literal(alternative))
response(alternative)
self.output_file.write(u'} break;\n')
self.output_file.write(u'}\n')
class LineProcessor(object):
def process_line(self, line):
pass
class MainLineProcessor(LineProcessor):
"""Processes the contents of an input file."""
SWITCH_PATTERN = re.compile(r'\s*SWITCH\s*\((\w*),\s*(\w*)\) \{$')
def __init__(self, output_file):
self.output_file = output_file
def process_line(self, line):
match_switch = MainLineProcessor.SWITCH_PATTERN.match(line)
if match_switch:
array_variable = match_switch.group(1)
length_variable = match_switch.group(2)
return SwitchLineProcessor(self, self.output_file, array_variable, length_variable)
else:
self.output_file.write(line)
return self
class SwitchLineProcessor(LineProcessor):
"""Processes the contents of a SWITCH block."""
CASE_PATTERN = re.compile(r'\s*CASE\s*\(\"([a-z0-9_\-\(]*)\"\) \{$')
CLOSE_BRACE_PATTERN = re.compile(r'\s*\}$')
EMPTY_PATTERN = re.compile(r'\s*$')
def __init__(self, parent, output_file, array_variable, length_variable):
self.parent = parent
self.output_file = output_file
self.array_variable = array_variable
self.length_variable = length_variable
self.cases = []
def process_line(self, line):
match_case = SwitchLineProcessor.CASE_PATTERN.match(line)
match_close_brace = SwitchLineProcessor.CLOSE_BRACE_PATTERN.match(line)
match_empty = SwitchLineProcessor.EMPTY_PATTERN.match(line)
if match_case:
identifier = match_case.group(1)
return CaseLineProcessor(self, self.output_file, identifier)
elif match_close_brace:
Optimizer(self.output_file, self.array_variable, self.length_variable).inspect(self.cases)
return self.parent
elif match_empty:
return self
else:
raise BadInput('Invalid line within SWITCH: %s' % line)
def add_case(self, latest_case):
if latest_case.identifier in [c.identifier for c in self.cases]:
raise BadInput('Repeated case: %s' % latest_case.identifier)
self.cases.append(latest_case)
class CaseLineProcessor(LineProcessor):
"""Processes the contents of a CASE block."""
CLOSE_BRACE_PATTERN = re.compile(r'\s*\}$')
BREAK_PATTERN = re.compile(r'break;')
def __init__(self, parent, output_file, identifier):
self.parent = parent
self.output_file = output_file
self.identifier = identifier
self.block = []
def process_line(self, line):
match_close_brace = CaseLineProcessor.CLOSE_BRACE_PATTERN.match(line)
match_break = CaseLineProcessor.BREAK_PATTERN.search(line)
if match_close_brace:
self.parent.add_case(SwitchCase(self.identifier, self.block))
return self.parent
elif match_break:
raise BadInput('break within CASE not supported: %s' % line)
else:
self.block.append(line)
return self
def process_file(input_name, output_name):
"""Transforms input file into legal C++ source code."""
with io.open(input_name) as input_file:
with io.open(output_name, 'w') as output_file:
processor = MainLineProcessor(output_file)
input_lines = input_file.readlines()
for line in input_lines:
processor = processor.process_line(line)
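# --- Hedged usage sketch (assumed; not part of the original script) ----------
# The script rewrites an input file containing SWITCH/CASE pseudo-blocks (the
# format implied by the regexes in MainLineProcessor, SwitchLineProcessor and
# CaseLineProcessor) into nested, case-insensitive C++ comparisons.  The input
# roughly looks like the assumed example below; the file names are
# illustrative only.
def _example_run():  # hypothetical helper, not invoked by any build step
    source = (u'SWITCH (data, length) {\n'
              u'    CASE ("href") {\n'
              u'        return Kind::Href;\n'
              u'    }\n'
              u'    CASE ("class") {\n'
              u'        return Kind::Class;\n'
              u'    }\n'
              u'}\n')
    with io.open('example_input.in', 'w') as example_file:
        example_file.write(source)
    process_file('example_input.in', 'example_output.cpp')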
if __name__ == '__main__':
process_file(sys.argv[1], sys.argv[2])
|
indashnet/InDashNet.Open.UN2000
|
android/external/chromium_org/third_party/WebKit/Source/core/scripts/make_token_matcher.py
|
Python
|
apache-2.0
| 9,695
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import time
from functools import partial
from PyQt4.Qt import QTimer, QDialog, QDialogButtonBox, QCheckBox, QVBoxLayout, QLabel, Qt
from calibre.gui2 import error_dialog
from calibre.gui2.actions import InterfaceAction
class Choose(QDialog):
def __init__(self, fmts, parent=None):
QDialog.__init__(self, parent)
self.l = l = QVBoxLayout(self)
self.setLayout(l)
self.setWindowTitle(_('Choose format to edit'))
self.la = la = QLabel(_(
'This book has multiple formats that can be edited. Choose the format you want to edit.'))
l.addWidget(la)
self.rem = QCheckBox(_('Always ask when more than one format is available'))
self.rem.setChecked(True)
l.addWidget(self.rem)
self.bb = bb = QDialogButtonBox(self)
l.addWidget(bb)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
self.buts = buts = []
for fmt in fmts:
b = bb.addButton(fmt.upper(), bb.AcceptRole)
b.clicked.connect(partial(self.chosen, fmt))
buts.append(b)
self.fmt = None
self.resize(self.sizeHint())
def chosen(self, fmt):
self.fmt = fmt
def accept(self):
from calibre.gui2.tweak_book import tprefs
tprefs['choose_tweak_fmt'] = self.rem.isChecked()
QDialog.accept(self)
class TweakEpubAction(InterfaceAction):
name = 'Tweak ePub'
action_spec = (_('Edit Book'), 'tweak.png', _('Edit eBooks'), _('T'))
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
accepts_drops = True
def accept_enter_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def accept_drag_move_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def drop_event(self, event, mime_data):
mime = 'application/calibre+from_library'
if mime_data.hasFormat(mime):
self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
QTimer.singleShot(1, self.do_drop)
return True
return False
def do_drop(self):
book_ids = self.dropped_ids
del self.dropped_ids
if book_ids:
self.do_tweak(book_ids[0])
def genesis(self):
self.qaction.triggered.connect(self.tweak_book)
def tweak_book(self):
row = self.gui.library_view.currentIndex()
if not row.isValid():
return error_dialog(self.gui, _('Cannot Edit Book'),
_('No book selected'), show=True)
book_id = self.gui.library_view.model().id(row)
self.do_tweak(book_id)
def do_tweak(self, book_id):
if self.gui.current_view() is not self.gui.library_view:
return error_dialog(self.gui, _('Cannot Edit Book'), _(
'Editing of books on the device is not supported'), show=True)
from calibre.ebooks.oeb.polish.main import SUPPORTED
db = self.gui.library_view.model().db
fmts = db.formats(book_id, index_is_id=True) or ''
fmts = [x.upper().strip() for x in fmts.split(',')]
tweakable_fmts = set(fmts).intersection(SUPPORTED)
if not tweakable_fmts:
return error_dialog(self.gui, _('Cannot Edit Book'),
_('The book must be in the %s formats to edit.'
                '\n\nFirst convert the book to one of these formats.') % (_(' or ').join(SUPPORTED)),
show=True)
from calibre.gui2.tweak_book import tprefs
tprefs.refresh() # In case they were changed in a Tweak Book process
if len(tweakable_fmts) > 1:
if tprefs['choose_tweak_fmt']:
d = Choose(sorted(tweakable_fmts, key=tprefs.defaults['tweak_fmt_order'].index), self.gui)
if d.exec_() != d.Accepted:
return
tweakable_fmts = {d.fmt}
else:
fmts = [f for f in tprefs['tweak_fmt_order'] if f in tweakable_fmts]
if not fmts:
fmts = [f for f in tprefs.defaults['tweak_fmt_order'] if f in tweakable_fmts]
tweakable_fmts = {fmts[0]}
fmt = tuple(tweakable_fmts)[0]
path = db.new_api.format_abspath(book_id, fmt)
if path is None:
return error_dialog(self.gui, _('File missing'), _(
'The %s format is missing from the calibre library. You should run'
' library maintenance.') % fmt, show=True)
tweak = 'ebook-edit'
self.gui.setCursor(Qt.BusyCursor)
if tprefs['update_metadata_from_calibre']:
from calibre.ebooks.metadata.opf2 import pretty_print
from calibre.ebooks.metadata.meta import set_metadata
mi = db.new_api.get_metadata(book_id, get_cover=True)
with pretty_print, open(path, 'r+b') as f:
set_metadata(f, mi, stream_type=fmt.lower())
notify = '%d:%s:%s:%s' % (book_id, fmt, db.library_id, db.library_path)
try:
self.gui.job_manager.launch_gui_app(tweak, kwargs=dict(path=path, notify=notify))
time.sleep(2)
finally:
self.gui.unsetCursor()
|
insomnia-lab/calibre
|
src/calibre/gui2/actions/tweak_epub.py
|
Python
|
gpl-3.0
| 5,580
|
"""
noop_returner
~~~~~~~~~~~~~
A returner that does nothing; it is used to test the salt master's `event_return` functionality
"""
import logging
import salt.utils.jid
log = logging.getLogger(__name__)
__virtualname__ = "runtests_noop"
def __virtual__():
return True
def event_return(events):
log.debug("NOOP_RETURN.event_return - Events: %s", events)
def returner(ret):
log.debug("NOOP_RETURN.returner - Ret: %s", ret)
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
"""
Do any work necessary to prepare a JID, including sending a custom id
"""
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
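# A minimal configuration sketch (not part of the original module): to route
# master events through this returner, the salt master config would reference
# the __virtualname__ declared above. Hypothetical master config excerpt:
#
#   event_return: runtests_noop
#
# Events reaching the master would then be handed to event_return() above and
# logged at debug level.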
|
saltstack/salt
|
tests/integration/files/returners/noop_returner.py
|
Python
|
apache-2.0
| 704
|
"""
cur_callback and goal_callback are invoked to determine what actions should be taken
to reach convergence. Each should return a dict whose keys are the set of items and
whose values are a context to use when adding or removing items.
"""
def enforce(cur_callback, goal_callback, add_callback, remove_callback):
converged = True
cur = cur_callback()
goal = goal_callback()
missing = set(goal.keys()) - set(cur.keys())
for key in missing:
add_callback(key, goal[key])
converged = False
extra = set(cur.keys()) - set(goal.keys())
for key in extra:
remove_callback(key, cur[key])
converged = False
return converged
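# A minimal usage sketch (not part of the original module), assuming the
# callbacks return {key: context} dicts as described in the docstring above.
def _example_enforce():
    current = {'alice': {'id': 1}}
    goal = {'alice': {'id': 1}, 'bob': {'id': 2}}

    def add_user(name, ctx):
        print('adding %s with context %r' % (name, ctx))

    def remove_user(name, ctx):
        print('removing %s with context %r' % (name, ctx))

    # The first pass calls add_user('bob', {'id': 2}) and returns False; once
    # the current state matches the goal, enforce() returns True.
    return enforce(lambda: current, lambda: goal, add_user, remove_user)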
|
Livefyre/enforcer
|
enforcer/__init__.py
|
Python
|
mit
| 636
|
# Directory tree dictionaries to be used with create_dtree() test tool.
example_home_dir = {
# Mock home directory including duplicate directory names and no uniform naming
# convention for directories.
'Desktop': None,
'devel': {
'c': {
'ch1': None
},
'django': {
'cityTest': {
'cityTest': None,
},
'djangoTheTango': {
'media': None,
'rango': {
'migrations': None,
},
'static': {
'images': None,
'js': None,
'profilepics': None,
},
'tango_with_django_project': None,
'templates': None,
},
'myBlog': {
'blog': {
'migrations': None,
'__pycache__': None,
'static': None,
},
'myBlog': None,
},
'python-markdown2': None,
'tdd': {
'superlists': {
'lists': {
'migrations': None,
'__pycache__': None,
},
},
},
},
'maths': None,
'python': {
'python3': {
'jumpdir': {
'jumpdir': {
'__pycache__': None,
},
'jumpdir.egg-info': None,
'scripts': None,
'tests': None,
},
},
'xonsh': {
'docs': None,
'__pycache__': None,
'scripts': None,
'tests': None,
'xonsh': None,
},
},
'vim': {
'dotvim': None
},
},
'Documents': {
'usr': None,
'work': None,
},
'Downloads': {
'DAVID BOWIE': None,
'Courtney Barnett- Sometimes I sit and think, and sometimes I just sit': None,
'DjangoPi': None,
'JOY DIVISION': None,
},
'Music': None,
'nltk_data': None,
'Pictures': {
'NorthKoreaTrip': None,
'Antartica': None,
'CUBA': None,
},
'Public': None,
'Templates': None,
'Videos': None,
}
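# Hypothetical sketch of what a create_dtree() helper consuming these
# dictionaries might look like (the real test tool lives elsewhere in the
# repository): a None value marks a leaf directory, a nested dict recurses.
import os


def create_dtree_sketch(tree, root):
    for name, children in tree.items():
        path = os.path.join(root, name)
        os.makedirs(path)
        if children:
            create_dtree_sketch(children, path)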
|
cgmcintyr/jumpdir
|
tests/example_dtrees.py
|
Python
|
mit
| 2,506
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## simple-ref-count.h: ns3::SimpleRefCount<ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::dot11s::IeBeaconTimingUnit', 'ns3::empty', 'ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
## ie-dot11s-configuration.h: ns3::dot11s::dot11sCongestionControlMode [enumeration]
module.add_enum('dot11sCongestionControlMode', ['CONGESTION_SIGNALING', 'CONGESTION_NULL'])
## ie-dot11s-configuration.h: ns3::dot11s::dot11sAuthenticationProtocol [enumeration]
module.add_enum('dot11sAuthenticationProtocol', ['AUTH_NULL', 'AUTH_SAE'])
## ie-dot11s-configuration.h: ns3::dot11s::dot11sPathSelectionProtocol [enumeration]
module.add_enum('dot11sPathSelectionProtocol', ['PROTOCOL_HWMP'])
## ie-dot11s-configuration.h: ns3::dot11s::dot11sSynchronizationProtocolIdentifier [enumeration]
module.add_enum('dot11sSynchronizationProtocolIdentifier', ['SYNC_NEIGHBOUR_OFFSET', 'SYNC_NULL'])
## ie-dot11s-peer-management.h: ns3::dot11s::PmpReasonCode [enumeration]
module.add_enum('PmpReasonCode', ['REASON11S_PEERING_CANCELLED', 'REASON11S_MESH_MAX_PEERS', 'REASON11S_MESH_CAPABILITY_POLICY_VIOLATION', 'REASON11S_MESH_CLOSE_RCVD', 'REASON11S_MESH_MAX_RETRIES', 'REASON11S_MESH_CONFIRM_TIMEOUT', 'REASON11S_MESH_INVALID_GTK', 'REASON11S_MESH_INCONSISTENT_PARAMETERS', 'REASON11S_MESH_INVALID_SECURITY_CAPABILITY', 'REASON11S_RESERVED'])
## ie-dot11s-configuration.h: ns3::dot11s::dot11sPathSelectionMetric [enumeration]
module.add_enum('dot11sPathSelectionMetric', ['METRIC_AIRTIME'])
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability [class]
module.add_class('Dot11sMeshCapability')
## hwmp-protocol.h: ns3::dot11s::HwmpProtocol [class]
module.add_class('HwmpProtocol', parent=root_module['ns3::MeshL2RoutingProtocol'])
## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination [struct]
module.add_class('FailedDestination', outer_class=root_module['ns3::dot11s::HwmpProtocol'])
## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTiming [class]
module.add_class('IeBeaconTiming', parent=root_module['ns3::WifiInformationElement'])
## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTimingUnit [class]
module.add_class('IeBeaconTimingUnit', parent=root_module['ns3::SimpleRefCount< ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> >'])
## ie-dot11s-configuration.h: ns3::dot11s::IeConfiguration [class]
module.add_class('IeConfiguration', parent=root_module['ns3::WifiInformationElement'])
## ie-dot11s-id.h: ns3::dot11s::IeMeshId [class]
module.add_class('IeMeshId', parent=root_module['ns3::WifiInformationElement'])
## ie-dot11s-id.h: ns3::dot11s::IeMeshIdChecker [class]
module.add_class('IeMeshIdChecker', parent=root_module['ns3::AttributeChecker'])
## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue [class]
module.add_class('IeMeshIdValue', parent=root_module['ns3::AttributeValue'])
## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement [class]
module.add_class('IePeerManagement', parent=root_module['ns3::WifiInformationElement'])
## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement::Subtype [enumeration]
module.add_enum('Subtype', ['PEER_OPEN', 'PEER_CONFIRM', 'PEER_CLOSE'], outer_class=root_module['ns3::dot11s::IePeerManagement'])
## peer-link.h: ns3::dot11s::PeerLink [class]
module.add_class('PeerLink', parent=root_module['ns3::Object'])
## peer-link.h: ns3::dot11s::PeerLink::PeerState [enumeration]
module.add_enum('PeerState', ['IDLE', 'OPN_SNT', 'CNF_RCVD', 'OPN_RCVD', 'ESTAB', 'HOLDING'], outer_class=root_module['ns3::dot11s::PeerLink'])
## peer-management-protocol.h: ns3::dot11s::PeerManagementProtocol [class]
module.add_class('PeerManagementProtocol', parent=root_module['ns3::Object'])
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3Dot11sDot11sMeshCapability_methods(root_module, root_module['ns3::dot11s::Dot11sMeshCapability'])
register_Ns3Dot11sHwmpProtocol_methods(root_module, root_module['ns3::dot11s::HwmpProtocol'])
register_Ns3Dot11sHwmpProtocolFailedDestination_methods(root_module, root_module['ns3::dot11s::HwmpProtocol::FailedDestination'])
register_Ns3Dot11sIeBeaconTiming_methods(root_module, root_module['ns3::dot11s::IeBeaconTiming'])
register_Ns3Dot11sIeBeaconTimingUnit_methods(root_module, root_module['ns3::dot11s::IeBeaconTimingUnit'])
register_Ns3Dot11sIeConfiguration_methods(root_module, root_module['ns3::dot11s::IeConfiguration'])
register_Ns3Dot11sIeMeshId_methods(root_module, root_module['ns3::dot11s::IeMeshId'])
register_Ns3Dot11sIeMeshIdChecker_methods(root_module, root_module['ns3::dot11s::IeMeshIdChecker'])
register_Ns3Dot11sIeMeshIdValue_methods(root_module, root_module['ns3::dot11s::IeMeshIdValue'])
register_Ns3Dot11sIePeerManagement_methods(root_module, root_module['ns3::dot11s::IePeerManagement'])
register_Ns3Dot11sPeerLink_methods(root_module, root_module['ns3::dot11s::PeerLink'])
register_Ns3Dot11sPeerManagementProtocol_methods(root_module, root_module['ns3::dot11s::PeerManagementProtocol'])
return
def register_Ns3Dot11sDot11sMeshCapability_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::Dot11sMeshCapability(ns3::dot11s::Dot11sMeshCapability const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::Dot11sMeshCapability const &', 'arg0')])
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::Dot11sMeshCapability() [constructor]
cls.add_constructor([])
## ie-dot11s-configuration.h: ns3::Buffer::Iterator ns3::dot11s::Dot11sMeshCapability::Deserialize(ns3::Buffer::Iterator i) [member function]
cls.add_method('Deserialize',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'i')])
## ie-dot11s-configuration.h: uint8_t ns3::dot11s::Dot11sMeshCapability::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint8_t',
[],
is_const=True)
## ie-dot11s-configuration.h: uint16_t ns3::dot11s::Dot11sMeshCapability::GetUint16() const [member function]
cls.add_method('GetUint16',
'uint16_t',
[],
is_const=True)
## ie-dot11s-configuration.h: bool ns3::dot11s::Dot11sMeshCapability::Is(uint16_t cap, uint8_t n) const [member function]
cls.add_method('Is',
'bool',
[param('uint16_t', 'cap'), param('uint8_t', 'n')],
is_const=True)
## ie-dot11s-configuration.h: ns3::Buffer::Iterator ns3::dot11s::Dot11sMeshCapability::Serialize(ns3::Buffer::Iterator i) const [member function]
cls.add_method('Serialize',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'i')],
is_const=True)
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::MCCAEnabled [variable]
cls.add_instance_attribute('MCCAEnabled', 'bool', is_const=False)
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::MCCASupported [variable]
cls.add_instance_attribute('MCCASupported', 'bool', is_const=False)
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::TBTTAdjustment [variable]
cls.add_instance_attribute('TBTTAdjustment', 'bool', is_const=False)
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::acceptPeerLinks [variable]
cls.add_instance_attribute('acceptPeerLinks', 'bool', is_const=False)
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::beaconTimingReport [variable]
cls.add_instance_attribute('beaconTimingReport', 'bool', is_const=False)
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::forwarding [variable]
cls.add_instance_attribute('forwarding', 'bool', is_const=False)
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability::powerSaveLevel [variable]
cls.add_instance_attribute('powerSaveLevel', 'bool', is_const=False)
return
def register_Ns3Dot11sHwmpProtocol_methods(root_module, cls):
## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::HwmpProtocol() [constructor]
cls.add_constructor([])
## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## hwmp-protocol.h: static ns3::TypeId ns3::dot11s::HwmpProtocol::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## hwmp-protocol.h: bool ns3::dot11s::HwmpProtocol::Install(ns3::Ptr<ns3::MeshPointDevice> arg0) [member function]
cls.add_method('Install',
'bool',
[param('ns3::Ptr< ns3::MeshPointDevice >', 'arg0')])
## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::PeerLinkStatus(ns3::Mac48Address meshPontAddress, ns3::Mac48Address peerAddress, uint32_t interface, bool status) [member function]
cls.add_method('PeerLinkStatus',
'void',
[param('ns3::Mac48Address', 'meshPontAddress'), param('ns3::Mac48Address', 'peerAddress'), param('uint32_t', 'interface'), param('bool', 'status')])
## hwmp-protocol.h: bool ns3::dot11s::HwmpProtocol::RemoveRoutingStuff(uint32_t fromIface, ns3::Mac48Address const source, ns3::Mac48Address const destination, ns3::Ptr<ns3::Packet> packet, uint16_t & protocolType) [member function]
cls.add_method('RemoveRoutingStuff',
'bool',
[param('uint32_t', 'fromIface'), param('ns3::Mac48Address const', 'source'), param('ns3::Mac48Address const', 'destination'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t &', 'protocolType')],
is_virtual=True)
## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::Report(std::ostream & arg0) const [member function]
cls.add_method('Report',
'void',
[param('std::ostream &', 'arg0')],
is_const=True)
## hwmp-protocol.h: bool ns3::dot11s::HwmpProtocol::RequestRoute(uint32_t sourceIface, ns3::Mac48Address const source, ns3::Mac48Address const destination, ns3::Ptr<ns3::Packet const> packet, uint16_t protocolType, ns3::Callback<void, bool, ns3::Ptr<ns3::Packet>, ns3::Mac48Address, ns3::Mac48Address, unsigned short, unsigned int, ns3::empty, ns3::empty, ns3::empty> routeReply) [member function]
cls.add_method('RequestRoute',
'bool',
[param('uint32_t', 'sourceIface'), param('ns3::Mac48Address const', 'source'), param('ns3::Mac48Address const', 'destination'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocolType'), param('ns3::Callback< void, bool, ns3::Ptr< ns3::Packet >, ns3::Mac48Address, ns3::Mac48Address, unsigned short, unsigned int, ns3::empty, ns3::empty, ns3::empty >', 'routeReply')],
is_virtual=True)
## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::ResetStats() [member function]
cls.add_method('ResetStats',
'void',
[])
## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::SetNeighboursCallback(ns3::Callback<std::vector<ns3::Mac48Address, std::allocator<ns3::Mac48Address> >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetNeighboursCallback',
'void',
[param('ns3::Callback< std::vector< ns3::Mac48Address >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::SetRoot() [member function]
cls.add_method('SetRoot',
'void',
[])
## hwmp-protocol.h: void ns3::dot11s::HwmpProtocol::UnsetRoot() [member function]
cls.add_method('UnsetRoot',
'void',
[])
return
def register_Ns3Dot11sHwmpProtocolFailedDestination_methods(root_module, cls):
## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::FailedDestination() [constructor]
cls.add_constructor([])
## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::FailedDestination(ns3::dot11s::HwmpProtocol::FailedDestination const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::HwmpProtocol::FailedDestination const &', 'arg0')])
## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::destination [variable]
cls.add_instance_attribute('destination', 'ns3::Mac48Address', is_const=False)
## hwmp-protocol.h: ns3::dot11s::HwmpProtocol::FailedDestination::seqnum [variable]
cls.add_instance_attribute('seqnum', 'uint32_t', is_const=False)
return
def register_Ns3Dot11sIeBeaconTiming_methods(root_module, cls):
cls.add_output_stream_operator()
## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTiming::IeBeaconTiming(ns3::dot11s::IeBeaconTiming const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::IeBeaconTiming const &', 'arg0')])
## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTiming::IeBeaconTiming() [constructor]
cls.add_constructor([])
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::AddNeighboursTimingElementUnit(uint16_t aid, ns3::Time last_beacon, ns3::Time beacon_interval) [member function]
cls.add_method('AddNeighboursTimingElementUnit',
'void',
[param('uint16_t', 'aid'), param('ns3::Time', 'last_beacon'), param('ns3::Time', 'beacon_interval')])
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::ClearTimingElement() [member function]
cls.add_method('ClearTimingElement',
'void',
[])
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::DelNeighboursTimingElementUnit(uint16_t aid, ns3::Time last_beacon, ns3::Time beacon_interval) [member function]
cls.add_method('DelNeighboursTimingElementUnit',
'void',
[param('uint16_t', 'aid'), param('ns3::Time', 'last_beacon'), param('ns3::Time', 'beacon_interval')])
## ie-dot11s-beacon-timing.h: uint8_t ns3::dot11s::IeBeaconTiming::DeserializeInformationField(ns3::Buffer::Iterator i, uint8_t length) [member function]
cls.add_method('DeserializeInformationField',
'uint8_t',
[param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')],
is_virtual=True)
## ie-dot11s-beacon-timing.h: ns3::WifiInformationElementId ns3::dot11s::IeBeaconTiming::ElementId() const [member function]
cls.add_method('ElementId',
'ns3::WifiInformationElementId',
[],
is_const=True, is_virtual=True)
## ie-dot11s-beacon-timing.h: uint8_t ns3::dot11s::IeBeaconTiming::GetInformationFieldSize() const [member function]
cls.add_method('GetInformationFieldSize',
'uint8_t',
[],
is_const=True, is_virtual=True)
## ie-dot11s-beacon-timing.h: std::vector<ns3::Ptr<ns3::dot11s::IeBeaconTimingUnit>, std::allocator<ns3::Ptr<ns3::dot11s::IeBeaconTimingUnit> > > ns3::dot11s::IeBeaconTiming::GetNeighboursTimingElementsList() [member function]
cls.add_method('GetNeighboursTimingElementsList',
'std::vector< ns3::Ptr< ns3::dot11s::IeBeaconTimingUnit > >',
[])
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTiming::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
cls.add_method('SerializeInformationField',
'void',
[param('ns3::Buffer::Iterator', 'i')],
is_const=True, is_virtual=True)
return
def register_Ns3Dot11sIeBeaconTimingUnit_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTimingUnit::IeBeaconTimingUnit(ns3::dot11s::IeBeaconTimingUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::IeBeaconTimingUnit const &', 'arg0')])
## ie-dot11s-beacon-timing.h: ns3::dot11s::IeBeaconTimingUnit::IeBeaconTimingUnit() [constructor]
cls.add_constructor([])
## ie-dot11s-beacon-timing.h: uint8_t ns3::dot11s::IeBeaconTimingUnit::GetAid() const [member function]
cls.add_method('GetAid',
'uint8_t',
[],
is_const=True)
## ie-dot11s-beacon-timing.h: uint16_t ns3::dot11s::IeBeaconTimingUnit::GetBeaconInterval() const [member function]
cls.add_method('GetBeaconInterval',
'uint16_t',
[],
is_const=True)
## ie-dot11s-beacon-timing.h: uint16_t ns3::dot11s::IeBeaconTimingUnit::GetLastBeacon() const [member function]
cls.add_method('GetLastBeacon',
'uint16_t',
[],
is_const=True)
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTimingUnit::SetAid(uint8_t aid) [member function]
cls.add_method('SetAid',
'void',
[param('uint8_t', 'aid')])
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTimingUnit::SetBeaconInterval(uint16_t beaconInterval) [member function]
cls.add_method('SetBeaconInterval',
'void',
[param('uint16_t', 'beaconInterval')])
## ie-dot11s-beacon-timing.h: void ns3::dot11s::IeBeaconTimingUnit::SetLastBeacon(uint16_t lastBeacon) [member function]
cls.add_method('SetLastBeacon',
'void',
[param('uint16_t', 'lastBeacon')])
return
def register_Ns3Dot11sIeConfiguration_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ie-dot11s-configuration.h: ns3::dot11s::IeConfiguration::IeConfiguration(ns3::dot11s::IeConfiguration const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::IeConfiguration const &', 'arg0')])
## ie-dot11s-configuration.h: ns3::dot11s::IeConfiguration::IeConfiguration() [constructor]
cls.add_constructor([])
## ie-dot11s-configuration.h: uint8_t ns3::dot11s::IeConfiguration::DeserializeInformationField(ns3::Buffer::Iterator i, uint8_t length) [member function]
cls.add_method('DeserializeInformationField',
'uint8_t',
[param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')],
is_virtual=True)
## ie-dot11s-configuration.h: ns3::WifiInformationElementId ns3::dot11s::IeConfiguration::ElementId() const [member function]
cls.add_method('ElementId',
'ns3::WifiInformationElementId',
[],
is_const=True, is_virtual=True)
## ie-dot11s-configuration.h: uint8_t ns3::dot11s::IeConfiguration::GetInformationFieldSize() const [member function]
cls.add_method('GetInformationFieldSize',
'uint8_t',
[],
is_const=True, is_virtual=True)
## ie-dot11s-configuration.h: uint8_t ns3::dot11s::IeConfiguration::GetNeighborCount() [member function]
cls.add_method('GetNeighborCount',
'uint8_t',
[])
## ie-dot11s-configuration.h: bool ns3::dot11s::IeConfiguration::IsAirtime() [member function]
cls.add_method('IsAirtime',
'bool',
[])
## ie-dot11s-configuration.h: bool ns3::dot11s::IeConfiguration::IsHWMP() [member function]
cls.add_method('IsHWMP',
'bool',
[])
## ie-dot11s-configuration.h: ns3::dot11s::Dot11sMeshCapability const & ns3::dot11s::IeConfiguration::MeshCapability() [member function]
cls.add_method('MeshCapability',
'ns3::dot11s::Dot11sMeshCapability const &',
[])
## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
cls.add_method('SerializeInformationField',
'void',
[param('ns3::Buffer::Iterator', 'i')],
is_const=True, is_virtual=True)
## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SetMetric(ns3::dot11s::dot11sPathSelectionMetric metricId) [member function]
cls.add_method('SetMetric',
'void',
[param('ns3::dot11s::dot11sPathSelectionMetric', 'metricId')])
## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SetNeighborCount(uint8_t neighbors) [member function]
cls.add_method('SetNeighborCount',
'void',
[param('uint8_t', 'neighbors')])
## ie-dot11s-configuration.h: void ns3::dot11s::IeConfiguration::SetRouting(ns3::dot11s::dot11sPathSelectionProtocol routingId) [member function]
cls.add_method('SetRouting',
'void',
[param('ns3::dot11s::dot11sPathSelectionProtocol', 'routingId')])
return
def register_Ns3Dot11sIeMeshId_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ie-dot11s-id.h: ns3::dot11s::IeMeshId::IeMeshId(ns3::dot11s::IeMeshId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::IeMeshId const &', 'arg0')])
## ie-dot11s-id.h: ns3::dot11s::IeMeshId::IeMeshId() [constructor]
cls.add_constructor([])
## ie-dot11s-id.h: ns3::dot11s::IeMeshId::IeMeshId(std::string s) [constructor]
cls.add_constructor([param('std::string', 's')])
## ie-dot11s-id.h: uint8_t ns3::dot11s::IeMeshId::DeserializeInformationField(ns3::Buffer::Iterator start, uint8_t length) [member function]
cls.add_method('DeserializeInformationField',
'uint8_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')],
is_virtual=True)
## ie-dot11s-id.h: ns3::WifiInformationElementId ns3::dot11s::IeMeshId::ElementId() const [member function]
cls.add_method('ElementId',
'ns3::WifiInformationElementId',
[],
is_const=True, is_virtual=True)
## ie-dot11s-id.h: uint8_t ns3::dot11s::IeMeshId::GetInformationFieldSize() const [member function]
cls.add_method('GetInformationFieldSize',
'uint8_t',
[],
is_const=True, is_virtual=True)
## ie-dot11s-id.h: bool ns3::dot11s::IeMeshId::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ie-dot11s-id.h: bool ns3::dot11s::IeMeshId::IsEqual(ns3::dot11s::IeMeshId const & o) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::dot11s::IeMeshId const &', 'o')],
is_const=True)
## ie-dot11s-id.h: char * ns3::dot11s::IeMeshId::PeekString() const [member function]
cls.add_method('PeekString',
'char *',
[],
is_const=True)
## ie-dot11s-id.h: void ns3::dot11s::IeMeshId::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ie-dot11s-id.h: void ns3::dot11s::IeMeshId::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
cls.add_method('SerializeInformationField',
'void',
[param('ns3::Buffer::Iterator', 'i')],
is_const=True, is_virtual=True)
return
def register_Ns3Dot11sIeMeshIdChecker_methods(root_module, cls):
## ie-dot11s-id.h: ns3::dot11s::IeMeshIdChecker::IeMeshIdChecker() [constructor]
cls.add_constructor([])
## ie-dot11s-id.h: ns3::dot11s::IeMeshIdChecker::IeMeshIdChecker(ns3::dot11s::IeMeshIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::IeMeshIdChecker const &', 'arg0')])
return
def register_Ns3Dot11sIeMeshIdValue_methods(root_module, cls):
## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue::IeMeshIdValue() [constructor]
cls.add_constructor([])
## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue::IeMeshIdValue(ns3::dot11s::IeMeshIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::IeMeshIdValue const &', 'arg0')])
## ie-dot11s-id.h: ns3::dot11s::IeMeshIdValue::IeMeshIdValue(ns3::dot11s::IeMeshId const & value) [constructor]
cls.add_constructor([param('ns3::dot11s::IeMeshId const &', 'value')])
## ie-dot11s-id.h: ns3::Ptr<ns3::AttributeValue> ns3::dot11s::IeMeshIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ie-dot11s-id.h: bool ns3::dot11s::IeMeshIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ie-dot11s-id.h: ns3::dot11s::IeMeshId ns3::dot11s::IeMeshIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::dot11s::IeMeshId',
[],
is_const=True)
## ie-dot11s-id.h: std::string ns3::dot11s::IeMeshIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ie-dot11s-id.h: void ns3::dot11s::IeMeshIdValue::Set(ns3::dot11s::IeMeshId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::dot11s::IeMeshId const &', 'value')])
return
def register_Ns3Dot11sIePeerManagement_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement::IePeerManagement(ns3::dot11s::IePeerManagement const & arg0) [copy constructor]
cls.add_constructor([param('ns3::dot11s::IePeerManagement const &', 'arg0')])
## ie-dot11s-peer-management.h: ns3::dot11s::IePeerManagement::IePeerManagement() [constructor]
cls.add_constructor([])
## ie-dot11s-peer-management.h: uint8_t ns3::dot11s::IePeerManagement::DeserializeInformationField(ns3::Buffer::Iterator i, uint8_t length) [member function]
cls.add_method('DeserializeInformationField',
'uint8_t',
[param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')],
is_virtual=True)
## ie-dot11s-peer-management.h: ns3::WifiInformationElementId ns3::dot11s::IePeerManagement::ElementId() const [member function]
cls.add_method('ElementId',
'ns3::WifiInformationElementId',
[],
is_const=True, is_virtual=True)
## ie-dot11s-peer-management.h: uint8_t ns3::dot11s::IePeerManagement::GetInformationFieldSize() const [member function]
cls.add_method('GetInformationFieldSize',
'uint8_t',
[],
is_const=True, is_virtual=True)
## ie-dot11s-peer-management.h: uint16_t ns3::dot11s::IePeerManagement::GetLocalLinkId() const [member function]
cls.add_method('GetLocalLinkId',
'uint16_t',
[],
is_const=True)
## ie-dot11s-peer-management.h: uint16_t ns3::dot11s::IePeerManagement::GetPeerLinkId() const [member function]
cls.add_method('GetPeerLinkId',
'uint16_t',
[],
is_const=True)
## ie-dot11s-peer-management.h: ns3::dot11s::PmpReasonCode ns3::dot11s::IePeerManagement::GetReasonCode() const [member function]
cls.add_method('GetReasonCode',
'ns3::dot11s::PmpReasonCode',
[],
is_const=True)
## ie-dot11s-peer-management.h: uint8_t ns3::dot11s::IePeerManagement::GetSubtype() const [member function]
cls.add_method('GetSubtype',
'uint8_t',
[],
is_const=True)
## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SerializeInformationField(ns3::Buffer::Iterator i) const [member function]
cls.add_method('SerializeInformationField',
'void',
[param('ns3::Buffer::Iterator', 'i')],
is_const=True, is_virtual=True)
## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SetPeerClose(uint16_t localLinkID, uint16_t peerLinkId, ns3::dot11s::PmpReasonCode reasonCode) [member function]
cls.add_method('SetPeerClose',
'void',
[param('uint16_t', 'localLinkID'), param('uint16_t', 'peerLinkId'), param('ns3::dot11s::PmpReasonCode', 'reasonCode')])
## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SetPeerConfirm(uint16_t localLinkID, uint16_t peerLinkId) [member function]
cls.add_method('SetPeerConfirm',
'void',
[param('uint16_t', 'localLinkID'), param('uint16_t', 'peerLinkId')])
## ie-dot11s-peer-management.h: void ns3::dot11s::IePeerManagement::SetPeerOpen(uint16_t localLinkId) [member function]
cls.add_method('SetPeerOpen',
'void',
[param('uint16_t', 'localLinkId')])
## ie-dot11s-peer-management.h: bool ns3::dot11s::IePeerManagement::SubtypeIsClose() const [member function]
cls.add_method('SubtypeIsClose',
'bool',
[],
is_const=True)
## ie-dot11s-peer-management.h: bool ns3::dot11s::IePeerManagement::SubtypeIsConfirm() const [member function]
cls.add_method('SubtypeIsConfirm',
'bool',
[],
is_const=True)
## ie-dot11s-peer-management.h: bool ns3::dot11s::IePeerManagement::SubtypeIsOpen() const [member function]
cls.add_method('SubtypeIsOpen',
'bool',
[],
is_const=True)
return
def register_Ns3Dot11sPeerLink_methods(root_module, cls):
## peer-link.h: static ns3::TypeId ns3::dot11s::PeerLink::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## peer-link.h: ns3::dot11s::PeerLink::PeerLink() [constructor]
cls.add_constructor([])
## peer-link.h: void ns3::dot11s::PeerLink::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## peer-link.h: void ns3::dot11s::PeerLink::SetBeaconInformation(ns3::Time lastBeacon, ns3::Time BeaconInterval) [member function]
cls.add_method('SetBeaconInformation',
'void',
[param('ns3::Time', 'lastBeacon'), param('ns3::Time', 'BeaconInterval')])
## peer-link.h: void ns3::dot11s::PeerLink::SetLinkStatusCallback(ns3::Callback<void,unsigned int,ns3::Mac48Address,bool,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetLinkStatusCallback',
'void',
[param('ns3::Callback< void, unsigned int, ns3::Mac48Address, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
## peer-link.h: void ns3::dot11s::PeerLink::SetPeerAddress(ns3::Mac48Address macaddr) [member function]
cls.add_method('SetPeerAddress',
'void',
[param('ns3::Mac48Address', 'macaddr')])
## peer-link.h: void ns3::dot11s::PeerLink::SetPeerMeshPointAddress(ns3::Mac48Address macaddr) [member function]
cls.add_method('SetPeerMeshPointAddress',
'void',
[param('ns3::Mac48Address', 'macaddr')])
## peer-link.h: void ns3::dot11s::PeerLink::SetInterface(uint32_t interface) [member function]
cls.add_method('SetInterface',
'void',
[param('uint32_t', 'interface')])
## peer-link.h: void ns3::dot11s::PeerLink::SetLocalLinkId(uint16_t id) [member function]
cls.add_method('SetLocalLinkId',
'void',
[param('uint16_t', 'id')])
## peer-link.h: void ns3::dot11s::PeerLink::SetLocalAid(uint16_t aid) [member function]
cls.add_method('SetLocalAid',
'void',
[param('uint16_t', 'aid')])
## peer-link.h: uint16_t ns3::dot11s::PeerLink::GetPeerAid() const [member function]
cls.add_method('GetPeerAid',
'uint16_t',
[],
is_const=True)
## peer-link.h: void ns3::dot11s::PeerLink::SetBeaconTimingElement(ns3::dot11s::IeBeaconTiming beaconTiming) [member function]
cls.add_method('SetBeaconTimingElement',
'void',
[param('ns3::dot11s::IeBeaconTiming', 'beaconTiming')])
## peer-link.h: ns3::Mac48Address ns3::dot11s::PeerLink::GetPeerAddress() const [member function]
cls.add_method('GetPeerAddress',
'ns3::Mac48Address',
[],
is_const=True)
## peer-link.h: uint16_t ns3::dot11s::PeerLink::GetLocalAid() const [member function]
cls.add_method('GetLocalAid',
'uint16_t',
[],
is_const=True)
## peer-link.h: ns3::Time ns3::dot11s::PeerLink::GetLastBeacon() const [member function]
cls.add_method('GetLastBeacon',
'ns3::Time',
[],
is_const=True)
## peer-link.h: ns3::Time ns3::dot11s::PeerLink::GetBeaconInterval() const [member function]
cls.add_method('GetBeaconInterval',
'ns3::Time',
[],
is_const=True)
## peer-link.h: ns3::dot11s::IeBeaconTiming ns3::dot11s::PeerLink::GetBeaconTimingElement() const [member function]
cls.add_method('GetBeaconTimingElement',
'ns3::dot11s::IeBeaconTiming',
[],
is_const=True)
## peer-link.h: void ns3::dot11s::PeerLink::MLMECancelPeerLink(ns3::dot11s::PmpReasonCode reason) [member function]
cls.add_method('MLMECancelPeerLink',
'void',
[param('ns3::dot11s::PmpReasonCode', 'reason')])
## peer-link.h: void ns3::dot11s::PeerLink::MLMEActivePeerLinkOpen() [member function]
cls.add_method('MLMEActivePeerLinkOpen',
'void',
[])
## peer-link.h: void ns3::dot11s::PeerLink::MLMEPeeringRequestReject() [member function]
cls.add_method('MLMEPeeringRequestReject',
'void',
[])
## peer-link.h: void ns3::dot11s::PeerLink::MLMESetSignalStatusCallback(ns3::Callback<void, unsigned int, ns3::Mac48Address, ns3::Mac48Address, ns3::dot11s::PeerLink::PeerState, ns3::dot11s::PeerLink::PeerState, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
cls.add_method('MLMESetSignalStatusCallback',
'void',
[param('ns3::Callback< void, unsigned int, ns3::Mac48Address, ns3::Mac48Address, ns3::dot11s::PeerLink::PeerState, ns3::dot11s::PeerLink::PeerState, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
## peer-link.h: void ns3::dot11s::PeerLink::TransmissionSuccess() [member function]
cls.add_method('TransmissionSuccess',
'void',
[])
## peer-link.h: void ns3::dot11s::PeerLink::TransmissionFailure() [member function]
cls.add_method('TransmissionFailure',
'void',
[])
## peer-link.h: void ns3::dot11s::PeerLink::Report(std::ostream & os) const [member function]
cls.add_method('Report',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3Dot11sPeerManagementProtocol_methods(root_module, cls):
## peer-management-protocol.h: ns3::dot11s::PeerManagementProtocol::PeerManagementProtocol() [constructor]
cls.add_constructor([])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ConfigurationMismatch(uint32_t interface, ns3::Mac48Address peerAddress) [member function]
cls.add_method('ConfigurationMismatch',
'void',
[param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## peer-management-protocol.h: ns3::Ptr<ns3::dot11s::PeerLink> ns3::dot11s::PeerManagementProtocol::FindPeerLink(uint32_t interface, ns3::Mac48Address peerAddress) [member function]
cls.add_method('FindPeerLink',
'ns3::Ptr< ns3::dot11s::PeerLink >',
[param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
## peer-management-protocol.h: ns3::Mac48Address ns3::dot11s::PeerManagementProtocol::GetAddress() [member function]
cls.add_method('GetAddress',
'ns3::Mac48Address',
[])
## peer-management-protocol.h: bool ns3::dot11s::PeerManagementProtocol::GetBeaconCollisionAvoidance() const [member function]
cls.add_method('GetBeaconCollisionAvoidance',
'bool',
[],
is_const=True)
## peer-management-protocol.h: ns3::Ptr<ns3::dot11s::IeBeaconTiming> ns3::dot11s::PeerManagementProtocol::GetBeaconTimingElement(uint32_t interface) [member function]
cls.add_method('GetBeaconTimingElement',
'ns3::Ptr< ns3::dot11s::IeBeaconTiming >',
[param('uint32_t', 'interface')])
## peer-management-protocol.h: ns3::Ptr<ns3::dot11s::IeMeshId> ns3::dot11s::PeerManagementProtocol::GetMeshId() const [member function]
cls.add_method('GetMeshId',
'ns3::Ptr< ns3::dot11s::IeMeshId >',
[],
is_const=True)
## peer-management-protocol.h: uint8_t ns3::dot11s::PeerManagementProtocol::GetNumberOfLinks() [member function]
cls.add_method('GetNumberOfLinks',
'uint8_t',
[])
## peer-management-protocol.h: std::vector<ns3::Ptr<ns3::dot11s::PeerLink>,std::allocator<ns3::Ptr<ns3::dot11s::PeerLink> > > ns3::dot11s::PeerManagementProtocol::GetPeerLinks() const [member function]
cls.add_method('GetPeerLinks',
'std::vector< ns3::Ptr< ns3::dot11s::PeerLink > >',
[],
is_const=True)
## peer-management-protocol.h: std::vector<ns3::Mac48Address,std::allocator<ns3::Mac48Address> > ns3::dot11s::PeerManagementProtocol::GetPeers(uint32_t interface) const [member function]
cls.add_method('GetPeers',
'std::vector< ns3::Mac48Address >',
[param('uint32_t', 'interface')],
is_const=True)
## peer-management-protocol.h: static ns3::TypeId ns3::dot11s::PeerManagementProtocol::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## peer-management-protocol.h: bool ns3::dot11s::PeerManagementProtocol::Install(ns3::Ptr<ns3::MeshPointDevice> arg0) [member function]
cls.add_method('Install',
'bool',
[param('ns3::Ptr< ns3::MeshPointDevice >', 'arg0')])
## peer-management-protocol.h: bool ns3::dot11s::PeerManagementProtocol::IsActiveLink(uint32_t interface, ns3::Mac48Address peerAddress) [member function]
cls.add_method('IsActiveLink',
'bool',
[param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::NotifyBeaconSent(uint32_t interface, ns3::Time beaconInterval) [member function]
cls.add_method('NotifyBeaconSent',
'void',
[param('uint32_t', 'interface'), param('ns3::Time', 'beaconInterval')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ReceiveBeacon(uint32_t interface, ns3::Mac48Address peerAddress, ns3::Time beaconInterval, ns3::Ptr<ns3::dot11s::IeBeaconTiming> beaconTiming) [member function]
cls.add_method('ReceiveBeacon',
'void',
[param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress'), param('ns3::Time', 'beaconInterval'), param('ns3::Ptr< ns3::dot11s::IeBeaconTiming >', 'beaconTiming')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ReceivePeerLinkFrame(uint32_t interface, ns3::Mac48Address peerAddress, ns3::Mac48Address peerMeshPointAddress, uint16_t aid, ns3::dot11s::IePeerManagement peerManagementElement, ns3::dot11s::IeConfiguration meshConfig) [member function]
cls.add_method('ReceivePeerLinkFrame',
'void',
[param('uint32_t', 'interface'), param('ns3::Mac48Address', 'peerAddress'), param('ns3::Mac48Address', 'peerMeshPointAddress'), param('uint16_t', 'aid'), param('ns3::dot11s::IePeerManagement', 'peerManagementElement'), param('ns3::dot11s::IeConfiguration', 'meshConfig')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::Report(std::ostream & arg0) const [member function]
cls.add_method('Report',
'void',
[param('std::ostream &', 'arg0')],
is_const=True)
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::ResetStats() [member function]
cls.add_method('ResetStats',
'void',
[])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::SetBeaconCollisionAvoidance(bool enable) [member function]
cls.add_method('SetBeaconCollisionAvoidance',
'void',
[param('bool', 'enable')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::SetMeshId(std::string s) [member function]
cls.add_method('SetMeshId',
'void',
[param('std::string', 's')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::SetPeerLinkStatusCallback(ns3::Callback<void, ns3::Mac48Address, ns3::Mac48Address, unsigned int, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPeerLinkStatusCallback',
'void',
[param('ns3::Callback< void, ns3::Mac48Address, ns3::Mac48Address, unsigned int, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::TransmissionFailure(uint32_t interface, ns3::Mac48Address const peerAddress) [member function]
cls.add_method('TransmissionFailure',
'void',
[param('uint32_t', 'interface'), param('ns3::Mac48Address const', 'peerAddress')])
## peer-management-protocol.h: void ns3::dot11s::PeerManagementProtocol::TransmissionSuccess(uint32_t interface, ns3::Mac48Address const peerAddress) [member function]
cls.add_method('TransmissionSuccess',
'void',
[param('uint32_t', 'interface'), param('ns3::Mac48Address const', 'peerAddress')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
register_functions_ns3_flame(module.get_submodule('flame'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
return
def register_functions_ns3_Config(module, root_module):
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_addressUtils(module, root_module):
return
def register_functions_ns3_aodv(module, root_module):
return
def register_functions_ns3_dot11s(module, root_module):
## ie-dot11s-id.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::dot11s::MakeIeMeshIdChecker() [free function]
module.add_function('MakeIeMeshIdChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
return
def register_functions_ns3_flame(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def register_functions_ns3_olsr(module, root_module):
return
|
ciprian-radu/unimap.ns-3noc
|
bindings/python/apidefs/gcc-ILP32/ns3_module_dot11s.py
|
Python
|
gpl-2.0
| 49,784
|
from django.test import TestCase
from ..models import AwareModel, NaiveModel
from boardinghouse.templatetags.boardinghouse import schema_name, is_schema_aware, is_shared_model
from boardinghouse.models import Schema
class TestTemplateTags(TestCase):
def test_is_schema_aware_filter(self):
self.assertTrue(is_schema_aware(AwareModel()))
self.assertFalse(is_schema_aware(NaiveModel()))
def test_is_shared_model_filter(self):
self.assertFalse(is_shared_model(AwareModel()))
self.assertTrue(is_shared_model(NaiveModel()))
def test_schema_name_filter(self):
Schema.objects.create(name='Schema Name', schema='foo')
        self.assertEqual('Schema Name', schema_name('foo'))
        self.assertEqual('no schema', schema_name(None))
        self.assertEqual('no schema', schema_name(''))
        self.assertEqual('no schema', schema_name(False))
        self.assertEqual('no schema', schema_name('foobar'))
        self.assertEqual('no schema', schema_name('foo_'))
        self.assertEqual('no schema', schema_name('foofoo'))
|
luzfcb/django-boardinghouse
|
tests/tests/test_template_tag.py
|
Python
|
bsd-3-clause
| 1,094
|
from rest_framework import generics
from api import *
from serializers import StudentDiscussionSerializer, CourseDiscussionSerializer
from rest_framework.permissions import IsAdminUser
from django.http import Http404
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from oauth2_provider.ext.rest_framework.authentication import OAuth2Authentication
from permissions import IsStudent, IsFaculty
# Create your views here.
class StudentDiscussionList(generics.ListAPIView):
"""
**Use Case**
        Get a paginated list of students with their count of discussions and questions in the edX Platform.
Each page in the list can contain up to 10 students.
**Example Requests**
GET /api/courses/v2/discussions/students/
**Response Values**
On success with Response Code <200>
            * count: The number of students in the edX platform.
            * next: The URI to the next page of students.
            * previous: The URI to the previous page of students.
            * num_pages: The number of pages listing students.
            * results: A list of students returned. Each collection in the list
contains these fields.
* user_id: The unique identifier for the student.
* count:
* discussion: Count of discussions by the student
* question: Count of questions asked by the student
**ERROR RESPONSES**
* Response Code <403> FORBIDDEN
"""
queryset = get_all_students_count()
serializer_class = StudentDiscussionSerializer
permission_classes = (IsAdminUser,)
authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)
class StudentDiscussionDetail(generics.RetrieveAPIView):
"""
**Use Case**
Get count of discussions and questions for a specific student.
**Example Requests**
GET /api/courses/v2/discussions/students/{student_id}
**Response Values**
On success with Response Code <200>
* user_id: The unique identifier for the student.
* count:
* discussion: Count of discussions by the student
* question: Count of questions asked by the student
**ERROR RESPONSES**
* Response Code <404> STUDENT NOT FOUND
* Response Code <403> FORBIDDEN
"""
serializer_class = StudentDiscussionSerializer
permission_classes = (IsAdminUser, IsStudent, )
authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)
    def get_object(self):
        try:
            student_id = self.kwargs['student_id']
            counts = get_count_student(student_id)
            counts['user_id']  # KeyError here means the student lookup failed
            return counts
        except:
            raise Http404
class CourseDiscussionList(generics.ListAPIView):
"""
**Use Case**
        Get a paginated list of courses with their count of discussions and questions in the edX Platform.
        Each page in the list can contain up to 10 courses.
**Example Requests**
GET /api/courses/v2/discussions/courses/
**Response Values**
On success with Response Code <200>
* count: The number of courses in the edX platform.
* next: The URI to the next page of courses.
* previous: The URI to the previous page of courses.
* num_pages: The number of pages listing courses.
* results: A list of courses returned. Each collection in the list
contains these fields.
* course_id: The unique identifier for the course.
* count:
* discussion: Count of discussions by the course
* question: Count of questions asked by the course
**ERROR RESPONSES**
* Response Code <403> FORBIDDEN
"""
queryset = get_all_courses_count()
serializer_class = CourseDiscussionSerializer
permission_classes = (IsAdminUser, )
authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)
class CourseDiscussionDetail(generics.RetrieveAPIView):
"""
**Use Case**
Get count of discussions and questions for a specific course.
**Example Requests**
GET /api/courses/v2/discussions/courses/{course_id}
**Response Values**
On success with Response Code <200>
* course_id: The unique identifier for the course.
* count:
* discussion: Count of discussions by the course
* question: Count of questions asked by the course
**ERROR RESPONSES**
* Response Code <404> COURSE NOT FOUND
* Response Code <403> FORBIDDEN
"""
serializer_class = CourseDiscussionSerializer
permission_classes = (IsAdminUser, IsFaculty, )
authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)
    def get_object(self):
        try:
            course_id = self.kwargs['course_id']
            counts = get_count_course(course_id)
            counts['course_id']  # KeyError here means the course lookup failed
            return counts
        except:
            raise Http404
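# A hypothetical request sketch (not part of the original module): with admin
# credentials, the list endpoints documented above could be exercised roughly
# like this (host name and token are placeholders):
#
#   curl -H 'Authorization: Bearer <token>' \
#        https://lms.example.com/api/courses/v2/discussions/students/
#
# The detail views resolve {student_id} / {course_id} in get_object() and
# raise Http404 when the lookup helpers fail.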
|
jaygoswami2303/course_dashboard_api
|
v2/DiscussionAPI/views.py
|
Python
|
mit
| 5,468
|
#!/usr/bin/env python
from loginsightwebhookdemo import app, parse, callapi
from flask import request, json
import logging
__author__ = "Steve Flanders"
__license__ = "Apache v2"
__verion__ = "1.0"
# zendesk parameters
ZENDESKURL = '' # required
ZENDESKUSER = '' # required if not passed in URL
ZENDESKPASS = '' # required if ZENDESKTOKEN or TOKEN is not specified
ZENDESKTOKEN = '' # required if ZENDESKPASS is not specifed or TOKEN is not passed in URL
@app.route("/endpoint/zendesk", methods=['POST'])
@app.route("/endpoint/zendesk/<EMAIL>/<TOKEN>", methods=['POST'])
@app.route("/endpoint/zendesk/<ALERTID>", methods=['POST','PUT'])
@app.route("/endpoint/zendesk/<EMAIL>/<TOKEN>/<ALERTID>", methods=['POST','PUT'])
def zendesk(ALERTID=None, EMAIL=None, TOKEN=None):
"""
Create a new incident for every incoming webhook that does not already have an open incident.
If an incident is already open, add a new comment to the open alert.
Uniqueness is determined by the incoming webhook alert name.
Requires ZENDESK* parameters to be defined.
"""
bauth = request.authorization
if bauth is not None:
global ZENDESKUSER
global ZENDESKPASS
ZENDESKUSER = bauth.username
ZENDESKPASS = bauth.password
if (not ZENDESKURL or (not ZENDESKUSER and not EMAIL) or (not ZENDESKPASS and not ZENDESKTOKEN and not TOKEN)):
return ("ZENDESK* parameters must be set, please edit the shim!", 500, None)
if not ZENDESKUSER:
USER = EMAIL
else:
USER = ZENDESKUSER
# Prefer tokens over passwords
if ZENDESKTOKEN or TOKEN:
if ZENDESKTOKEN:
USER = USER + '/token'
PASS = ZENDESKTOKEN
else:
USER = USER + '/token'
PASS = TOKEN
else:
PASS = ZENDESKPASS
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
a = parse(request)
# Get the list of open incidents that contain the AlertName from the incoming webhook
incident = callapi(ZENDESKURL + '/api/v2/search.json?query=type:ticket status:open subject:"' + a['AlertName'] + '"', 'get', None, headers, (USER, PASS))
i = json.loads(incident)
try: # Determine if there is an open incident already
if i['results'][0]['id'] is not None:
# Option 1: Do nothing
#logging.info('Nothing to do, exiting.')
#return ("OK", 200, None)
# Option 2: Add a new comment
# Limited to 30 updates per 10 minutes (https://developer.zendesk.com/rest_api/docs/core/introduction)
payload = { 'ticket': { 'comment': { 'body': a['moreinfo'] } } }
return callapi(ZENDESKURL + '/api/v2/tickets/' + str(i['results'][0]['id']) + '.json', 'put', json.dumps(payload), headers, (USER, PASS))
except: # If no open incident then open one
payload = {
"ticket": {
#"requester": {
# "name": "Log Insight",
# "email": USER
#},
"subject": a['AlertName'],
"comment": {
"body": a['moreinfo'],
},
"type": 'incident',
"tags": ["loginsight"]
}
}
return callapi(ZENDESKURL + '/api/v2/tickets.json', 'post', json.dumps(payload), headers, (USER, PASS))
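# A hypothetical smoke-test sketch (not part of the original shim): assuming
# the Flask app is running locally and the ZENDESK* parameters are filled in,
# an incoming webhook could be simulated roughly like this (host, port and
# payload fields are placeholders):
#
#   curl -X POST http://localhost:5001/endpoint/zendesk \
#        -H 'Content-Type: application/json' \
#        -d '{"AlertName": "demo alert"}'
#
# parse(request) normalizes the body into the a['AlertName'] / a['moreinfo']
# fields used above, and the shim then searches Zendesk for an open ticket
# with that subject before deciding whether to comment or create.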
|
vmw-loginsight/webhook-shims
|
loginsightwebhookdemo/zendesk.py
|
Python
|
apache-2.0
| 3,385
|
from django.db import models
from django_comments.signals import comment_was_posted
from django.core.mail import send_mail
# Signals
def notify_user_about_comment(sender, **kwargs):
try:
comment = kwargs['comment']
# Only send on GM comment
if not comment.user.is_staff:
return
target_object = comment.content_object
receipient_user = target_object.character.user
to_address = receipient_user.email
html = """
<html>
<body>
<h2>Treachery Downtime Notification</h2>
<p>
Hi %s!
<br><br>
                    You have received a new comment on one of your downtime actions. Please login and reply if needed.
<br><br>
<b>%s</b> commented on your action <i>%s</i>:
<br><br>
<div style='padding-left:10px'>
%s
</div>
</p>
</body>
</html>
""" % (receipient_user.username, comment.name, target_object,
comment.comment)
send_mail('[Treachery Downtime]: New comment',
'',
'',
[to_address],
fail_silently=True,
html_message=html)
except:
print('Error while sending mail notification: %s' % kwargs)
comment_was_posted.connect(notify_user_about_comment)
|
TreacheryLarp/downtime
|
gamemaster/models.py
|
Python
|
gpl-3.0
| 1,444
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services PVT. LTD.
# (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# ---------------------------------------------------------------------------
from openerp import models, fields, api
class FolioReportWizard(models.TransientModel):
_name = 'folio.report.wizard'
_rec_name = 'date_start'
date_start = fields.Datetime('Start Date')
date_end = fields.Datetime('End Date')
@api.multi
def print_report(self):
data = {
'ids': self.ids,
'model': 'hotel.folio',
'form': self.read(['date_start', 'date_end'])[0]
}
return self.env['report'].get_action(self, 'hotel.report_hotel_folio',
data=data)
|
vileopratama/vitech
|
src/addons/hotel/wizard/hotel_wizard.py
|
Python
|
mit
| 1,573
|
"""
Unit tests for ProgramEnrollment models.
"""
from uuid import uuid4
import ddt
from django.db.utils import IntegrityError
from django.test import TestCase
from edx_django_utils.cache import RequestCache
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from lms.djangoapps.program_enrollments.models import ProgramCourseEnrollment, ProgramEnrollment
from openedx.core.djangoapps.catalog.tests.factories import generate_course_run_key
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
class ProgramEnrollmentModelTests(TestCase):
"""
Tests for the ProgramEnrollment model.
"""
def setUp(self):
"""
Set up the test data used in the specific tests
"""
super(ProgramEnrollmentModelTests, self).setUp()
self.user = UserFactory.create()
self.program_uuid = uuid4()
self.other_program_uuid = uuid4()
self.curriculum_uuid = uuid4()
self.enrollment = ProgramEnrollment.objects.create(
user=self.user,
external_user_key='abc',
program_uuid=self.program_uuid,
curriculum_uuid=self.curriculum_uuid,
status='enrolled'
)
def test_unique_external_key_program_curriculum(self):
"""
A record with the same (external_user_key, program_uuid, curriculum_uuid) cannot be duplicated.
"""
with self.assertRaises(IntegrityError):
_ = ProgramEnrollment.objects.create(
user=None,
external_user_key='abc',
program_uuid=self.program_uuid,
curriculum_uuid=self.curriculum_uuid,
status='pending',
)
def test_unique_user_program_curriculum(self):
"""
A record with the same (user, program_uuid, curriculum_uuid) cannot be duplicated.
"""
with self.assertRaises(IntegrityError):
_ = ProgramEnrollment.objects.create(
user=self.user,
external_user_key=None,
program_uuid=self.program_uuid,
curriculum_uuid=self.curriculum_uuid,
status='suspended',
)
def test_user_retirement(self):
"""
Test that the external_user_key is successfully retired for a user's program enrollments
and history.
"""
new_status = 'canceled'
self.enrollment.status = new_status
self.enrollment.save()
# Ensure that all the records had values for external_user_key
self.assertEqual(self.enrollment.external_user_key, 'abc')
self.assertTrue(self.enrollment.historical_records.all())
for record in self.enrollment.historical_records.all():
self.assertEqual(record.external_user_key, 'abc')
ProgramEnrollment.retire_user(self.user.id)
self.enrollment.refresh_from_db()
# Ensure those values are retired
self.assertEqual(self.enrollment.external_user_key, None)
self.assertTrue(self.enrollment.historical_records.all())
for record in self.enrollment.historical_records.all():
self.assertEqual(record.external_user_key, None)
@ddt.ddt
class ProgramCourseEnrollmentModelTests(TestCase):
"""
Tests for the ProgramCourseEnrollment model.
"""
def setUp(self):
"""
Set up test data
"""
super(ProgramCourseEnrollmentModelTests, self).setUp()
RequestCache.clear_all_namespaces()
self.user = UserFactory.create()
self.program_uuid = uuid4()
self.program_enrollment = ProgramEnrollment.objects.create(
user=self.user,
external_user_key='abc',
program_uuid=self.program_uuid,
curriculum_uuid=uuid4(),
status='enrolled'
)
self.course_key = CourseKey.from_string(generate_course_run_key())
CourseOverviewFactory(id=self.course_key)
def test_duplicate_enrollments_allowed(self):
"""
A record with the same (program_enrollment, course_enrollment)
can be created as long as only one record is active for the
same course_enrollment
"""
pce = self._create_completed_program_course_enrollment()
ProgramCourseEnrollment.objects.create(
program_enrollment=pce.program_enrollment,
course_key="course-v1:dummy+value+101",
course_enrollment=pce.course_enrollment,
status="inactive",
)
def test_unique_waiting_enrollment(self):
"""
A record with the same (program_enrollment, course_key)
cannot be created.
"""
pce = self._create_waiting_program_course_enrollment()
with self.assertRaises(IntegrityError):
ProgramCourseEnrollment.objects.create(
program_enrollment=pce.program_enrollment,
course_key=pce.course_key,
course_enrollment=None,
status="inactive",
)
def _create_completed_program_course_enrollment(self):
""" helper function create program course enrollment """
course_enrollment = CourseEnrollmentFactory.create(
course_id=self.course_key,
user=self.user,
mode=CourseMode.MASTERS
)
program_course_enrollment = ProgramCourseEnrollment.objects.create(
program_enrollment=self.program_enrollment,
course_key=self.course_key,
course_enrollment=course_enrollment,
status="active"
)
return program_course_enrollment
def _create_waiting_program_course_enrollment(self):
""" helper function create program course enrollment with no lms user """
return ProgramCourseEnrollment.objects.create(
program_enrollment=self.program_enrollment,
course_key=self.course_key,
course_enrollment=None,
status="active"
)
|
cpennington/edx-platform
|
lms/djangoapps/program_enrollments/tests/test_models.py
|
Python
|
agpl-3.0
| 6,137
|
#!/usr/bin/env python
import re
from time import sleep
import telnetlib
HOST = 'localhost'
PORT = 4242
prompt = [r'repl\d*> '] # list of regular expressions
def get_page(url, wait=3):
tn = telnetlib.Telnet(HOST, PORT)
tn.expect(prompt)
cmd = "content.location.href = '{url}'".format(url=url)
tn.write(cmd + "\n")
tn.expect(prompt)
if wait:
print '# waiting {X} seconds...'.format(X=wait)
sleep(wait)
print '# continue'
#
tn.write('content.document.body.innerHTML\n')
html = tn.expect(prompt)[2].split('\n')
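    # MozRepl returns innerHTML wrapped in quote lines and followed by the
    # REPL prompt; strip those wrapper lines so only the page markup remains.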
if html[0].strip() == '"':
html = html[1:]
if re.search(prompt[0], html[-1]):
html = html[:-1]
if html[-1].strip() == '"':
html = html[:-1]
tn.write("repl.quit()\n")
return html
##################################
if __name__ == "__main__":
print 'OK'
html = get_page('http://simile.mit.edu/crowbar/test.html')
for line in html:
print line
print '================'
print 'Death'
url = 'http://www.ncbi.nlm.nih.gov/nuccore/CP002059.1'
html = get_page(url, wait=30)
for line in html:
print line
|
jabbalaci/jabbapylib
|
demos/browser_automation/moz_repl.py
|
Python
|
gpl-3.0
| 1,160
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zipkin Export Encoders for JSON formats
"""
from typing import Dict, List
from opentelemetry.exporter.zipkin.encoder import Encoder, JsonEncoder
from opentelemetry.trace import Span
# pylint: disable=W0223
class V1Encoder(Encoder):
def _extract_binary_annotations(
self, span: Span, encoded_local_endpoint: Dict
) -> List[Dict]:
binary_annotations = []
for tag_key, tag_value in self._extract_tags_from_span(span).items():
if isinstance(tag_value, str) and self.max_tag_value_length > 0:
tag_value = tag_value[: self.max_tag_value_length]
binary_annotations.append(
{
"key": tag_key,
"value": tag_value,
"endpoint": encoded_local_endpoint,
}
)
return binary_annotations
class JsonV1Encoder(JsonEncoder, V1Encoder):
"""Zipkin Export Encoder for JSON v1 API
API spec: https://github.com/openzipkin/zipkin-api/blob/master/zipkin-api.yaml
"""
def _encode_span(self, span: Span, encoded_local_endpoint: Dict) -> Dict:
context = span.get_span_context()
encoded_span = {
"traceId": self._encode_trace_id(context.trace_id),
"id": self._encode_span_id(context.span_id),
"name": span.name,
"timestamp": self._nsec_to_usec_round(span.start_time),
"duration": self._nsec_to_usec_round(
span.end_time - span.start_time
),
}
encoded_annotations = self._extract_annotations_from_events(
span.events
)
if encoded_annotations is not None:
for annotation in encoded_annotations:
annotation["endpoint"] = encoded_local_endpoint
encoded_span["annotations"] = encoded_annotations
binary_annotations = self._extract_binary_annotations(
span, encoded_local_endpoint
)
if binary_annotations:
encoded_span["binaryAnnotations"] = binary_annotations
debug = self._encode_debug(context)
if debug:
encoded_span["debug"] = debug
parent_id = self._get_parent_id(span.parent)
if parent_id is not None:
encoded_span["parentId"] = self._encode_span_id(parent_id)
return encoded_span
|
open-telemetry/opentelemetry-python
|
exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json/v1/__init__.py
|
Python
|
apache-2.0
| 2,951
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Guillaume Pernot <gpernot@praksys.org>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
import testscenarios
from ceilometer.central import manager
from ceilometer.objectstore import swift
from ceilometer.tests import base
from keystoneclient import exceptions
from swiftclient import client as swift_client
load_tests = testscenarios.load_tests_apply_scenarios
ACCOUNTS = [('tenant-000', {'x-account-object-count': 12,
'x-account-bytes-used': 321321321,
'x-account-container-count': 7,
}),
('tenant-001', {'x-account-object-count': 34,
'x-account-bytes-used': 9898989898,
'x-account-container-count': 17,
})]
class TestManager(manager.AgentManager):
def __init__(self):
super(TestManager, self).__init__()
self.keystone = mock.MagicMock()
class TestSwiftPollster(base.TestCase):
# Define scenarios to run all of the tests against all of the
# pollsters.
scenarios = [
('storage.objects',
{'factory': swift.ObjectsPollster}),
('storage.objects.size',
{'factory': swift.ObjectsSizePollster}),
('storage.objects.containers',
{'factory': swift.ObjectsContainersPollster}),
]
@staticmethod
def fake_ks_service_catalog_url_for(*args, **kwargs):
raise exceptions.EndpointNotFound("Fake keystone exception")
def fake_iter_accounts(self, ksclient, cache):
for i in ACCOUNTS:
yield i
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def setUp(self):
super(TestSwiftPollster, self).setUp()
self.pollster = self.factory()
self.manager = TestManager()
def test_iter_accounts_no_cache(self):
def empty_account_info(obj, ksclient, cache):
return []
self.stubs.Set(self.factory, '_get_account_info',
empty_account_info)
cache = {}
data = list(self.pollster._iter_accounts(mock.Mock(), cache))
self.assertTrue(self.pollster.CACHE_KEY_TENANT in cache)
self.assertTrue(self.pollster.CACHE_KEY_HEAD in cache)
self.assertEqual(data, [])
def test_iter_accounts_tenants_cached(self):
# Verify that if there are tenants pre-cached then the account
# info loop iterates over those instead of asking for the list
# again.
ksclient = mock.Mock()
ksclient.tenants.list.side_effect = AssertionError(
'should not be called',
)
self.stubs.Set(swift_client, 'head_account',
ksclient)
self.stubs.Set(self.factory, '_neaten_url',
mock.Mock())
Tenant = collections.namedtuple('Tenant', 'id')
cache = {
self.pollster.CACHE_KEY_TENANT: [Tenant(ACCOUNTS[0][0])],
}
data = list(self.pollster._iter_accounts(mock.Mock(), cache))
self.assertTrue(self.pollster.CACHE_KEY_HEAD in cache)
self.assertEqual(data[0][0], ACCOUNTS[0][0])
def test_neaten_url(self):
test_endpoint = 'http://127.0.0.1:8080'
test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b'
standard_url = test_endpoint + '/v1/' + 'AUTH_' + test_tenant_id
self.assertEqual(standard_url,
swift._Base._neaten_url(test_endpoint,
test_tenant_id))
self.assertEqual(standard_url,
swift._Base._neaten_url(test_endpoint + '/',
test_tenant_id))
self.assertEqual(standard_url,
swift._Base._neaten_url(test_endpoint + '/v1',
test_tenant_id))
self.assertEqual(standard_url,
swift._Base._neaten_url(standard_url,
test_tenant_id))
def test_metering(self):
self.stubs.Set(self.factory, '_iter_accounts',
self.fake_iter_accounts)
samples = list(self.pollster.get_samples(self.manager, {}))
self.assertEqual(len(samples), 2)
def test_get_meter_names(self):
self.stubs.Set(self.factory, '_iter_accounts',
self.fake_iter_accounts)
samples = list(self.pollster.get_samples(self.manager, {}))
self.assertEqual(set([s.name for s in samples]),
set([samples[0].name]))
def test_endpoint_notfound(self):
self.stubs.Set(self.manager.keystone.service_catalog, 'url_for',
self.fake_ks_service_catalog_url_for)
samples = list(self.pollster.get_samples(self.manager, {}))
self.assertEqual(len(samples), 0)
|
citrix-openstack-build/ceilometer
|
tests/objectstore/test_swift.py
|
Python
|
apache-2.0
| 5,534
|
from datetime import datetime
from wdim.util import pack
from wdim.orm import fields
from wdim.orm import Storable
from wdim.client.actions import Action
class Journal(Storable):
_id = fields.ObjectIdField()
action = fields.EnumField(Action)
record_id = fields.StringField()
created_by = fields.StringField()
modified_by = fields.StringField()
created = fields.DatetimeField()
modified = fields.DatetimeField()
blob = fields.ForeignField(Storable.ClassGetter('Blob'))
schema = fields.ForeignField(Storable.ClassGetter('Blob'), required=False)
namespace = fields.ForeignField(Storable.ClassGetter('Namespace'))
collection = fields.ForeignField(Storable.ClassGetter('Collection'))
class Meta:
indexes = [
pack('modified', order=1),
pack('namespace', 'collection', 'record_id', order=1),
pack('namespace', 'collection', 'record_id', 'modified', order=1, unique=True),
pack('namespace', 'collection', 'record_id', 'modified', 'action', order=1, unique=True),
]
@classmethod
async def create(cls, *, modified=None, created=None, **kwargs):
assert kwargs.get('created_by') is not None, 'created_by must be specified'
modified = datetime.utcnow()
if kwargs.get('action') == Action.CREATE:
created = modified
kwargs['modified_by'] = kwargs['created_by']
assert created is not None, 'created must be specified'
assert kwargs.get('modified_by') is not None, 'modified_by must be specified'
# Classmethod supers need arguments for some reason
return await super(Journal, cls).create(modified=modified, created=created, **kwargs)
async def serialize(self, furl):
self_url = furl.copy()
self_url.path.segments.append(str(self._id))
return {
'data': {
'id': str(self._id),
'type': '{}:journal'.format((await self.collection).name),
'attributes': {
'action': self.action,
'data': (await self.blob).data,
'created_by': self.created_by,
'modified_by': self.modified_by,
'created': self.created.isoformat(),
'modified': self.modified.isoformat()
},
'links': {
'self': self_url.url
}
}
}
|
chrisseto/Still
|
wdim/client/journal.py
|
Python
|
mit
| 2,481
|
"""
This package contains visualization helpers for the geometric primitives
in use in ``phantomas``.
"""
__all__ = ["views"]
|
oesteban/phantomas
|
phantomas/visu/__init__.py
|
Python
|
bsd-3-clause
| 126
|
# coding=UTF-8
"""
Tests for signal handling in commerce djangoapp.
"""
from __future__ import unicode_literals
import base64
import json
from urlparse import urljoin
import ddt
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase
from django.test.utils import override_settings
import httpretty
import mock
from opaque_keys.edx.keys import CourseKey
from requests import Timeout
from student.models import UNENROLL_DONE
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from commerce.signals import (refund_seat, send_refund_notification, generate_refund_notification_body,
create_zendesk_ticket)
from commerce.tests import TEST_PUBLIC_URL_ROOT, TEST_API_URL, TEST_API_SIGNING_KEY, JSON
from commerce.tests.mocks import mock_create_refund
from course_modes.models import CourseMode
ZENDESK_URL = 'http://zendesk.example.com/'
ZENDESK_USER = 'test@example.com'
ZENDESK_API_KEY = 'abc123'
@ddt.ddt
@override_settings(
ECOMMERCE_PUBLIC_URL_ROOT=TEST_PUBLIC_URL_ROOT,
ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY,
ZENDESK_URL=ZENDESK_URL, ZENDESK_USER=ZENDESK_USER, ZENDESK_API_KEY=ZENDESK_API_KEY
)
class TestRefundSignal(TestCase):
"""
Exercises logic triggered by the UNENROLL_DONE signal.
"""
def setUp(self):
super(TestRefundSignal, self).setUp()
self.requester = UserFactory(username="test-requester")
self.student = UserFactory(
username="test-student",
email="test-student@example.com",
)
self.course_enrollment = CourseEnrollmentFactory(
user=self.student,
course_id=CourseKey.from_string('course-v1:org+course+run'),
mode=CourseMode.VERIFIED,
)
self.course_enrollment.refundable = mock.Mock(return_value=True)
def send_signal(self, skip_refund=False):
"""
DRY helper: emit the UNENROLL_DONE signal, as is done in
common.djangoapps.student.models after a successful unenrollment.
"""
UNENROLL_DONE.send(sender=None, course_enrollment=self.course_enrollment, skip_refund=skip_refund)
@override_settings(
ECOMMERCE_PUBLIC_URL_ROOT=None,
ECOMMERCE_API_URL=None,
ECOMMERCE_API_SIGNING_KEY=None,
)
def test_no_service(self):
"""
Ensure that the receiver quietly bypasses attempts to initiate
refunds when there is no external service configured.
"""
with mock.patch('commerce.signals.refund_seat') as mock_refund_seat:
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('commerce.signals.refund_seat')
def test_receiver(self, mock_refund_seat):
"""
Ensure that the UNENROLL_DONE signal triggers correct calls to
refund_seat(), when it is appropriate to do so.
TODO (jsa): ideally we would assert that the signal receiver got wired
up independently of the import statement in this module. I'm not aware
of any reliable / sane way to do this.
"""
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment, self.student))
# if skip_refund is set to True in the signal, we should not try to initiate a refund.
mock_refund_seat.reset_mock()
self.send_signal(skip_refund=True)
self.assertFalse(mock_refund_seat.called)
# if the course_enrollment is not refundable, we should not try to initiate a refund.
mock_refund_seat.reset_mock()
self.course_enrollment.refundable = mock.Mock(return_value=False)
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('commerce.signals.refund_seat')
@mock.patch('commerce.signals.get_request_user', return_value=None)
def test_requester(self, mock_get_request_user, mock_refund_seat):
"""
Ensure the right requester is specified when initiating refunds.
"""
# no HTTP request/user: auth to commerce service as the unenrolled student.
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment, self.student))
# HTTP user is the student: auth to commerce service as the unenrolled student.
mock_get_request_user.return_value = self.student
mock_refund_seat.reset_mock()
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment, self.student))
# HTTP user is another user: auth to commerce service as the requester.
mock_get_request_user.return_value = self.requester
mock_refund_seat.reset_mock()
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment, self.requester))
# HTTP user is another server (AnonymousUser): do not try to initiate a refund at all.
mock_get_request_user.return_value = AnonymousUser()
mock_refund_seat.reset_mock()
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('commerce.signals.log.warning')
def test_not_authorized_warning(self, mock_log_warning):
"""
Ensure that expected authorization issues are logged as warnings.
"""
with mock_create_refund(status=403):
refund_seat(self.course_enrollment, UserFactory())
self.assertTrue(mock_log_warning.called)
@mock.patch('commerce.signals.log.exception')
def test_error_logging(self, mock_log_exception):
"""
Ensure that unexpected Exceptions are logged as errors (but do not
break program flow).
"""
with mock_create_refund(status=500):
self.send_signal()
self.assertTrue(mock_log_exception.called)
@mock.patch('commerce.signals.send_refund_notification')
def test_notification(self, mock_send_notification):
"""
Ensure the notification function is triggered when refunds are
initiated
"""
with mock_create_refund(status=200, response=[1, 2, 3]):
self.send_signal()
self.assertTrue(mock_send_notification.called)
@mock.patch('commerce.signals.send_refund_notification')
def test_notification_no_refund(self, mock_send_notification):
"""
Ensure the notification function is NOT triggered when no refunds are
initiated
"""
with mock_create_refund(status=200, response=[]):
self.send_signal()
self.assertFalse(mock_send_notification.called)
@mock.patch('commerce.signals.send_refund_notification')
@ddt.data(
CourseMode.HONOR,
CourseMode.PROFESSIONAL,
CourseMode.AUDIT,
CourseMode.NO_ID_PROFESSIONAL_MODE,
CourseMode.CREDIT_MODE,
)
def test_notification_not_verified(self, mode, mock_send_notification):
"""
Ensure the notification function is NOT triggered when the
unenrollment is for any mode other than verified (i.e. any mode other
than one for which refunds are presently supported). See the
TODO associated with XCOM-371 in the signals module in the commerce
package for more information.
"""
self.course_enrollment.mode = mode
with mock_create_refund(status=200, response=[1, 2, 3]):
self.send_signal()
self.assertFalse(mock_send_notification.called)
@mock.patch('commerce.signals.send_refund_notification', side_effect=Exception("Splat!"))
@mock.patch('commerce.signals.log.warning')
def test_notification_error(self, mock_log_warning, mock_send_notification):
"""
        Ensure an error occurring during notification does not break program
flow, but a warning is logged.
"""
with mock_create_refund(status=200, response=[1, 2, 3]):
self.send_signal()
self.assertTrue(mock_send_notification.called)
self.assertTrue(mock_log_warning.called)
@mock.patch('openedx.core.djangoapps.theming.helpers.is_request_in_themed_site', return_value=True)
def test_notification_themed_site(self, mock_is_request_in_themed_site): # pylint: disable=unused-argument
"""
Ensure the notification function raises an Exception if used in the
context of themed site.
"""
with self.assertRaises(NotImplementedError):
send_refund_notification(self.course_enrollment, [1, 2, 3])
@ddt.data('email@example.com', 'üñîcode.email@example.com')
@mock.patch('lms.djangoapps.commerce.signals.create_zendesk_ticket')
def test_send_refund_notification(self, student_email, mock_zendesk):
""" Verify the support team is notified of the refund request. """
refund_ids = [1, 2, 3]
# pass a student with unicode and ascii email to ensure that
# generate_refund_notification_body can handle formatting a unicode
# message
self.student.email = student_email
send_refund_notification(self.course_enrollment, refund_ids)
body = generate_refund_notification_body(self.student, refund_ids)
mock_zendesk.assert_called_with(
self.student.profile.name,
self.student.email,
"[Refund] User-Requested Refund",
body,
['auto_refund']
)
def _mock_zendesk_api(self, status=201):
""" Mock Zendesk's ticket creation API. """
httpretty.register_uri(httpretty.POST, urljoin(ZENDESK_URL, '/api/v2/tickets.json'), status=status,
body='{}', content_type=JSON)
def call_create_zendesk_ticket(self, name='Test user', email='user@example.com', subject='Test Ticket',
body='I want a refund!', tags=None):
""" Call the create_zendesk_ticket function. """
tags = tags or ['auto_refund']
create_zendesk_ticket(name, email, subject, body, tags)
@override_settings(ZENDESK_URL=ZENDESK_URL, ZENDESK_USER=None, ZENDESK_API_KEY=None)
def test_create_zendesk_ticket_no_settings(self):
""" Verify the Zendesk API is not called if the settings are not all set. """
with mock.patch('requests.post') as mock_post:
self.call_create_zendesk_ticket()
self.assertFalse(mock_post.called)
def test_create_zendesk_ticket_request_error(self):
"""
Verify exceptions are handled appropriately if the request to the Zendesk API fails.
We simply need to ensure the exception is not raised beyond the function.
"""
with mock.patch('requests.post', side_effect=Timeout) as mock_post:
self.call_create_zendesk_ticket()
self.assertTrue(mock_post.called)
@httpretty.activate
def test_create_zendesk_ticket(self):
""" Verify the Zendesk API is called. """
self._mock_zendesk_api()
name = 'Test user'
email = 'user@example.com'
subject = 'Test Ticket'
body = 'I want a refund!'
tags = ['auto_refund']
self.call_create_zendesk_ticket(name, email, subject, body, tags)
last_request = httpretty.last_request()
# Verify the headers
expected = {
'content-type': JSON,
'Authorization': 'Basic ' + base64.b64encode(
'{user}/token:{pwd}'.format(user=ZENDESK_USER, pwd=ZENDESK_API_KEY))
}
self.assertDictContainsSubset(expected, last_request.headers)
# Verify the content
expected = {
'ticket': {
'requester': {
'name': name,
'email': email
},
'subject': subject,
'comment': {'body': body},
'tags': ['LMS'] + tags
}
}
self.assertDictEqual(json.loads(last_request.body), expected)
|
synergeticsedx/deployment-wipro
|
lms/djangoapps/commerce/tests/test_signals.py
|
Python
|
agpl-3.0
| 12,240
|
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.derived_metric_definition import DerivedMetricDefinition # noqa: E501
from wavefront_api_client.rest import ApiException
class TestDerivedMetricDefinition(unittest.TestCase):
"""DerivedMetricDefinition unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDerivedMetricDefinition(self):
"""Test DerivedMetricDefinition"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.derived_metric_definition.DerivedMetricDefinition() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
wavefrontHQ/python-client
|
test/test_derived_metric_definition.py
|
Python
|
apache-2.0
| 1,356
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ComicSite.header_image'
db.add_column('comicmodels_comicsite', 'header_image',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ComicSite.header_image'
db.delete_column('comicmodels_comicsite', 'header_image')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comicmodels.comicsite': {
'Meta': {'object_name': 'ComicSite'},
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'header_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.URLField', [], {'default': "'http://www.grand-challenge.org/images/a/a7/Grey.png'", 'max_length': '200'}),
'short_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'skin': ('django.db.models.fields.CharField', [], {'max_length': '225', 'blank': 'True'})
},
'comicmodels.dropboxfolder': {
'Meta': {'object_name': 'DropboxFolder'},
'access_token_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'access_token_secret': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'comicsite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comicmodels.ComicSite']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_status_msg': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1023', 'blank': 'True'}),
'permission_lvl': ('django.db.models.fields.CharField', [], {'default': "'ALL'", 'max_length': '3'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'comicmodels.filesystemdataset': {
'Meta': {'object_name': 'FileSystemDataset'},
'comicsite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comicmodels.ComicSite']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'folder': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permission_lvl': ('django.db.models.fields.CharField', [], {'default': "'ALL'", 'max_length': '3'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'comicmodels.page': {
'Meta': {'ordering': "['comicsite', 'order']", 'unique_together': "(('comicsite', 'title'),)", 'object_name': 'Page'},
'comicsite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comicmodels.ComicSite']"}),
'display_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'permission_lvl': ('django.db.models.fields.CharField', [], {'default': "'ALL'", 'max_length': '3'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'comicmodels.uploadmodel': {
'Meta': {'object_name': 'UploadModel'},
'comicsite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comicmodels.ComicSite']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date.today', 'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date.today', 'auto_now': 'True', 'blank': 'True'}),
'permission_lvl': ('django.db.models.fields.CharField', [], {'default': "'ALL'", 'max_length': '3'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['comicmodels']
|
cpatrick/comic-django
|
django/comicmodels/migrations/0014_auto__add_field_comicsite_header_image.py
|
Python
|
apache-2.0
| 8,212
|
"""
Given two arrays, write a function to compute their intersection.
Example:
Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2].
Note:
Each element in the result must be unique.
The result can be in any order.
"""
class Solution(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
        if not nums1 or not nums2:
return []
return list(set(nums1).intersection(set(nums2)))
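
# Quick self-check added as a hedged example (not part of the original
# solution), mirroring the case in the docstring; sorted() is used because
# the result may come back in any order.
if __name__ == '__main__':
    assert sorted(Solution().intersection([1, 2, 2, 1], [2, 2])) == [2]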
|
ufjfeng/leetcode-jf-soln
|
python/349_intersection_of_two_arrays.py
|
Python
|
mit
| 526
|
from openerp import models, fields, api
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class ModelsXMLExport(models.Model):
_name = 'setting.connect'
node_address = fields.Char(string="Node Address")
    privat_key = fields.Text(string="Private Key")
asset = fields.Text(string="Asset")
import_node = fields.Char(string="Import Node Address")
    privat_key_import = fields.Text(string="Import Private Key")
import_assert = fields.Text(string="Import Asset")
|
stanta/darfchain
|
darfchain_docker_vagrant/addons/darfchain/models/models.py
|
Python
|
gpl-3.0
| 531
|
#!/usr/bin/python
# Copyright (C) 2010 Ryan Kavanagh <ryanakca@kubuntu.org>
#
# Slingshot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Slingshot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Slingshot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from distutils.core import setup
import distutils.sysconfig
import os
import os.path
import re
import sys
# Default prefix
prefix = '/usr/local'
# Get the install prefix if one is specified from the command line
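# Both the '--prefix=/path' and '--prefix /path' forms are handled below.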
for i, arg in enumerate(sys.argv):
    prefix_regex = re.compile(r'(?P<prefix>--prefix)?[=\s]?(?P<path>/[\w\s/]*)')
if prefix_regex.match(arg):
if prefix_regex.match(arg).group('prefix') and not prefix_regex.match(arg).group('path'):
# We got --prefix with a space instead of an equal. The next arg will have our path.
prefix = os.path.expandvars(prefix_regex.match(sys.argv[i+1]).group('path'))
elif prefix_regex.match(arg).group('path'):
prefix = prefix_regex.match(arg).group('path')
elif (sys.argv[i-1] == '--prefix') and prefix_regex.match(arg).group('path'):
prefix = os.path.expandvars(prefix_regex.match(arg).group('path'))
data_files = [(os.path.join(prefix,'share/applications/'),
['data/slingshot.desktop']),
(os.path.join(prefix, 'share/pixmaps/'),
['data/slingshot.xpm'])
]
setup(name='slingshot',
version='0.9',
description='Simple 2D shooting strategy game set in space, with gravity',
author='See README',
license='GNU General Public License version 2, or (at your option) ' +\
'any later version',
scripts=['src/bin/slingshot'],
packages=['slingshot'],
package_data={'slingshot':['data/*.png',
'data/*.ttf']},
package_dir={'slingshot':'src/slingshot'},
data_files=data_files,
)
|
ryanakca/slingshot
|
setup.py
|
Python
|
gpl-2.0
| 2,424
|
# Copyright (C) 2012-2021 Germar Reitze
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from PyQt5.QtGui import QIcon
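# Keep the current icon theme if it already provides the needed icons
# ('document-save' is used as the probe); otherwise fall back through the
# themes below until one does.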
for theme in ('ubuntu-mono-dark', 'gnome', 'oxygen'):
if not QIcon.fromTheme('document-save').isNull():
break
QIcon.setThemeName(theme)
#BackInTime Logo
BIT_LOGO = QIcon.fromTheme('document-save')
BIT_LOGO_INFO = QIcon.fromTheme('document-save-as')
#Main toolbar
TAKE_SNAPSHOT = BIT_LOGO
PAUSE = QIcon.fromTheme('media-playback-pause')
RESUME = QIcon.fromTheme('media-playback-start')
STOP = QIcon.fromTheme('media-playback-stop')
REFRESH_SNAPSHOT = QIcon.fromTheme('view-refresh')
SNAPSHOT_NAME = QIcon.fromTheme('stock_edit',
QIcon.fromTheme('gtk-edit',
QIcon.fromTheme('edit-rename',
QIcon.fromTheme('accessories-text-editor'))))
REMOVE_SNAPSHOT = QIcon.fromTheme('edit-delete')
VIEW_SNAPSHOT_LOG = QIcon.fromTheme('text-plain',
QIcon.fromTheme('text-x-generic'))
VIEW_LAST_LOG = QIcon.fromTheme('document-new')
SETTINGS = QIcon.fromTheme('gtk-preferences',
QIcon.fromTheme('configure'))
SHUTDOWN = QIcon.fromTheme('system-shutdown')
EXIT = QIcon.fromTheme('gtk-close',
QIcon.fromTheme('application-exit'))
#Help menu
HELP = QIcon.fromTheme('help-contents')
WEBSITE = QIcon.fromTheme('go-home')
CHANGELOG = QIcon.fromTheme('format-justify-fill')
FAQ = QIcon.fromTheme('help-faq',
QIcon.fromTheme('help-hint'))
QUESTION = QIcon.fromTheme('stock_dialog-question',
QIcon.fromTheme('help-feedback'))
BUG = QIcon.fromTheme('stock_dialog-error',
QIcon.fromTheme('tools-report-bug'))
ABOUT = QIcon.fromTheme('help-about')
#Files toolbar
UP = QIcon.fromTheme('go-up')
SHOW_HIDDEN = QIcon.fromTheme('show-hidden',
QIcon.fromTheme('list-add'))
RESTORE = QIcon.fromTheme('edit-undo')
RESTORE_TO = QIcon.fromTheme('document-revert')
SNAPSHOTS = QIcon.fromTheme('file-manager',
QIcon.fromTheme('view-list-details',
QIcon.fromTheme('system-file-manager')))
#Snapshot dialog
DIFF_OPTIONS = SETTINGS
DELETE_FILE = REMOVE_SNAPSHOT
SELECT_ALL = QIcon.fromTheme('edit-select-all')
#Restore dialog
RESTORE_DIALOG = VIEW_SNAPSHOT_LOG
#Settings dialog
SETTINGS_DIALOG = SETTINGS
PROFILE_EDIT = SNAPSHOT_NAME
ADD = QIcon.fromTheme('list-add')
REMOVE = QIcon.fromTheme('list-remove')
FOLDER = QIcon.fromTheme('folder')
FILE = VIEW_SNAPSHOT_LOG
EXCLUDE = REMOVE_SNAPSHOT
DEFAULT_EXCLUDE = QIcon.fromTheme('emblem-important')
INVALID_EXCLUDE = QIcon.fromTheme('emblem-ohno',
QIcon.fromTheme('face-surprise'))
|
bit-team/backintime
|
qt/icon.py
|
Python
|
gpl-2.0
| 3,799
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import logging
from django import forms
from django.core.mail.message import EmailMessage
from django.utils.translation import ugettext as _
from shuup.notify.base import Action, Binding
from shuup.notify.enums import ConstantUse, TemplateUse
from shuup.notify.typology import Email, Language, Text
class SendEmail(Action):
identifier = "send_email"
template_use = TemplateUse.MULTILINGUAL
template_fields = {
"subject": forms.CharField(required=True, label=_(u"Subject")),
"body": forms.CharField(required=True, label=_(u"Email Body"), widget=forms.Textarea()),
}
recipient = Binding(_("Recipient"), type=Email, constant_use=ConstantUse.VARIABLE_OR_CONSTANT, required=True)
language = Binding(_("Language"), type=Language, constant_use=ConstantUse.VARIABLE_OR_CONSTANT, required=True)
fallback_language = Binding(
_("Fallback language"), type=Language, constant_use=ConstantUse.CONSTANT_ONLY, default="en"
)
send_identifier = Binding(
_("Send Identifier"), type=Text, constant_use=ConstantUse.CONSTANT_ONLY, required=False,
help_text=_(
"If set, this identifier will be logged into the event's log target. If the identifier has already "
"been logged, the e-mail won't be sent again."
)
)
def execute(self, context):
"""
:param context: Script Context
:type context: shuup.notify.script.Context
"""
recipient = self.get_value(context, "recipient")
if not recipient:
context.log(logging.INFO, "%s: Not sending mail, no recipient", self.identifier)
return
send_identifier = self.get_value(context, "send_identifier")
if send_identifier and context.log_entry_queryset.filter(identifier=send_identifier).exists():
context.log(
logging.INFO,
"%s: Not sending mail, have sent it already (%r)",
self.identifier,
send_identifier
)
return
languages = [language for language in [
self.get_value(context, "language"),
self.get_value(context, "fallback_language"),
] if language]
strings = self.get_template_values(context, languages)
subject = strings.get("subject")
body = strings.get("body")
if not (subject and body):
context.log(
logging.INFO,
"%s: Not sending mail to %s, either subject or body empty",
self.identifier,
recipient
)
return
subject = " ".join(subject.splitlines()) # Email headers may not contain newlines
message = EmailMessage(subject=subject, body=body, to=[recipient])
message.send()
context.log(logging.INFO, "%s: Mail sent to %s :)", self.identifier, recipient)
if send_identifier:
context.add_log_entry_on_log_target("Email sent to %s: %s" % (recipient, subject), send_identifier)
|
hrayr-artunyan/shuup
|
shuup/notify/actions/email.py
|
Python
|
agpl-3.0
| 3,323
|
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
bundles = VOLT.ServerBundle('rejoin',
needs_catalog=False,
supports_live=True,
default_host=False,
safemode_available=False,
supports_daemon=True),
description = 'Rejoin the current node to a VoltDB cluster.'
)
def rejoin(runner):
runner.go()
|
eoneil1942/voltdb-4.7fix
|
lib/python/voltcli/voltdb.d/rejoin.py
|
Python
|
agpl-3.0
| 1,711
|
# -*- coding: utf-8 -*-
"""Subliminal custom utils."""
from __future__ import unicode_literals
import hashlib
def hash_itasa(video_path):
"""Compute a hash using ItaSA's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
readsize = 1024 * 1024 * 10
with open(video_path, 'rb') as f:
data = f.read(readsize)
return hashlib.md5(data).hexdigest()
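
# Hedged usage sketch (not part of the original module): the path below is
# purely illustrative; any readable video file works, since the hash is an
# MD5 over the first 10 MiB of the file.
if __name__ == '__main__':
    example_path = '/tmp/episode.mkv'  # hypothetical file
    print(hash_itasa(example_path))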
|
fernandog/Medusa
|
medusa/subtitle_providers/utils.py
|
Python
|
gpl-3.0
| 425
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
|
drjova/zenodo
|
zenodo/modules/pages/__init__.py
|
Python
|
gpl-3.0
| 898
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_invoice_onshipping(osv.osv_memory):
def _get_journal(self, cr, uid, context=None):
journal_obj = self.pool.get('account.journal')
journal_type = self._get_journal_type(cr, uid, context=context)
journals = journal_obj.search(cr, uid, [('type', '=', journal_type)])
return journals and journals[0] or False
def _get_journal_type(self, cr, uid, context=None):
if context is None:
context = {}
res_ids = context and context.get('active_ids', [])
pick_obj = self.pool.get('stock.picking')
pickings = pick_obj.browse(cr, uid, res_ids, context=context)
vals = []
pick = pickings and pickings[0]
if not pick or not pick.move_lines:
return 'sale'
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
type = pick.picking_type_id.code
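        # Map the picking direction and the counterpart location usage to a
        # journal type: outgoing to a supplier is a purchase refund, outgoing
        # to a customer is a sale, incoming from a supplier is a purchase and
        # incoming from a customer is a sale refund.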
if type == 'outgoing' and dest_usage == 'supplier':
journal_type = 'purchase_refund'
elif type == 'outgoing' and dest_usage == 'customer':
journal_type = 'sale'
elif type == 'incoming' and src_usage == 'supplier':
journal_type = 'purchase'
elif type == 'incoming' and src_usage == 'customer':
journal_type = 'sale_refund'
else:
journal_type = 'sale'
return journal_type
_name = "stock.invoice.onshipping"
_description = "Stock Invoice Onshipping"
_columns = {
'journal_id': fields.many2one('account.journal', 'Destination Journal', required=True),
'journal_type': fields.selection([('purchase_refund', 'Refund Purchase'), ('purchase', 'Create Supplier Invoice'),
('sale_refund', 'Refund Sale'), ('sale', 'Create Customer Invoice')], 'Journal Type', readonly=True),
'group': fields.boolean("Group by partner"),
'invoice_date': fields.date('Invoice Date'),
}
_defaults = {
'journal_type': _get_journal_type,
'journal_id' : _get_journal,
}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(stock_invoice_onshipping, self).view_init(cr, uid, fields_list, context=context)
pick_obj = self.pool.get('stock.picking')
count = 0
active_ids = context.get('active_ids',[])
for pick in pick_obj.browse(cr, uid, active_ids, context=context):
if pick.invoice_state != '2binvoiced':
count += 1
if len(active_ids) == count:
raise osv.except_osv(_('Warning!'), _('None of these picking lists require invoicing.'))
return res
def open_invoice(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoice_ids = self.create_invoice(cr, uid, ids, context=context)
if not invoice_ids:
raise osv.except_osv(_('Error!'), _('No invoice created!'))
data = self.browse(cr, uid, ids[0], context=context)
action_model = False
action = {}
journal2type = {'sale':'out_invoice', 'purchase':'in_invoice' , 'sale_refund':'out_refund', 'purchase_refund':'in_refund'}
inv_type = journal2type.get(data.journal_type) or 'out_invoice'
data_pool = self.pool.get('ir.model.data')
if inv_type == "out_invoice":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree1')
elif inv_type == "in_invoice":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree2')
elif inv_type == "out_refund":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree3')
elif inv_type == "in_refund":
action_id = data_pool.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree4')
if action_id:
action_pool = self.pool['ir.actions.act_window']
action = action_pool.read(cr, uid, action_id, context=context)
action['domain'] = "[('id','in', ["+','.join(map(str,invoice_ids))+"])]"
return action
return True
def create_invoice(self, cr, uid, ids, context=None):
context = dict(context or {})
picking_pool = self.pool.get('stock.picking')
data = self.browse(cr, uid, ids[0], context=context)
journal2type = {'sale':'out_invoice', 'purchase':'in_invoice', 'sale_refund':'out_refund', 'purchase_refund':'in_refund'}
context['date_inv'] = data.invoice_date
acc_journal = self.pool.get("account.journal")
inv_type = journal2type.get(data.journal_type) or 'out_invoice'
context['inv_type'] = inv_type
active_ids = context.get('active_ids', [])
res = picking_pool.action_invoice_create(cr, uid, active_ids,
journal_id = data.journal_id.id,
group = data.group,
type = inv_type,
context=context)
return res
|
diogocs1/comps
|
web/addons/stock_account/wizard/stock_invoice_onshipping.py
|
Python
|
apache-2.0
| 6,111
|
import os
from deveba.shell import shell
from deveba import utils
from deveba.handler import Handler, HandlerError, HandlerConflictError
class GitStatus(object):
"""
Parses the output of git status --porcelain -z into usable fields
"""
__slots__ = ["modified_files", "new_files", "conflicting_files"]
def __init__(self, out):
"""
out is the output of `git status --porcelain -z`
"""
CONFLICT_STATES = [
"DD",
"AU",
"UD",
"UA",
"DU",
"AA",
"UU",
]
self.modified_files = []
self.new_files = []
self.conflicting_files = []
for line in out.split("\0"):
if len(line) == 0:
continue
state = line[:2]
name = line[3:]
if state == "??":
self.new_files.append(name)
elif state in CONFLICT_STATES:
self.conflicting_files.append(name)
else:
self.modified_files.append(name)
def has_changes(self):
return bool(self.modified_files) or bool(self.new_files) or bool(self.conflicting_files)
class GitRepo(object):
"""
Helper class to run git commands
"""
__slots__ = ["path"]
def __init__(self, path):
self.path = path
@staticmethod
def _run_git(*args):
result = shell.git(*args)
if result.returncode != 0:
out = str(result.stdout, "utf-8").strip()
err = str(result.stderr, "utf-8").strip()
msg = []
msg.append("command: `git %s`" % " ".join(args))
if out:
msg.append("stdout: %s" % out)
if err:
msg.append("stderr: %s" % err)
raise HandlerError("\n".join(msg))
return str(result.stdout, "utf-8")
def run_git(self, *args):
old_cwd = os.getcwd()
os.chdir(self.path)
try:
return GitRepo._run_git(*args)
finally:
os.chdir(old_cwd)
@staticmethod
def clone(remote_repository_path, repository_path, *args):
GitRepo._run_git("clone", remote_repository_path, repository_path, *args)
return GitRepo(repository_path)
def get_status(self):
out = self.run_git("status", "--porcelain", "-z")
return GitStatus(out)
def need_push(self):
out = self.run_git("rev-list", "origin/master..")
return len(out.strip()) > 0
def add(self, *files):
self.run_git("add", *files)
def commit(self, msg, *args):
self.run_git("commit", "-m", msg, *args)
def need_merge(self):
out = self.run_git("rev-list", "..origin/master")
return len(out.strip()) > 0
def merge(self, remote):
try:
self.run_git("merge", remote)
        except HandlerError:
status = self.get_status()
if status.conflicting_files:
raise HandlerConflictError(status.conflicting_files)
else:
# Something else happened
raise
class GitHandler(Handler):
__slots__ = ["repo"]
def __init__(self, repo_path):
Handler.__init__(self)
self.repo = GitRepo(repo_path)
@classmethod
def create(cls, repo_path, options):
if not (repo_path / ".git").exists():
return None
return GitHandler(repo_path)
def __str__(self):
return "git: " + self.repo.path
def sync(self, ui):
def format_list(lst):
return "\n".join("- " + x for x in lst)
status = self.repo.get_status()
if status.has_changes():
while True:
ui.log_verbose("Modified files:\n%s\n\nNew files:\n%s\n"
% (format_list(status.modified_files), format_list(status.new_files))
)
choices = ["Commit", "Show Diff"]
answer = ui.question("Uncommitted changes detected", choices, "Commit")
if answer == "Commit":
ui.log_info("Committing changes")
self._commit(status.new_files)
break
elif answer == "Show Diff":
ui.log_verbose(self.repo.run_git("diff"))
elif answer == ui.CANCEL:
ui.log_warning("Cancelled commit")
break
self.repo.run_git("fetch")
if self.repo.need_merge():
if not ui.confirm("Upstream changes fetched, merge them?", True):
ui.log_warning("Cancelled merge")
return
ui.log_info("Merging upstream changes")
self.repo.merge("origin/master")
if self.repo.need_push():
if not ui.confirm("Local changes not pushed, push them?", True):
ui.log_warning("Cancelled push")
return
ui.log_info("Pushing changes")
self.repo.run_git("push")
def _commit(self, new_files):
if len(new_files) > 0:
self.repo.add(*new_files)
msg = utils.generate_commit_message(self.group)
author = "%s <%s>" % (utils.get_commit_author_name(), utils.get_commit_author_email())
self.repo.commit(msg, "-a", "--author", author)
|
agateau/deveba
|
deveba/githandler.py
|
Python
|
gpl-3.0
| 5,359
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for JIT compilation of functions with multiple results."""
import numpy as np
from tensorflow.compiler.mlir.tfrt.jit.python_binding import tf_jitrt
from tensorflow.python.platform import test
specializations = [
tf_jitrt.Specialization.ENABLED,
tf_jitrt.Specialization.DISABLED,
tf_jitrt.Specialization.ALWAYS,
]
jitrt = tf_jitrt.TfJitRtExecutor()
class MultipleResultsTest(test.TestCase):
def test_two_results(self):
for specialize in specializations:
mlir_function = """
func @test(%arg0: tensor<?xf32>) -> (tensor<?xf32>, tensor<?xf32>) {
%0 = "tf.Const"() { value = dense<1.0> : tensor<f32> }
: () -> tensor<f32>
%1 = "tf.AddV2"(%arg0, %0)
: (tensor<?xf32>, tensor<f32>) -> tensor<?xf32>
%2 = "tf.AddV2"(%1, %0)
: (tensor<?xf32>, tensor<f32>) -> tensor<?xf32>
return %1, %2 : tensor<?xf32>, tensor<?xf32>
}"""
compiled = jitrt.compile(mlir_function, 'test', specialize)
d0 = np.random.randint(1, 10)
arg0 = np.zeros(d0, np.float32)
[res0, res1] = jitrt.execute(compiled, [arg0])
np.testing.assert_allclose(res0, arg0 + 1.0, atol=0.0)
np.testing.assert_allclose(res1, arg0 + 2.0, atol=0.0)
def test_three_results(self):
for specialize in specializations:
mlir_function = """
func @test(%arg0: tensor<?xf32>) ->
(tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) {
%0 = "tf.Const"() { value = dense<1.0> : tensor<f32> }
: () -> tensor<f32>
%1 = "tf.AddV2"(%arg0, %0)
: (tensor<?xf32>, tensor<f32>) -> tensor<?xf32>
%2 = "tf.AddV2"(%1, %0)
: (tensor<?xf32>, tensor<f32>) -> tensor<?xf32>
%3 = "tf.AddV2"(%2, %0)
: (tensor<?xf32>, tensor<f32>) -> tensor<?xf32>
return %1, %2, %3 : tensor<?xf32>, tensor<?xf32>, tensor<?xf32>
}"""
compiled = jitrt.compile(mlir_function, 'test', specialize)
d0 = np.random.randint(1, 10)
arg0 = np.zeros(d0, np.float32)
[res0, res1, res2] = jitrt.execute(compiled, [arg0])
np.testing.assert_allclose(res0, arg0 + 1.0, atol=0.0)
np.testing.assert_allclose(res1, arg0 + 2.0, atol=0.0)
np.testing.assert_allclose(res2, arg0 + 3.0, atol=0.0)
def test_same_tensor_returned_twice(self):
for specialize in specializations:
mlir_function = """
func @test(%arg0: tensor<?xf32>) -> (tensor<?xf32>, tensor<?xf32>) {
%0 = "tf.Const"() { value = dense<1.0> : tensor<f32> }
: () -> tensor<f32>
%1 = "tf.AddV2"(%arg0, %0)
: (tensor<?xf32>, tensor<f32>) -> tensor<?xf32>
return %1, %1 : tensor<?xf32>, tensor<?xf32>
}"""
compiled = jitrt.compile(mlir_function, 'test', specialize)
d0 = np.random.randint(1, 10)
arg0 = np.zeros(d0, np.float32)
[res0, res1] = jitrt.execute(compiled, [arg0])
np.testing.assert_allclose(res0, arg0 + 1.0, atol=0.0)
np.testing.assert_allclose(res1, arg0 + 1.0, atol=0.0)
if __name__ == '__main__':
np.random.seed(0)
test.main()
|
tensorflow/tensorflow
|
tensorflow/compiler/mlir/tfrt/python_tests/multiple_results_test.py
|
Python
|
apache-2.0
| 3,867
|
"""
# A Better Where
WHERE2 is a near-linear time top-down clustering algorithm.
WHERE2 updates an older WHERE with new Python tricks.
## Standard Header Stuff
"""
from __future__ import division, print_function
from pdb import set_trace
import sys
import types
from demos import *
from libWhere import *
from nasa93 import *
from settingsWhere import *
sys.dont_write_bytecode = True
sys.path.insert(0, '/Users/rkrsn/git/axe/axe/')
"""
## Dimensionality Reduction with Fastmap
Project data in N dimensions down to a single dimension connecting
two distant points. Divide that data at the median of those projections.
"""
def pairs(lst):
for j in lst[0:]:
last = j
for i in lst[0:]:
yield last, i
def somepairs(m, data):
  reps = 1
  cmax = -10e32
  for _ in xrange(reps):
    one = any(data)
    two = furthest(m, one, data)
    three = furthest(m, two, data)
    c = dist(m, two, three) + 1e-5
    if c >= cmax:
      cmax = c
      east, west = two, three
  return west, east
def allpairs(m, data):
cmax = -10e32
for one in data:
    for two in [d for d in data if d is not one]:
      c = dist(m, one, two) + 1e-5
      if c >= cmax:
        cmax = c
        east, west = one, two
return west, east
def fastmap(m, data):
"Divide data into two using distance to two distant items."
west, east = somepairs(m, data)
"""
one = any(data) # 1) pick anything
west = furthest(m, one, data) # 2) west is as far as you can go from anything
east = furthest(m, west, data) # 3) east is as far as you can go from west
"""
c = dist(m, west, east) + 1e-5
# now find everyone's distance
lst = []
for one in data:
a = dist(m, one, west)
b = dist(m, one, east)
x = (a * a + c * c - b * b) / (2 * c) # cosine rule
y = max(0, a ** 2 - x ** 2) ** 0.5 # not used, here for a demo
lst += [(x, one)]
lst = sorted(lst)
mid = len(lst) // 2
wests = map(second, lst[:mid])
easts = map(second, lst[mid:])
return wests, west, easts, east, c
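"""
The split above uses the cosine rule: if an item is at distance _a_ from
_west_, _b_ from _east_, and the two poles are _c_ apart, then its projection
onto the west-east line is
    x = (a*a + c*c - b*b) / (2*c)
and _y = sqrt(a*a - x*x)_ is its distance off that line (computed above only
as a demo). As a quick check, with a=3, b=4, c=5 this gives x = 18/10 = 1.8.
"""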
def gt(x, y): return x > y
def lt(x, y): return x < y
"""
In the above:
+ _m_ is some model that generates candidate
solutions that we wish to niche.
+ _(west,east)_ are not _the_ most distant points
(that would require _N*N) distance
calculations). But they are at least very distant
to each other.
This code needs some helper functions. _Dist_ uses
the standard Euclidean measure. Note that you tune
what it uses to define the niches (decisions or
objectives) using the _what_ parameter:
"""
def dist(m, i, j,
what = lambda m: m.decisions):
"Euclidean distance 0 <= d <= 1 between decisions"
n = len(i.cells)
deltas = 0
for c in what(m):
n1 = norm(m, c, i.cells[c])
n2 = norm(m, c, j.cells[c])
inc = (n1 - n2) ** 2
deltas += inc
n += abs(m.w[c])
return deltas ** 0.5 / n ** 0.5
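"""
Summing up the loop above, the value returned is
    d(i,j) = sqrt( sum_c (norm(i_c) - norm(j_c))**2 ) / sqrt( len(i.cells) + sum_c |w_c| )
so distances land roughly in the zero-to-one range.
"""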
"""
The _Dist_ function normalizes all the raw values zero to one.
"""
def norm(m, c, val) :
"Normalizes val in col c within model m 0..1"
return (atom(val) - atom(m.lo[c])) / (atom(m.hi[c]) - atom(m.lo[c]) + 0.0001)
"""
Now we can define _furthest_:
"""
def furthest(m, i, all,
init = 0,
better = gt):
"find which of all is furthest from 'i'"
out, d = i, init
for j in all:
if i == j: continue
tmp = dist(m, i, j)
if better(tmp, d):
out, d = j, tmp
return out
"""
And of course, _closest_:
"""
def closest(m, i, all):
return furthest(m, i, all, init = 10 ** 32, better = lt)
"""
## WHERE2 = Recursive Fastmap
WHERE2 finds everyone else's distance from the poles
and divides the data at the median of those
distances. This all stops if:
+ Any division has _tooFew_ solutions (say,
less than _sqrt_ of the total number of
solutions).
+ Something has gone horribly wrong and you are
recursing _tooDeep_
This code is controlled by the options in [_The_ settings](settingspy). For
example, if _The.pruning_ is true, we may ignore
some sub-tree (this process is discussed, later on).
Also, if _The.verbose_ is true, the _show_
function prints out a little tree showing the
progress (and to print indents in that tree, we use
the string _The.b4_). For example, here's WHERE2
dividing 93 examples from NASA93.
---| _where |-----------------
93
|.. 46
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. 47
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. 24
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
WHERE2 returns clusters, where each cluster contains
multiple solutions.
"""
def where2(m, data, lvl = 0, up = None, verbose = False):
node = o(val = None, _up = up, _kids = [])
def tooDeep(): return lvl > The.what.depthMax
def tooFew() : return len(data) < The.what.minSize
def show(suffix):
if verbose:
print(The.what.b4 * lvl, len(data),
suffix, ' ; ', id(node) % 1000, sep = '')
if tooDeep() or tooFew():
show(".")
node.val = data
else:
show("")
wests, west, easts, east, c = fastmap(m, data)
node.update(c = c, east = east, west = west)
goLeft, goRight = maybePrune(m, lvl, west, east)
if goLeft:
node._kids += [where2(m, wests, lvl + 1, node)]
if goRight:
node._kids += [where2(m, easts, lvl + 1, node)]
return node
"""
## An Experimental Extension
Lately I've been experimenting with a system that
prunes as it divides the data. GALE checks for
domination between the poles and ignores data in
halves with a dominated pole. This means that for
_N_ solutions we only ever have to evaluate
_2*log(N)_ of them, which is useful if each
evaluation takes a long time.
The niches found in this way
contain non-dominated poles; i.e. they are
approximations to the Pareto frontier.
Preliminary results show that this is a useful
approach but you should treat those results with a
grain of salt.
In any case, this code supports that pruning as an
optional extra (and is enabled using the
_slots.pruning_ flag). In summary, this code says if
the scores for the poles are more different than
_slots.wriggle_ and one pole has a better score than
the other, then ignore the other pole.
"""
def maybePrune(m, lvl, west, east):
"Usually, go left then right, unless dominated."
goLeft, goRight = True, True # default
if The.prune and lvl >= The.what.depthMin:
sw = scores(m, west)
se = scores(m, east)
if abs(sw - se) > The.wriggle: # big enough to consider
if se > sw: goLeft = False # no left
if sw > se: goRight = False # no right
return goLeft, goRight
"""
Note that I do not allow pruning until we have
descended at least _slots.depthMin_ into the tree.
### Model-specific Stuff
WHERE2 talks to models via the following model-specific variables:
+ _m.cols_: list of indices in a list
+ _m.names_: a list of names for each column.
+ _m.decisions_: the subset of cols relating to decisions.
+ _m.objectives_: the subset of cols relating to objectives.
+ _m.eval(m,eg)_: function for computing variables from _eg_.
+ _m.lo[c]_ : the lowest value in column _c_.
+ _m.hi[c]_ : the highest value in column _c_.
+ _m.w[c]_: the weight for each column. Usually equal to one.
If an objective and if we are minimizing that objective, then the weight is negative.
### Model-general stuff
Using the model-specific stuff, WHERE2 defines some
useful general functions.
"""
def some(m, x) :
"with variable x of model m, pick one value at random"
return m.lo[x] + by(m.hi[x] - m.lo[x])
def scores(m, it):
"Score an individual."
if not it.scored:
m.eval(m, it)
new, w = 0, 0
for c in m.objectives:
val = it.cells[c]
w += abs(m.w[c])
tmp = norm(m, c, val)
if m.w[c] < 0:
tmp = 1 - tmp
new += (tmp ** 2)
it.score = (new ** 0.5) / (w ** 0.5 + 1e-4)
it.scored = True
return it.score
"""
## Tree Code
Tools for manipulating the tree returned by _where2_.
### Primitive: Walk the nodes
"""
def nodes(tree, seen = None, steps = 0):
if seen is None: seen = []
if tree:
if not id(tree) in seen:
seen.append(id(tree))
yield tree, steps
for kid in tree._kids:
for sub, steps1 in nodes(kid, seen, steps + 1):
yield sub, steps1
"""
### Return nodes that are leaves
"""
def leaves(tree, seen = None, steps = 0):
for node, steps1 in nodes(tree, seen, steps):
if not node._kids:
yield node, steps1
"""
### Return nodes nearest to furthest
"""
# walk sideways..
def neighbors(leaf, seen = None, steps = -1):
"""Walk the tree from 'leaf' increasingly
distant leaves. """
if seen is None: seen = []
for down, steps1 in leaves(leaf, seen, steps + 1):
yield down, steps1
if leaf:
for up, steps1 in neighbors(leaf._up, seen, steps + 1):
yield up, steps1
"""
### Return nodes in Groups, Closest to Furthest
"""
def around(leaf, f = lambda x: x):
tmp, last = [], None
for node, dist in neighbors(leaf):
if dist > 0:
if dist == last:
tmp += [f(node)]
else:
if tmp:
yield last, tmp
tmp = [f(node)]
last = dist
if tmp:
yield last, tmp
"""
## Demo Code
### Code Showing the scores
"""
# @go
def _scores():
m = nasa93()
out = []
for row in m._rows:
scores(m, row)
out += [(row.score, [row.cells[c] for c in m.objectives])]
for s, x in sorted(out):
print(s, x)
"""
### Code Showing the Distances
"""
# @go
def _distances(m = nasa93):
m = m()
seed(The.seed)
for i in m._rows:
j = closest(m, i, m._rows)
k = furthest(m, i, m._rows)
idec = [i.cells[c] for c in m.decisions]
jdec = [j.cells[c] for c in m.decisions]
kdec = [k.cells[c] for c in m.decisions]
print("\n",
gs(idec), g(scores(m, i)), "\n",
gs(jdec), "closest ", g(dist(m, i, j)), "\n",
gs(kdec), "furthest", g(dist(m, i, k)))
"""
### A Demo for Where2.
"""
def prepare(m, settings = None):
"Prepare the 'The' class"
seed(1)
global The
The = settings if settings else defaults().update(verbose = True,
minSize = len(m._rows) ** 0.5,
prune = False,
wriggle = 0.3)
return The
def _where(m = nasa93):
m = m()
seed(1)
told = N()
for r in m._rows:
s = scores(m, r)
told += s
global The
The = defaults().update(verbose = True,
minSize = len(m._rows) ** 0.5,
prune = False,
wriggle = 0.3 * told.sd())
tree = where2(m, m._rows)
n = 0
for node, _ in leaves(tree):
ID = id(node) % 1000
print(node.val)
"""
print(m,' ',end="")
n += m
print(id(node) % 1000, ' ',end='')
for near,dist in neighbors(node):
print(dist,id(near) % 1000,' ',end='')
print("")
print(n)
filter = lambda z: id(z) % 1000
for node,_ in leaves(tree):
print(filter(node),
[x for x in around(node,filter)])
"""
|
ai-se/Tree-Learner
|
_imports/where2.py
|
Python
|
unlicense
| 11,309
|
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server side copy is a feature that enables users/clients to COPY objects
between accounts and containers without the need to download and then
re-upload objects, thus eliminating additional bandwidth consumption and
also saving time. This may be used when renaming/moving an object which
in Swift is a (COPY + DELETE) operation.
The server side copy middleware should be inserted in the pipeline after auth
and before the quotas and large object middlewares. If it is not present in the
pipeline in the proxy-server configuration file, it will be inserted
automatically. There is no configurable option provided to turn off server
side copy.
--------
Metadata
--------
* All metadata of source object is preserved during object copy.
* One can also provide additional metadata during PUT/COPY request. This will
over-write any existing conflicting keys.
* Server side copy can also be used to change content-type of an existing
object.
-----------
Object Copy
-----------
* The destination container must exist before requesting copy of the object.
* When several replicas exist, the system copies from the most recent replica.
That is, the copy operation behaves as though the X-Newest header is in the
request.
* The request to copy an object should have no body (i.e. content-length of the
request must be zero).
There are two ways in which an object can be copied:
1. Send a PUT request to the new object (destination/target) with an additional
header named ``X-Copy-From`` specifying the source object
(in '/container/object' format). Example::
curl -i -X PUT http://<storage_url>/container1/destination_obj
-H 'X-Auth-Token: <token>'
-H 'X-Copy-From: /container2/source_obj'
-H 'Content-Length: 0'
2. Send a COPY request with an existing object in URL with an additional header
named ``Destination`` specifying the destination/target object
(in '/container/object' format). Example::
curl -i -X COPY http://<storage_url>/container2/source_obj
-H 'X-Auth-Token: <token>'
-H 'Destination: /container1/destination_obj'
-H 'Content-Length: 0'
Note that if the incoming request has some conditional headers (e.g. ``Range``,
``If-Match``), the *source* object will be evaluated for these headers (i.e. if
PUT with both ``X-Copy-From`` and ``Range``, Swift will make a partial copy to
the destination object).
-------------------------
Cross Account Object Copy
-------------------------
Objects can also be copied from one account to another account if the user
has the necessary permissions (i.e. permission to read from container
in source account and permission to write to container in destination account).
Similar to examples mentioned above, there are two ways to copy objects across
accounts:
1. Like the example above, send PUT request to copy object but with an
additional header named ``X-Copy-From-Account`` specifying the source
account. Example::
curl -i -X PUT http://<host>:<port>/v1/AUTH_test1/container/destination_obj
-H 'X-Auth-Token: <token>'
-H 'X-Copy-From: /container/source_obj'
-H 'X-Copy-From-Account: AUTH_test2'
-H 'Content-Length: 0'
2. Like the previous example, send a COPY request but with an additional header
named ``Destination-Account`` specifying the name of destination account.
Example::
curl -i -X COPY http://<host>:<port>/v1/AUTH_test2/container/source_obj
-H 'X-Auth-Token: <token>'
-H 'Destination: /container/destination_obj'
-H 'Destination-Account: AUTH_test1'
-H 'Content-Length: 0'
-------------------
Large Object Copy
-------------------
The best option to copy a large object is to copy segments individually.
To copy the manifest object of a large object, add the query parameter to
the copy request::
?multipart-manifest=get
If a request is sent without the query parameter, an attempt will be made to
copy the whole object but will fail if the object size is
greater than 5GB.
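For example, a COPY of just the manifest of a static large object might look
like::
    curl -i -X COPY 'http://<storage_url>/container2/manifest_obj?multipart-manifest=get'
     -H 'X-Auth-Token: <token>'
     -H 'Destination: /container1/manifest_obj_copy'
     -H 'Content-Length: 0'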
-------------------
Object Post as Copy
-------------------
Historically, this has been a feature (and a configurable option with default
set to True) in proxy server configuration. This has been moved to server side
copy middleware and the default changed to False.
When ``object_post_as_copy`` is set to ``true``, an incoming POST request is
morphed into a COPY request where source and destination objects are same.
This feature was necessary because of a previous behavior where POSTS would
update the metadata on the object but not on the container. As a result,
features like container sync would not work correctly. This is no longer the
case and this option is now deprecated. It will be removed in a future release.
"""
import os
from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
from six.moves.urllib.parse import quote, unquote
from swift.common import utils
from swift.common.utils import get_logger, \
config_true_value, FileLikeIter, read_conf_dir, close_if_possible
from swift.common.swob import Request, HTTPPreconditionFailed, \
HTTPRequestEntityTooLarge, HTTPBadRequest, HTTPException
from swift.common.http import HTTP_MULTIPLE_CHOICES, HTTP_CREATED, \
is_success, HTTP_OK
from swift.common.constraints import check_account_format, MAX_FILE_SIZE
from swift.common.request_helpers import copy_header_subset, remove_items, \
is_sys_meta, is_sys_or_user_meta, is_object_transient_sysmeta
from swift.common.wsgi import WSGIContext, make_subrequest
def _check_path_header(req, name, length, error_msg):
"""
Validate that the value of path-like header is
well formatted. We assume the caller ensures that
specific header is present in req.headers.
:param req: HTTP request object
:param name: header name
:param length: length of path segment check
:param error_msg: error message for client
:returns: A tuple with path parts according to length
:raise HTTPPreconditionFailed: if header value
is not well formatted.
"""
src_header = unquote(req.headers.get(name))
if not src_header.startswith('/'):
src_header = '/' + src_header
try:
return utils.split_path(src_header, length, length, True)
except ValueError:
raise HTTPPreconditionFailed(
request=req,
body=error_msg)
def _check_copy_from_header(req):
"""
Validate that the value from x-copy-from header is
well formatted. We assume the caller ensures that
x-copy-from header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise HTTPPreconditionFailed: if x-copy-from value
is not well formatted.
"""
return _check_path_header(req, 'X-Copy-From', 2,
'X-Copy-From header must be of the form '
'<container name>/<object name>')
def _check_destination_header(req):
"""
Validate that the value from destination header is
well formatted. We assume the caller ensures that
destination header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise HTTPPreconditionFailed: if destination value
is not well formatted.
"""
return _check_path_header(req, 'Destination', 2,
'Destination header must be of the form '
'<container name>/<object name>')
def _copy_headers(src, dest):
"""
Will copy desired headers from src to dest.
    :param src: an instance of collections.Mapping
    :param dest: an instance of collections.Mapping
"""
for k, v in src.items():
if (is_sys_or_user_meta('object', k) or
is_object_transient_sysmeta(k) or
k.lower() == 'x-delete-at'):
dest[k] = v
class ServerSideCopyWebContext(WSGIContext):
def __init__(self, app, logger):
super(ServerSideCopyWebContext, self).__init__(app)
self.app = app
self.logger = logger
def get_source_resp(self, req):
sub_req = make_subrequest(
req.environ, path=req.path_info, headers=req.headers,
swift_source='SSC')
return sub_req.get_response(self.app)
def send_put_req(self, req, additional_resp_headers, start_response):
app_resp = self._app_call(req.environ)
self._adjust_put_response(req, additional_resp_headers)
start_response(self._response_status,
self._response_headers,
self._response_exc_info)
return app_resp
def _adjust_put_response(self, req, additional_resp_headers):
if 'swift.post_as_copy' in req.environ:
# Older editions returned 202 Accepted on object POSTs, so we'll
# convert any 201 Created responses to that for compatibility with
# picky clients.
if self._get_status_int() == HTTP_CREATED:
self._response_status = '202 Accepted'
elif is_success(self._get_status_int()):
for header, value in additional_resp_headers.items():
self._response_headers.append((header, value))
def handle_OPTIONS_request(self, req, start_response):
app_resp = self._app_call(req.environ)
if is_success(self._get_status_int()):
for i, (header, value) in enumerate(self._response_headers):
if header.lower() == 'allow' and 'COPY' not in value:
self._response_headers[i] = ('Allow', value + ', COPY')
if header.lower() == 'access-control-allow-methods' and \
'COPY' not in value:
self._response_headers[i] = \
('Access-Control-Allow-Methods', value + ', COPY')
start_response(self._response_status,
self._response_headers,
self._response_exc_info)
return app_resp
class ServerSideCopyMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route="copy")
# Read the old object_post_as_copy option from Proxy app just in case
# someone has set it to false (non default). This wouldn't cause
# problems during upgrade.
self._load_object_post_as_copy_conf(conf)
self.object_post_as_copy = \
config_true_value(conf.get('object_post_as_copy', 'false'))
if self.object_post_as_copy:
msg = ('object_post_as_copy=true is deprecated; remove all '
'references to it from %s to disable this warning. This '
'option will be ignored in a future release' % conf.get(
'__file__', 'proxy-server.conf'))
self.logger.warning(msg)
def _load_object_post_as_copy_conf(self, conf):
if ('object_post_as_copy' in conf or '__file__' not in conf):
# Option is explicitly set in middleware conf. In that case,
# we assume operator knows what he's doing.
        # This takes precedence over the one set in the proxy app
return
cp = ConfigParser()
if os.path.isdir(conf['__file__']):
read_conf_dir(cp, conf['__file__'])
else:
cp.read(conf['__file__'])
try:
pipe = cp.get("pipeline:main", "pipeline")
except (NoSectionError, NoOptionError):
return
proxy_name = pipe.rsplit(None, 1)[-1]
proxy_section = "app:" + proxy_name
try:
conf['object_post_as_copy'] = cp.get(proxy_section,
'object_post_as_copy')
except (NoSectionError, NoOptionError):
pass
def __call__(self, env, start_response):
req = Request(env)
try:
(version, account, container, obj) = req.split_path(4, 4, True)
except ValueError:
# If obj component is not present in req, do not proceed further.
return self.app(env, start_response)
self.account_name = account
self.container_name = container
self.object_name = obj
try:
# In some cases, save off original request method since it gets
# mutated into PUT during handling. This way logging can display
# the method the client actually sent.
if req.method == 'PUT' and req.headers.get('X-Copy-From'):
return self.handle_PUT(req, start_response)
elif req.method == 'COPY':
req.environ['swift.orig_req_method'] = req.method
return self.handle_COPY(req, start_response)
elif req.method == 'POST' and self.object_post_as_copy:
req.environ['swift.orig_req_method'] = req.method
return self.handle_object_post_as_copy(req, start_response)
elif req.method == 'OPTIONS':
# Does not interfere with OPTIONS response from
# (account,container) servers and /info response.
return self.handle_OPTIONS(req, start_response)
except HTTPException as e:
return e(req.environ, start_response)
return self.app(env, start_response)
def handle_object_post_as_copy(self, req, start_response):
req.method = 'PUT'
req.path_info = '/v1/%s/%s/%s' % (
self.account_name, self.container_name, self.object_name)
req.headers['Content-Length'] = 0
req.headers.pop('Range', None)
req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
self.object_name))
req.environ['swift.post_as_copy'] = True
params = req.params
# for post-as-copy always copy the manifest itself if source is *LO
params['multipart-manifest'] = 'get'
req.params = params
return self.handle_PUT(req, start_response)
def handle_COPY(self, req, start_response):
if not req.headers.get('Destination'):
return HTTPPreconditionFailed(request=req,
body='Destination header required'
)(req.environ, start_response)
dest_account = self.account_name
if 'Destination-Account' in req.headers:
dest_account = req.headers.get('Destination-Account')
dest_account = check_account_format(req, dest_account)
req.headers['X-Copy-From-Account'] = self.account_name
self.account_name = dest_account
del req.headers['Destination-Account']
dest_container, dest_object = _check_destination_header(req)
source = '/%s/%s' % (self.container_name, self.object_name)
self.container_name = dest_container
self.object_name = dest_object
# re-write the existing request as a PUT instead of creating a new one
req.method = 'PUT'
        # As the path info is updated with the destination container,
# the proxy server app will use the right object controller
# implementation corresponding to the container's policy type.
ver, _junk = req.split_path(1, 2, rest_with_last=True)
req.path_info = '/%s/%s/%s/%s' % \
(ver, dest_account, dest_container, dest_object)
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote(source)
del req.headers['Destination']
return self.handle_PUT(req, start_response)
def _get_source_object(self, ssc_ctx, source_path, req):
source_req = req.copy_get()
        # make sure the source request uses its container_info
source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
source_req.path_info = quote(source_path)
source_req.headers['X-Newest'] = 'true'
if 'swift.post_as_copy' in req.environ:
# We're COPYing one object over itself because of a POST; rely on
# the PUT for write authorization, don't require read authorization
source_req.environ['swift.authorize'] = lambda req: None
source_req.environ['swift.authorize_override'] = True
# in case we are copying an SLO manifest, set format=raw parameter
params = source_req.params
if params.get('multipart-manifest') == 'get':
params['format'] = 'raw'
source_req.params = params
source_resp = ssc_ctx.get_source_resp(source_req)
if source_resp.content_length is None:
# This indicates a transfer-encoding: chunked source object,
# which currently only happens because there are more than
# CONTAINER_LISTING_LIMIT segments in a segmented object. In
# this case, we're going to refuse to do the server-side copy.
close_if_possible(source_resp.app_iter)
return HTTPRequestEntityTooLarge(request=req)
if source_resp.content_length > MAX_FILE_SIZE:
close_if_possible(source_resp.app_iter)
return HTTPRequestEntityTooLarge(request=req)
return source_resp
def _create_response_headers(self, source_path, source_resp, sink_req):
resp_headers = dict()
acct, path = source_path.split('/', 3)[2:4]
resp_headers['X-Copied-From-Account'] = quote(acct)
resp_headers['X-Copied-From'] = quote(path)
if 'last-modified' in source_resp.headers:
resp_headers['X-Copied-From-Last-Modified'] = \
source_resp.headers['last-modified']
# Existing sys and user meta of source object is added to response
# headers in addition to the new ones.
_copy_headers(sink_req.headers, resp_headers)
return resp_headers
def handle_PUT(self, req, start_response):
if req.content_length:
return HTTPBadRequest(body='Copy requests require a zero byte '
'body', request=req,
content_type='text/plain')(req.environ,
start_response)
# Form the path of source object to be fetched
ver, acct, _rest = req.split_path(2, 3, True)
src_account_name = req.headers.get('X-Copy-From-Account')
if src_account_name:
src_account_name = check_account_format(req, src_account_name)
else:
src_account_name = acct
src_container_name, src_obj_name = _check_copy_from_header(req)
source_path = '/%s/%s/%s/%s' % (ver, src_account_name,
src_container_name, src_obj_name)
if req.environ.get('swift.orig_req_method', req.method) != 'POST':
self.logger.info("Copying object from %s to %s" %
(source_path, req.path))
# GET the source object, bail out on error
ssc_ctx = ServerSideCopyWebContext(self.app, self.logger)
source_resp = self._get_source_object(ssc_ctx, source_path, req)
if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
return source_resp(source_resp.environ, start_response)
# Create a new Request object based on the original request instance.
# This will preserve original request environ including headers.
sink_req = Request.blank(req.path_info, environ=req.environ)
def is_object_sysmeta(k):
return is_sys_meta('object', k)
if 'swift.post_as_copy' in sink_req.environ:
# Post-as-copy: ignore new sysmeta, copy existing sysmeta
remove_items(sink_req.headers, is_object_sysmeta)
copy_header_subset(source_resp, sink_req, is_object_sysmeta)
elif config_true_value(req.headers.get('x-fresh-metadata', 'false')):
# x-fresh-metadata only applies to copy, not post-as-copy: ignore
# existing user metadata, update existing sysmeta with new
copy_header_subset(source_resp, sink_req, is_object_sysmeta)
copy_header_subset(req, sink_req, is_object_sysmeta)
else:
# First copy existing sysmeta, user meta and other headers from the
# source to the sink, apart from headers that are conditionally
# copied below and timestamps.
exclude_headers = ('x-static-large-object', 'x-object-manifest',
'etag', 'content-type', 'x-timestamp',
'x-backend-timestamp')
copy_header_subset(source_resp, sink_req,
lambda k: k.lower() not in exclude_headers)
# now update with original req headers
sink_req.headers.update(req.headers)
params = sink_req.params
if params.get('multipart-manifest') == 'get':
if 'X-Static-Large-Object' in source_resp.headers:
params['multipart-manifest'] = 'put'
if 'X-Object-Manifest' in source_resp.headers:
del params['multipart-manifest']
if 'swift.post_as_copy' not in sink_req.environ:
sink_req.headers['X-Object-Manifest'] = \
source_resp.headers['X-Object-Manifest']
sink_req.params = params
# Set swift.source, data source, content length and etag
# for the PUT request
sink_req.environ['swift.source'] = 'SSC'
sink_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter)
sink_req.content_length = source_resp.content_length
if (source_resp.status_int == HTTP_OK and
'X-Static-Large-Object' not in source_resp.headers and
('X-Object-Manifest' not in source_resp.headers or
req.params.get('multipart-manifest') == 'get')):
# copy source etag so that copied content is verified, unless:
# - not a 200 OK response: source etag may not match the actual
# content, for example with a 206 Partial Content response to a
# ranged request
# - SLO manifest: etag cannot be specified in manifest PUT; SLO
# generates its own etag value which may differ from source
# - SLO: etag in SLO response is not hash of actual content
# - DLO: etag in DLO response is not hash of actual content
sink_req.headers['Etag'] = source_resp.etag
else:
# since we're not copying the source etag, make sure that any
# container update override values are not copied.
remove_items(sink_req.headers, lambda k: k.startswith(
'X-Object-Sysmeta-Container-Update-Override-'))
# We no longer need these headers
sink_req.headers.pop('X-Copy-From', None)
sink_req.headers.pop('X-Copy-From-Account', None)
# If the copy request does not explicitly override content-type,
# use the one present in the source object.
if not req.headers.get('content-type'):
sink_req.headers['Content-Type'] = \
source_resp.headers['Content-Type']
# Create response headers for PUT response
resp_headers = self._create_response_headers(source_path,
source_resp, sink_req)
put_resp = ssc_ctx.send_put_req(sink_req, resp_headers, start_response)
close_if_possible(source_resp.app_iter)
return put_resp
def handle_OPTIONS(self, req, start_response):
return ServerSideCopyWebContext(self.app, self.logger).\
handle_OPTIONS_request(req, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def copy_filter(app):
return ServerSideCopyMiddleware(app, conf)
return copy_filter
|
psachin/swift
|
swift/common/middleware/copy.py
|
Python
|
apache-2.0
| 24,612
|
from itertools import combinations_with_replacement
import pytest
from aioredis.errors import ReplyError
@pytest.mark.asyncio
async def test_sadd(redis):
ret = await redis.sadd("foo", "bar")
assert ret == 1
assert redis._redis.smembers("foo") == {b"bar"}
@pytest.mark.asyncio
async def test_sadd_many(redis):
ret = await redis.sadd("foo", "bar", "baz")
assert ret == 2
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_scard(redis):
redis._redis.sadd("foo", "bar")
assert await redis.scard("foo") == 1
redis._redis.sadd("foo", "bar", "baz")
assert await redis.scard("foo") == 2
@pytest.mark.asyncio
async def test_sdiff(redis):
redis._redis.sadd("foo_1", "bar", "baz")
redis._redis.sadd("foo_2", "no bar", "baz", "bazzo")
assert await redis.sdiff("foo_1", "foo_2") == [b"bar"]
assert set(await redis.sdiff("foo_2", "foo_1")) == {b"bazzo", b"no bar"}
@pytest.mark.asyncio
async def test_sdiffstore(redis):
redis._redis.sadd("foo_1", "bar", "baz")
redis._redis.sadd("foo_2", "no bar", "baz", "bazzo")
assert await redis.sdiffstore("foo_3", "foo_1", "foo_2") == 1
assert redis._redis.smembers("foo_3") == {b"bar"}
assert await redis.sdiffstore("foo_1", "foo_1", "foo_2") == 1
assert redis._redis.smembers("foo_1") == {b"bar"}
@pytest.mark.asyncio
async def test_sinter(redis):
redis._redis.sadd("foo_1", "bar", "baz")
redis._redis.sadd("foo_2", "no bar", "baz", "bazzo")
assert await redis.sinter("foo_1", "foo_2") == [b"baz"]
@pytest.mark.asyncio
async def test_sinterstore(redis):
redis._redis.sadd("foo_1", "bar", "baz")
redis._redis.sadd("foo_2", "no bar", "baz", "bazzo")
assert await redis.sinterstore("foo_3", "foo_1", "foo_2") == 1
assert redis._redis.smembers("foo_3") == {b"baz"}
assert await redis.sinterstore("foo_1", "foo_1", "foo_2") == 1
assert redis._redis.smembers("foo_1") == {b"baz"}
@pytest.mark.asyncio
async def test_sismember(redis):
redis._redis.sadd("foo", "bar", "baz")
assert await redis.sismember("foo", "bar") is 1
assert await redis.sismember("foo", "barbar") is 0
@pytest.mark.asyncio
async def test_smembers(redis):
redis._redis.sadd("foo", "bar", "baz")
members = await redis.smembers("foo")
assert sorted(members) == [b"bar", b"baz"]
members = await redis.smembers("foo", encoding="utf8")
assert sorted(members) == ["bar", "baz"]
@pytest.mark.asyncio
async def test_smove(redis):
redis._redis.sadd("foo_1", "bar")
redis._redis.sadd("foo_2", "baz")
assert await redis.smove("foo_1", "foo_2", "bar") is 1
assert redis._redis.smembers("foo_1") == set()
assert redis._redis.smembers("foo_2") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_smove_bad_member(redis):
redis._redis.sadd("foo_1", "bar")
redis._redis.sadd("foo_2", "baz")
assert await redis.smove("foo_2", "foo_1", "barbar") is 0
assert redis._redis.smembers("foo_1") == {b"bar"}
assert redis._redis.smembers("foo_2") == {b"baz"}
@pytest.mark.asyncio
async def test_smove_bad_key(redis):
redis._redis.sadd("foo_1", "bar")
redis._redis.sadd("foo_2", "baz")
assert await redis.smove("foo_3", "foo_1", "baz") is 0
assert redis._redis.smembers("foo_1") == {b"bar"}
assert redis._redis.smembers("foo_2") == {b"baz"}
@pytest.mark.asyncio
async def test_smove_new_key(redis):
redis._redis.sadd("foo_1", "bar")
assert await redis.smove("foo_1", "foo_2", "bar") is 1
assert redis._redis.smembers("foo_1") == set()
assert redis._redis.smembers("foo_2") == {b"bar"}
@pytest.mark.asyncio
async def test_smove_bad_type(redis):
redis._redis.set("foo_1", "1")
redis._redis.sadd("foo_2", "bar")
with pytest.raises(ReplyError):
assert await redis.smove("foo_1", "foo_2", "1")
with pytest.raises(ReplyError):
assert await redis.smove("foo_2", "foo_1", "bar")
assert redis._redis.get("foo_1") == b"1"
assert redis._redis.smembers("foo_2") == {b"bar"}
@pytest.mark.asyncio
async def test_spop_single(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.spop("foo", 1))[0] in [b"bar", b"baz"]
assert redis._redis.smembers("foo") in ({b"bar"}, {b"baz"})
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.spop("foo", 1, encoding="utf8"))[0] in ["bar", "baz"]
assert redis._redis.smembers("foo") in ({b"bar"}, {b"baz"})
@pytest.mark.asyncio
async def test_spop_zero_count(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.spop("foo", 0)) == []
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_spop_negative_count(redis):
redis._redis.sadd("foo", "bar", "baz")
with pytest.raises(ReplyError):
await redis.spop("foo", -1)
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_spop_null_count(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.spop("foo", None)) in [b"bar", b"baz"]
assert redis._redis.smembers("foo") in ({b"bar"}, {b"baz"})
@pytest.mark.asyncio
async def test_spop_default_count(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.spop("foo")) in [b"bar", b"baz"]
assert redis._redis.smembers("foo") in ({b"bar"}, {b"baz"})
@pytest.mark.asyncio
async def test_spop_all_items(redis):
redis._redis.sadd("foo", "bar", "baz")
assert sorted(await redis.spop("foo", 2)) == [b"bar", b"baz"]
assert redis._redis.smembers("foo") == set()
@pytest.mark.asyncio
async def test_spop_too_many_items(redis):
redis._redis.sadd("foo", "bar", "baz")
assert sorted(await redis.spop("foo", 3)) == [b"bar", b"baz"]
assert redis._redis.smembers("foo") == set()
@pytest.mark.asyncio
async def test_srandmember_single(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.srandmember("foo", 1))[0] in [b"bar", b"baz"]
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_zero_count(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.srandmember("foo", 0)) == []
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_two_items_allow_dup(redis):
for i in range(20):
redis._redis.sadd("foo", "bar", "baz")
ret = await redis.srandmember("foo", -2)
assert isinstance(ret, list)
assert tuple(sorted(ret)) in list(
combinations_with_replacement([b"bar", b"baz"], 2)
)
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_too_many_items_allow_dup(redis):
for i in range(20):
redis._redis.sadd("foo", "bar", "baz")
ret = await redis.srandmember("foo", -3)
assert isinstance(ret, list)
assert tuple(sorted(ret)) in list(
combinations_with_replacement([b"bar", b"baz"], 3)
)
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_null_count(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.srandmember("foo", None)) in [b"bar", b"baz"]
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_default_count(redis):
redis._redis.sadd("foo", "bar", "baz")
assert (await redis.srandmember("foo")) in [b"bar", b"baz"]
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_all_items_unique(redis):
redis._redis.sadd("foo", "bar", "baz")
assert sorted(await redis.srandmember("foo", 2)) == [b"bar", b"baz"]
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_too_many_items_unique(redis):
redis._redis.sadd("foo", "bar", "baz")
assert sorted(await redis.srandmember("foo", 3)) == [b"bar", b"baz"]
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_one_item_encoding(redis):
redis._redis.sadd("foo", "bar", "baz")
assert await redis.srandmember("foo", encoding="utf8") in ["bar", "baz"]
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srandmember_two_items_encoding(redis):
redis._redis.sadd("foo", "bar", "baz")
assert sorted(await redis.srandmember("foo", 2, encoding="utf8")) == ["bar", "baz"]
assert redis._redis.smembers("foo") == {b"bar", b"baz"}
@pytest.mark.asyncio
async def test_srem(redis):
redis._redis.sadd("foo", "bar", "baz")
assert await redis.srem("foo", "bar") is 1
redis._redis.sadd("foo", "bar", "baz")
assert await redis.srem("foo", "bar", "baz") is 2
redis._redis.sadd("foo", "bar", "baz")
assert await redis.srem("foo", "bar", "baz", "bazzo") is 2
@pytest.mark.asyncio
async def test_sunion(redis):
redis._redis.sadd("foo_1", "bar", "baz")
redis._redis.sadd("foo_2", "baz", "bazzo")
assert sorted(await redis.sunion("foo_1", "foo_2")) == [b"bar", b"baz", b"bazzo"]
assert sorted(await redis.sunion("foo_1", "foo_3")) == [b"bar", b"baz"]
assert redis._redis.smembers("foo_1") == {b"bar", b"baz"}
assert redis._redis.smembers("foo_2") == {b"baz", b"bazzo"}
@pytest.mark.asyncio
async def test_sunionstore(redis):
redis._redis.sadd("foo_1", "bar", "baz")
redis._redis.sadd("foo_2", "baz", "bazzo")
assert await redis.sunionstore("foo_3", "foo_1", "foo_2") == 3
assert redis._redis.smembers("foo_1") == {b"bar", b"baz"}
assert redis._redis.smembers("foo_2") == {b"baz", b"bazzo"}
assert redis._redis.smembers("foo_3") == {b"bar", b"baz", b"bazzo"}
@pytest.mark.asyncio
async def test_sscan(redis):
values = ["bar", "baz", "bazzo", "barbar"]
b_values = [b"bar", b"barbar", b"baz", b"bazzo"]
redis._redis.sadd("foo_1", *values)
async def sscan(key, *args, **kwargs):
# Return order is inconsistent between redis and fake redis
resp = await redis.sscan(key, *args, **kwargs)
return resp[0], sorted(resp[1])
assert await sscan("foo_1") == (0, b_values)
assert await sscan("foo_1", 0, count=10) == (0, b_values)
assert await sscan("foo_1", match="bar") == (0, b_values[:1])
assert await sscan("foo_1", match="bar*") == (0, b_values[:2])
resp = await sscan("foo_1", 10)
assert isinstance(resp[0], int)
assert isinstance(resp[1], list) # Elements returned are undefined
resp = await sscan("foo_1", -10)
assert isinstance(resp[0], int)
assert isinstance(resp[1], list) # Elements returned are undefined
@pytest.mark.asyncio
async def test_isscan(redis):
values = ["bar", "baz", "bazzo", "barbar"]
b_values = {b"bar", b"barbar", b"baz", b"bazzo"}
redis._redis.sadd("foo_1", *values)
values = {val async for val in redis.isscan("foo_1")}
assert values == b_values
values = {val async for val in redis.isscan("foo_1", count=5)}
assert values == b_values
values = {val async for val in redis.isscan("foo_1", match="bar")}
assert values == {b"bar"}
values = {val async for val in redis.isscan("foo_1", match="bar*")}
assert values == {b"bar", b"barbar"}
|
kblin/mockaioredis
|
tests/test_set.py
|
Python
|
apache-2.0
| 11,392
|
# Copyright 2020 Zadara Storage, Inc.
# Originally authored by Jeremy Brown - https://github.com/jwbrown77
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zadarapy.validators import verify_versioning, is_valid_minutes, verify_expire_version
def set_versioning(session, bucket_name, versioning, archive_name, return_type=None, **kwargs):
"""
Set versioning in a bucket
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type bucket_name: str
:param bucket_name: Name of the bucket. Required.
:type versioning: str
:param versioning: Type of versioning.
Can be only x-versions-location or x-history-location. Required.
:type archive_name: str
    :param archive_name: Name of the archive to be created. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
versioning = verify_versioning(versioning)
path = "/{0}".format(bucket_name)
headers = {versioning: archive_name}
return session.put_api(path=path, additional_headers=headers, return_type=return_type,
use_port=False, return_header=True, **kwargs)
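# A hypothetical usage sketch (the session object and the names below are
# illustrative): enabling versioning on bucket "photos" with old versions
# kept in "photos-archive" issues a PUT on /photos carrying the header
# {'x-versions-location': 'photos-archive'}:
#     set_versioning(session, bucket_name='photos',
#                    versioning='x-versions-location',
#                    archive_name='photos-archive')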
def add_lifecycle_policy(session, bucket_name, objects_minutes_expiry, objects_expire_version,
objects_name_prefix, other_policies=None, return_type=None, **kwargs):
"""
Add lifecycle policy expiry to a bucket
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type bucket_name: str
:param bucket_name: Name of the bucket. Required.
:type objects_minutes_expiry: int
:param objects_minutes_expiry: How many minutes will the object be valid and won't expire. Required.
:type objects_expire_version: str
:param objects_expire_version: Type of expiration versioning.
Can be only current or previous. Required.
:type objects_name_prefix: str
:param objects_name_prefix: Prefix for objects name. Required.
:type other_policies: str
:param other_policies: Other lifecycle policies.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
objects_minutes_expiry = is_valid_minutes(objects_minutes_expiry)
objects_expire_version = verify_expire_version(objects_expire_version)
policies = "{\"prefix\": \"%s\", \"%s\": %s}" % (objects_name_prefix,
objects_expire_version, objects_minutes_expiry)
if other_policies is not None:
policies += "," + other_policies.replace("[", "").replace("]", "")
return set_expiry_lifecycle_policy(session=session, bucket_name=bucket_name, policies=policies)
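# For illustration (assuming verify_expire_version maps the 'current' choice
# onto the 'curver_after' key used in the curl example further below), a call
# such as
#     add_lifecycle_policy(session, 'mybucket', 259200, 'current', 'logs/')
# builds the policy string
#     {"prefix": "logs/", "curver_after": 259200}
# and passes it on to set_expiry_lifecycle_policy.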
def set_expiry_lifecycle_policy(session, bucket_name, policies="", return_type=None, **kwargs):
"""
Set lifecycle policy expiry in a bucket
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type bucket_name: str
:param bucket_name: Name of the bucket. Required.
:type policies: str
:param policies: Expiry lifecycle policy expiry policy to set.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
api = "https://{}/{}".format(session.zadara_host, bucket_name)
cmd = "curl %s -X PUT -H 'x-auth-token: %s' -H " \
"'x-container-meta-objectexpirer: [%s]'" \
% (api, session.zadara_key, policies)
return run_outside_of_api(cmd)
def create_container(session, name, storage_policy, return_type=None, **kwargs):
"""
    Create a container (bucket) in ZIOS.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type name: str
:param name: Name of the container. Required.
:type storage_policy: str
:param storage_policy: Name of the storage policy e.g. 2-Way-Protection. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
path = "/{0}".format(name)
headers = {"x-storage-policy": storage_policy}
return session.put_api(path=path, additional_headers=headers, use_port=False,
return_type=return_type, return_header=True, **kwargs)
def run_outside_of_api(cmd):
"""
    Run a command outside of the usual session API due to Python limitations,
    e.g. when we need to run a command where one header uses single quotes and
    another header contains a dict that must use double quotes; Python's
    built-in modules fail to handle that. For instance, we can't run this API -
    curl 'https://vsa-0000007e-zadara-qa10.zadarazios.com:443/v1/AUTH_20db47cfaaff46079861b917116decf7/nirhayuntest'
    -X PUT -H 'x-auth-token: gAAAAABelWkqs7uouuMBd5EPopY2HCkQYQKEatQ6Lt52ThEpTNvUKcTBi7pR3iZS2_Wzufgr7GD4unsQlWRb0f'
    -H 'x-container-meta-objectexpirer: [{"prefix": "HAYUN", "curver_after": 259200}]'
    (one header needs to be with single quotes and the dict in the second header
    needs to be with double quotes).
    :type cmd: str
    :param cmd: A valid API command to execute. Required.
"""
from subprocess import Popen, PIPE
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
res = {}
(res["output"], err) = p.communicate()
if p.returncode != 0:
raise AssertionError("Failed to execute commnad: {0}\n{1}".format(cmd, err))
res["status"] = "success"
return res
|
zadarastorage/zadarapy
|
zadarapy/vpsaos/container.py
|
Python
|
apache-2.0
| 7,130
|
"""empty message
Revision ID: 05ef92815fbd
Revises: d6a54eeeaeb9
Create Date: 2020-12-28 12:29:49.628089
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '05ef92815fbd'
down_revision = 'd6a54eeeaeb9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('jobs', sa.Column('customer_id', sa.Integer(), nullable=False))
op.add_column('jobs', sa.Column('hashfile_id', sa.Integer(), nullable=False))
op.create_foreign_key(None, 'jobs', 'customers', ['customer_id'], ['id'])
op.create_foreign_key(None, 'jobs', 'hashfiles', ['hashfile_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'jobs', type_='foreignkey')
op.drop_constraint(None, 'jobs', type_='foreignkey')
op.drop_column('jobs', 'hashfile_id')
op.drop_column('jobs', 'customer_id')
# ### end Alembic commands ###
|
hashview/hashview
|
migrations/versions/05ef92815fbd_.py
|
Python
|
gpl-3.0
| 1,053
|
import os
from admin_scripts.tests import AdminScriptTestCase
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import find_command, popen_wrapper
from django.db import connection
from django.test import SimpleTestCase, mock, override_settings
from django.test.utils import captured_stderr, extend_sys_path
from django.utils import translation
from django.utils._os import upath
from django.utils.six import StringIO
from .management.commands import dance
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'user_commands',
],
)
class CommandTests(SimpleTestCase):
def test_command(self):
out = StringIO()
management.call_command('dance', stdout=out)
self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())
def test_command_style(self):
out = StringIO()
management.call_command('dance', style='Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
# Passing options as arguments also works (thanks argparse)
management.call_command('dance', '--style', 'Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
def test_language_preserved(self):
out = StringIO()
with translation.override('fr'):
management.call_command('dance', stdout=out)
self.assertEqual(translation.get_language(), 'fr')
def test_explode(self):
""" Test that an unknown command raises CommandError """
with self.assertRaises(CommandError):
management.call_command(('explode',))
def test_system_exit(self):
""" Exception raised in a command should raise CommandError with
call_command, but SystemExit when run from command line
"""
with self.assertRaises(CommandError):
management.call_command('dance', example="raise")
with captured_stderr() as stderr, self.assertRaises(SystemExit):
management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
self.assertIn("CommandError", stderr.getvalue())
def test_deactivate_locale_set(self):
# Deactivate translation when set to true
with translation.override('pl'):
result = management.call_command('leave_locale_alone_false', stdout=StringIO())
self.assertIsNone(result)
def test_configured_locale_preserved(self):
# Leaves locale from settings when set to false
with translation.override('pl'):
result = management.call_command('leave_locale_alone_true', stdout=StringIO())
self.assertEqual(result, "pl")
def test_find_command_without_PATH(self):
"""
find_command should still work when the PATH environment variable
doesn't exist (#22256).
"""
current_path = os.environ.pop('PATH', None)
try:
self.assertIsNone(find_command('_missing_'))
finally:
if current_path is not None:
os.environ['PATH'] = current_path
def test_discover_commands_in_eggs(self):
"""
Test that management commands can also be loaded from Python eggs.
"""
egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
egg_name = '%s/basic.egg' % egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['commandegg']):
cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
self.assertEqual(cmds, ['eggcommand'])
def test_call_command_option_parsing(self):
"""
When passing the long option name to call_command, the available option
key is the option dest name (#22985).
"""
out = StringIO()
management.call_command('dance', stdout=out, opt_3=True)
self.assertIn("option3", out.getvalue())
self.assertNotIn("opt_3", out.getvalue())
self.assertNotIn("opt-3", out.getvalue())
def test_call_command_option_parsing_non_string_arg(self):
"""
It should be possible to pass non-string arguments to call_command.
"""
out = StringIO()
management.call_command('dance', 1, verbosity=0, stdout=out)
self.assertIn("You passed 1 as a positional argument.", out.getvalue())
def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self):
out = StringIO()
management.call_command('hal', "--empty", stdout=out)
self.assertIn("Dave, I can't do that.\n", out.getvalue())
def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
out = StringIO()
management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
out = StringIO()
management.call_command('hal', "--verbosity", "3", "myapp", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
with self.assertRaises(CommandError):
management.call_command('hal', stdout=StringIO())
def test_output_transaction(self):
output = management.call_command('transaction', stdout=StringIO(), no_color=True)
self.assertTrue(output.strip().startswith(connection.ops.start_transaction_sql()))
self.assertTrue(output.strip().endswith(connection.ops.end_transaction_sql()))
def test_call_command_no_checks(self):
"""
By default, call_command should not trigger the check framework, unless
specifically asked.
"""
self.counter = 0
def patched_check(self_, **kwargs):
self.counter = self.counter + 1
saved_check = BaseCommand.check
BaseCommand.check = patched_check
try:
management.call_command("dance", verbosity=0)
self.assertEqual(self.counter, 0)
management.call_command("dance", verbosity=0, skip_checks=False)
self.assertEqual(self.counter, 1)
finally:
BaseCommand.check = saved_check
def test_check_migrations(self):
requires_migrations_checks = dance.Command.requires_migrations_checks
self.assertEqual(requires_migrations_checks, False)
try:
with mock.patch.object(BaseCommand, 'check_migrations') as check_migrations:
management.call_command('dance', verbosity=0)
self.assertFalse(check_migrations.called)
dance.Command.requires_migrations_checks = True
management.call_command('dance', verbosity=0)
self.assertTrue(check_migrations.called)
finally:
dance.Command.requires_migrations_checks = requires_migrations_checks
class CommandRunTests(AdminScriptTestCase):
"""
Tests that need to run by simulating the command line, not by call_command.
"""
def tearDown(self):
self.remove_settings('settings.py')
def test_script_prefix_set_in_commands(self):
self.write_settings('settings.py', apps=['user_commands'], sdict={
'ROOT_URLCONF': '"user_commands.urls"',
'FORCE_SCRIPT_NAME': '"/PREFIX/"',
})
out, err = self.run_manage(['reverse_url'])
self.assertNoOutput(err)
self.assertEqual(out.strip(), '/PREFIX/some/url/')
class UtilsTests(SimpleTestCase):
def test_no_existent_external_program(self):
with self.assertRaises(CommandError):
popen_wrapper(['a_42_command_that_doesnt_exist_42'])
|
filias/django
|
tests/user_commands/tests.py
|
Python
|
bsd-3-clause
| 8,086
|
import errno
import os
import pwd
import shutil
import sys
from jinja2 import Environment, FileSystemLoader
class TutorialEnv:
LOCAL_MACHINE = ("Local Machine Condor Pool", "submit-host")
USC_HPCC_CLUSTER = ("USC HPCC Cluster", "usc-hpcc")
OSG_FROM_ISI = ("OSG from ISI submit node", "osg")
XSEDE_BOSCO = ("XSEDE, with Bosco", "xsede-bosco")
BLUEWATERS_GLITE = ("Bluewaters, with Glite", "bw-glite")
TACC_WRANGLER = ("TACC Wrangler with Glite", "wrangler-glite")
OLCF_TITAN = ("OLCF TITAN with Glite", "titan-glite")
OLCF_SUMMIT_KUBERNETES_BOSCO = (
"OLCF Summit from Kubernetes using BOSCO",
"summit-kub-bosco",
)
class TutorialExample:
PROCESS = ("Process", "process")
PIPELINE = ("Pipeline", "pipeline")
SPLIT = ("Split", "split")
MERGE = ("Merge", "merge")
EPA = ("EPA (requires R)", "r-epa")
DIAMOND = ("Diamond", "diamond")
CONTAINER = ("Population Modeling using Containers", "population")
MPI = ("MPI Hello World", "mpi-hw")
def choice(question, options, default):
"Ask the user to choose from a short list of named options"
while True:
sys.stdout.write("{} ({}) [{}]: ".format(question, "/".join(options), default))
answer = sys.stdin.readline().strip()
if len(answer) == 0:
return default
for opt in options:
if answer == opt:
return answer
def yesno(question, default="y"):
"Ask the user a yes/no question"
while True:
sys.stdout.write("{} (y/n) [{}]: ".format(question, default))
answer = sys.stdin.readline().strip().lower()
if len(answer) == 0:
answer = default
if answer == "y":
return True
elif answer == "n":
return False
def query(question, default=None):
"Ask the user a question and return the response"
while True:
if default:
sys.stdout.write("{} [{}]: ".format(question, default))
else:
sys.stdout.write("%s: " % question)
answer = sys.stdin.readline().strip().replace(" ", "_")
if answer == "":
if default:
return default
else:
return answer
def optionlist(question, options, default=0):
"Ask the user to choose from a list of options"
for i, option in enumerate(options):
print("%d: %s" % (i + 1, option[0]))
while True:
sys.stdout.write("%s (1-%d) [%d]: " % (question, len(options), default + 1))
answer = sys.stdin.readline().strip()
if len(answer) == 0:
return options[default][1]
try:
optno = int(answer)
if optno > 0 and optno <= len(options):
return options[optno - 1][1]
except Exception:
pass
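# Hedged usage sketch (added for illustration; not part of the original module). It shows how
# the prompt helpers above compose; the question strings and option tuples below are made up.
def _example_prompt_session():
    wants_tutorial = yesno("Do you want to generate a tutorial workflow?", "n")
    api = choice("Which DAX generator API?", ["python", "java"], "python")
    site = query("What do you want to call your compute site?", "compute")
    env = optionlist("Which environment?", [("Local Machine", "local"), ("Remote Cluster", "cluster")])
    return wants_tutorial, api, site, env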
class Workflow:
def __init__(self, workflowdir, sharedir):
self.jinja = Environment(loader=FileSystemLoader(sharedir), trim_blocks=True)
self.name = os.path.basename(workflowdir)
self.workflowdir = workflowdir
self.sharedir = sharedir
self.properties = {}
self.home = os.environ["HOME"]
self.user = pwd.getpwuid(os.getuid())[0]
self.tutorial = None
self.generate_tutorial = False
self.tutorial_setup = None
self.compute_queue = "default"
self.project = "MYPROJ123"
sysname, _, _, _, machine = os.uname()
if sysname == "Darwin":
self.os = "MACOSX"
else:
# Probably Linux
self.os = sysname.upper()
self.arch = machine
def copy_template(self, template, dest, mode=0o644):
"Copy template to dest in workflowdir with mode"
path = os.path.join(self.workflowdir, dest)
t = self.jinja.get_template(template)
t.stream(**self.__dict__).dump(path)
os.chmod(path, mode)
def copy_dir(self, src, dest):
# self.mkdir(dest)
if not src.startswith("/"):
src = os.path.join(self.sharedir, src)
try:
dest = os.path.join(self.workflowdir, dest)
shutil.copytree(src, dest)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dest)
else:
raise
def mkdir(self, path):
"Make relative directory in workflowdir"
path = os.path.join(self.workflowdir, path)
if not os.path.exists(path):
os.makedirs(path)
def configure(self):
# The tutorial is a special case
if yesno("Do you want to generate a tutorial workflow?", "n"):
self.config = "tutorial"
self.daxgen = "tutorial"
self.generate_tutorial = True
# determine the environment to setup tutorial for
self.tutorial_setup = optionlist(
"What environment is tutorial to be setup for?",
[
TutorialEnv.LOCAL_MACHINE,
TutorialEnv.USC_HPCC_CLUSTER,
TutorialEnv.OSG_FROM_ISI,
TutorialEnv.XSEDE_BOSCO,
TutorialEnv.BLUEWATERS_GLITE,
TutorialEnv.TACC_WRANGLER,
TutorialEnv.OLCF_TITAN,
TutorialEnv.OLCF_SUMMIT_KUBERNETES_BOSCO,
],
)
# figure out what example options to provide
examples = [
TutorialExample.PROCESS,
TutorialExample.PIPELINE,
TutorialExample.SPLIT,
TutorialExample.MERGE,
TutorialExample.EPA,
TutorialExample.CONTAINER,
]
if self.tutorial_setup != "osg":
examples.append(TutorialExample.DIAMOND)
if self.tutorial_setup in [
"bw-glite",
"wrangler-glite",
"titan-glite",
"summit-kub-bosco",
]:
examples.append(TutorialExample.MPI)
self.project = query(
"What project your jobs should run under. For example on TACC there are like : TG-DDM160003 ?"
)
self.tutorial = optionlist("What tutorial workflow do you want?", examples)
self.setup_tutorial()
return
# Determine which DAX generator API to use
self.daxgen = choice(
"What DAX generator API do you want to use?",
["python", "perl", "java", "r"],
"python",
)
# Determine what kind of site catalog we need to generate
self.config = optionlist(
"What does your computing infrastructure look like?",
[
("Local Machine Condor Pool", "condorpool"),
("Remote Cluster using Globus GRAM", "globus"),
("Remote Cluster using CREAMCE", "creamce"),
("Local PBS Cluster with Glite", "glite"),
("Remote PBS Cluster with BOSCO and SSH", "bosco"),
],
)
# Find out some information about the site
self.sitename = query("What do you want to call your compute site?", "compute")
self.os = choice(
"What OS does your compute site have?", ["LINUX", "MACOSX"], self.os
)
self.arch = choice(
"What architecture does your compute site have?",
["x86_64", "x86"],
self.arch,
)
def setup_tutorial(self):
"""
Set up tutorial for pre-defined computing environments
:return:
"""
if self.tutorial_setup is None:
self.tutorial_setup = "submit-host"
if self.tutorial_setup == "submit-host":
self.sitename = "condorpool"
elif self.tutorial_setup == "usc-hpcc":
self.sitename = "usc-hpcc"
self.config = "glite"
self.compute_queue = "quick"
# for running the whole workflow as mpi job
self.properties["pegasus.job.aggregator"] = "mpiexec"
elif self.tutorial_setup == "osg":
self.sitename = "osg"
self.os = "linux"
if not yesno("Do you want to use Condor file transfers", "y"):
self.staging_site = "isi_workflow"
elif self.tutorial_setup == "xsede-bosco":
self.sitename = "condorpool"
elif self.tutorial_setup == "bw-glite":
self.sitename = "bluewaters"
self.config = "glite"
self.compute_queue = "normal"
elif self.tutorial_setup == "wrangler-glite":
self.sitename = "wrangler"
self.config = "glite"
self.compute_queue = "normal"
elif self.tutorial_setup == "titan-glite":
self.sitename = "titan"
self.config = "glite"
self.compute_queue = "titan"
elif self.tutorial_setup == "summit-kub-bosco":
self.sitename = "summit"
self.config = "bosco"
self.compute_queue = "batch"
return
def generate(self):
os.makedirs(self.workflowdir)
if self.tutorial != "population":
self.mkdir("input")
self.mkdir("output")
if self.generate_tutorial:
self.copy_template("%s/tc.txt" % self.tutorial, "tc.txt")
if self.tutorial == "r-epa":
self.copy_template("%s/daxgen.R" % self.tutorial, "daxgen.R")
elif self.tutorial != "mpi-hw":
self.copy_template("%s/daxgen.py" % self.tutorial, "daxgen.py")
if self.tutorial == "diamond":
# Executables used by the diamond workflow
self.mkdir("bin")
self.copy_template(
"diamond/transformation.py", "bin/preprocess", mode=0o755
)
self.copy_template(
"diamond/transformation.py", "bin/findrange", mode=0o755
)
self.copy_template(
"diamond/transformation.py", "bin/analyze", mode=0o755
)
# Diamond input file
self.copy_template("diamond/f.a", "input/f.a")
elif self.tutorial == "split":
# Split workflow input file
self.mkdir("bin")
self.copy_template("split/pegasus.html", "input/pegasus.html")
elif self.tutorial == "r-epa":
# Executables used by the R-EPA workflow
self.mkdir("bin")
self.copy_template(
"r-epa/epa-wrapper.sh", "bin/epa-wrapper.sh", mode=0o755
)
self.copy_template("r-epa/setupvar.R", "bin/setupvar.R", mode=0o755)
self.copy_template(
"r-epa/weighted.average.R", "bin/weighted.average.R", mode=0o755
)
self.copy_template(
"r-epa/cumulative.percentiles.R",
"bin/cumulative.percentiles.R",
mode=0o755,
)
elif self.tutorial == "population":
self.copy_template("%s/Dockerfile" % self.tutorial, "Dockerfile")
self.copy_template("%s/Singularity" % self.tutorial, "Singularity")
self.copy_template(
"%s/tc.txt.containers" % self.tutorial, "tc.txt.containers"
)
self.copy_dir("%s/scripts" % self.tutorial, "scripts")
self.copy_dir("%s/data" % self.tutorial, "input")
# copy the mpi wrapper, c code and mpi
elif self.tutorial == "mpi-hw":
# copy the mpi wrapper, c code and mpi example
# Executables used by the mpi-hw workflow
self.mkdir("bin")
self.copy_template(
"%s/pegasus-mpi-hw.c" % self.tutorial, "pegasus-mpi-hw.c"
)
self.copy_template("%s/Makefile" % self.tutorial, "Makefile")
self.copy_template("%s/daxgen.py.template" % self.tutorial, "daxgen.py")
self.copy_template(
"%s/mpi-hello-world-wrapper" % self.tutorial,
"bin/mpi-hello-world-wrapper",
mode=0o755,
)
self.copy_template("split/pegasus.html", "input/f.in")
else:
self.copy_template("tc.txt", "tc.txt")
if self.daxgen == "python":
self.copy_template("daxgen/daxgen.py", "daxgen.py")
elif self.daxgen == "perl":
self.copy_template("daxgen/daxgen.pl", "daxgen.pl")
elif self.daxgen == "java":
self.copy_template("daxgen/DAXGen.java", "DAXGen.java")
elif self.daxgen == "r":
self.copy_template("daxgen/daxgen.R", "daxgen.R")
else:
assert False
self.copy_template("sites.xml", "sites.xml")
self.copy_template("plan_dax.sh", "plan_dax.sh", mode=0o755)
self.copy_template("plan_cluster_dax.sh", "plan_cluster_dax.sh", mode=0o755)
self.copy_template("generate_dax.sh", "generate_dax.sh", mode=0o755)
self.copy_template("README.md", "README.md")
self.copy_template("rc.txt", "rc.txt")
self.copy_template("pegasus.properties", "pegasus.properties")
if self.tutorial == "diamond":
if self.tutorial_setup == "wrangler-glite":
self.copy_template(
"pmc-wrapper.wrangler", "bin/pmc-wrapper", mode=0o755
)
elif self.tutorial_setup == "titan-glite":
self.copy_template("pmc-wrapper.titan", "bin/pmc-wrapper", mode=0o755)
elif self.tutorial_setup == "wrangler-glite":
self.copy_template(
"pmc-wrapper.wrangler", "bin/pmc-wrapper", mode=0o755
)
elif self.tutorial_setup == "summit-kub-bosco":
self.copy_template("pmc-wrapper.summit", "bin/pmc-wrapper", mode=0o755)
if self.generate_tutorial:
sys.stdout.write(
"Pegasus Tutorial setup for example workflow - %s for execution on %s in directory %s\n"
% (self.tutorial, self.tutorial_setup, self.workflowdir)
)
def usage():
print("Usage: %s WORKFLOW_DIR" % sys.argv[0])
def main(pegasus_share_dir):
if len(sys.argv) != 2:
usage()
exit(1)
if "-h" in sys.argv:
usage()
exit(1)
workflowdir = sys.argv[1]
if os.path.exists(workflowdir):
print("ERROR: WORKFLOW_DIR '%s' already exists" % workflowdir)
exit(1)
workflowdir = os.path.abspath(workflowdir)
sharedir = os.path.join(pegasus_share_dir, "init")
w = Workflow(workflowdir, sharedir)
w.configure()
w.generate()
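# Hedged example (added for illustration; not part of the original module). The pegasus-init
# wrapper normally supplies the installed share directory; "/usr/share/pegasus" below is only
# an assumed location for a typical system-wide install.
if __name__ == "__main__":
    main("/usr/share/pegasus")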
|
pegasus-isi/pegasus
|
packages/pegasus-python/src/Pegasus/init-old.py
|
Python
|
apache-2.0
| 14,973
|
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import cuda, float32, float64, int32
from numba.cuda.testing import unittest
@cuda.jit(argtypes=[float32[:, :], int32, int32])
def div(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
@cuda.jit(argtypes=[float64[:, :], int32, int32])
def div_double(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
class TestCudaIDiv(unittest.TestCase):
def test_inplace_div(self):
x = np.ones((2, 2), dtype=np.float32)
grid = cuda.to_device(x)
div(grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
def test_inplace_div_double(self):
x = np.ones((2, 2), dtype=np.float64)
grid = cuda.to_device(x)
div_double(grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
if __name__ == '__main__':
unittest.main()
|
GaZ3ll3/numba
|
numba/cuda/tests/cudapy/test_idiv.py
|
Python
|
bsd-2-clause
| 1,025
|
from shinymud.lib.world import World
from shinymud.models import Model, Column, model_list
from shinymud.models.shiny_types import *
from shinymud.models.item_types import ITEM_TYPES
from shinymud.data.config import CURRENCY
import json
import re
class NpcAiPack(Model):
log = World.get_world().log
"""The base class that must be inherited by all ai packs.
    If you're going to build a new ai pack, the first step is to inherit from
this class and implement its required functions (explained below!).
Remember to add your new ai pack to the registry, or else it won't exist
in game! Do so by adding this after the ai class:
NPC_AI_PACKS = {'<in-game_ai_pack_name>': <class_name>}
And this to register it as a model:
model_list.register(<class_name>)
"""
def __init__(self, args={}):
""" An AiPack's __init__ function should take a dictionary of keyword
arguments which could be empty (if this is a brand new instance), or hold
the saved values of your attributes loaded from the database.
Your init function should look something like the following:
You will probably want your ai pack inherit from a model, which allows for
automatic saving and loading of basic attributes. You do this by passing
a single line to the Model:
Model.__init__(self, args)
Now, if your ai has any columns they will be automatically loaded with
your ai pack.
        (See Models in __init__.py for how to use columns to load data.)
"""
raise NpcAiTypeInterfaceError('You need to implement the init function.')
def __str__(self):
"""Return a string representation of this ai pack to be displayed
during BuildMode.
        To be consistent throughout BuildMode, it should have a heading in all
caps, and each of its attributes labeled and indented by two spaces
beneath it.
The string returned should look something like the following:
ITEMTYPE ATTRIBUTES:
foo: The value of the foo attribute
bar: The value of the bar attribute
"""
raise NpcAiTypeInterfaceError('You need to implement the str function.')
class NpcAiTypeInterfaceError(Exception):
"""The default error to be raised if a required function is not implemented
in a NpcAiPack subclass.
"""
pass
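# Hedged sketch (added for illustration; not part of ShinyMUD): a minimal ai pack following
# the contract the NpcAiPack docstrings describe -- inherit, declare db columns, implement
# __init__ and __str__, then register. The "Greeter" name and its 'greeting' column are
# invented for this example.
class _ExampleGreeter(NpcAiPack):
    help = "<title>Greeter (Npc AI Pack)\nGives an NPC a configurable greeting."
    db_table_name = 'greeter'
    db_columns = Model.db_columns + [
        Column('npc', foreign_key=('npc', 'dbid'), null=False, type='INTEGER',
               write=lambda npc: npc.dbid),
        Column('greeting', default='Hello!'),
    ]
    def __init__(self, args={}):
        Model.__init__(self, args)
    def __str__(self):
        return "GREETER ATTRIBUTES:\n  greeting: %s\n" % self.greeting
# To make such a pack live you would also call model_list.register(_ExampleGreeter) and add
# an entry to NPC_AI_PACKS (defined at the bottom of this file).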
class MerchandiseList(Model):
"""Merchandise list for a particular NPC.
This class handles all of the items a Merchant buys and sells. This class
    needs to handle the arbitrary destruction and importation of areas, while still
keeping track of all the items it sells. It does so by keeping 'soft links' with
the items, referencing them by item id and verifying them by area and name.
When an area with items is removed, we still keep track of items in a 'dead' list.
The function "resurrect()" is used to bring them back when the area is re-imported.
"""
db_table_name = 'merchandise_list'
db_columns = Model.db_columns + [
Column('merchant', foreign_key=('merchant', 'dbid'), null=False, type='INTEGER',
write=lambda merchant: merchant.dbid),
Column('live',read=read_merchandise, write=write_merchandise),
Column('dead', read=read_list, write=write_list),
Column('buyer', read=to_bool, default=True),
Column('markup', read=read_float, type='NUMBER', default=1),
]
def __init__(self, args={}):
Model.__init__(self, args)
# Both the "live" and "dead" lists contain dictionaries in the form:
# {'id': item_id, 'area': item_area, 'name': item_name, 'price': price}
#
# Neither directly keeps track of items. If an item on the merchandise
# list no longer exists, it will be stored in the 'dead' list. Dead
        # items are occasionally checked on, and re-added to the 'live' list
# if the area gets re-imported.
#
# Keywords are stored with live items so we can check quickly if they
# are in the merchandise list. Keywords are not saved with their items,
# and may not exist for dead items.
if not self.live:
self.live = []
if not self.dead:
self.dead = []
self.resolve()
def resolve(self):
"""Check live items, and make sure they are still alive. """
        # Iterate over a copy: removing from self.live while looping over it would skip entries.
        for group in self.live[:]:
i = self.verify_item(group)
if not i:
self.live.remove(group)
self.dead.append(group)
else:
group['keywords'] = i.keywords
group['price'] = int(group['price'])
def resurrect(self):
"""Try to find the items in the self.dead list: if they can now be found
in the world, remove them from the self.dead list and add them to the
self.live list.
"""
        # Iterate over a copy: removing from self.dead while looping over it would skip entries.
        for group in self.dead[:]:
i = self.verify_item(group)
if i:
self.dead.remove(group)
self.live.append(group)
group['keywords'] = i.keywords
group['price'] = int(group['price'])
def merch_list(self):
"""Get a list of the item's for sale to players."""
if self.dead:
self.resurrect()
self.resolve()
if self.live:
sl = ''
for group in self.live:
sl += ' %s %s -- %s\n' % (str(group['price']), CURRENCY,
group['name'])
else:
sl = None
return sl
def build_list(self):
"""Gets a list of the sale_items formatted for Builders."""
if self.dead:
self.resurrect()
sl = ''
i = 1
for group in self.live:
sl += ' [%s] %s %s -- %s (%s:%s)\n' % (str(i), str(group['price']),
self.world.currency_name,
group['name'],
group['id'],
group['area'])
i += 1
if self.dead:
sl = '---- Missing Items ----\n ' +\
"This merchant can't find the following items you told him to sell:\n"
for group in self.dead:
sl += 'Item %s from area %s: %s %s.\n' % (group['id'],
group['area'],
str(group['price']),
CURRENCY)
sl += """
These items or their areas have been removed from the world. You can try
restoring them by re-importing any missing areas, or removing them from this
merchant by typing "remove missing".
"""
if not sl:
sl = 'Nothing.'
return sl
def verify_item(self, item_group):
"""Make sure our refrence to the real item matches, and return the item
if true. Otherwise return false.
"""
if self.world.area_exists(item_group.get('area')):
item = self.world.get_area(item_group.get('area')).get_item(item_group.get('id'))
if item and item.name == item_group['name']:
return item
return None
def add_item(self, item_group):
"""Add an item (and its price) to the self.live list.
"""
        item, price = item_group
        self.live.append({'id': item.id, 'area': item.area.name,
                          'name': item.name, 'price': price,
                          'keywords': item.keywords
                          })
def get_item(self, keyword):
"""If this merchant has an item with the given keyword, return it and
its price in [item, price] form, else return None.
"""
for group in self.live:
if keyword in group['keywords']:
item = self.verify_item(group)
if item:
return (item, group.get('price'))
return None
def pop(self, index):
"""Remove and return the item at the given index for self.live.
If the index given is invalid, return None.
"""
        if (index >= len(self.live)) or (index < 0):
return None
group = self.live.pop(index)
return (self.verify_item(group), group.get("price"))
def reset_dead(self):
"""Set the self.dead list back to an empty list.
"""
self.dead = []
model_list.register(MerchandiseList)
class Merchant(NpcAiPack):
help = (
"""<title>Merchant (Npc AI Pack)
The Merchant AI pack is meant to give NPCs the ability to become merchants.
"""
)
plural_map = {'plain':'plain items'}
plural_map.update(dict([(key, val.plural) for key,val in ITEM_TYPES.items()]))
db_table_name = 'merchant'
db_columns = Model.db_columns + [
Column('npc', foreign_key=('npc', 'dbid'), null=False, type='INTEGER',
write=lambda npc: npc.dbid),
Column('buyer', read=to_bool, default=True),
Column('markup', read=read_float, type='NUMBER', default=1),
Column('buys_types', read=read_list, write=write_list),
Column('sale_items', write=lambda ml: ml.save() if ml else None)
]
def __init__(self, args={}):
Model.__init__(self, args)
self.buys_types = []
if not self.sale_items:
#We only get here if this is the first instance of the merchant,
# otherwise the merchandise will be loaded from load_extras()
merch_dict = {'merchant': self}
self.sale_items = MerchandiseList(merch_dict)
self.sale_items.save()
def load_extras(self):
#Load the merchandise list for the Merchant
merchl = self.world.db.select('* FROM merchandise_list WHERE merchant=?', [self.dbid])
if merchl:
merchl[0]['merchant'] = self
self.sale_items = MerchandiseList(merchl[0])
def __str__(self):
if self.buyer:
bt = ', '.join(self.buys_types) or 'Buys all types.'
else:
bt = 'Merchant is not a buyer.'
s = '\n'.join(["MERCHANT ATTRIBUTES:",
" For sale:\n" + self.sale_items.build_list(),
" Buys items: " + str(self.buyer),
" Buys only these types: " + bt,
" Markup: " + str(self.markup) + 'x item\'s base value.',
""
])
return s
def player_sale_list(self):
merch = self.sale_items.merch_list()
if merch:
l = '%s\'s sale list:\n%s' % (self.npc.name, merch)
else:
l = '%s doesn\'t have anything for sale.' % self.npc.name
return l
def tell_buy_types(self):
"""Return a sentence formatted list of the types this merchant buys."""
if not self.buyer:
return 'I\'m not interested in buying anything.'
if not self.buys_types:
return "I'll buy whatever you've got!"
# only a single thing in the list
p = self.plural_map
if len(self.buys_types) == 1:
m = "I only buy %s." % p[self.buys_types[0]]
# Two or more types
if len(self.buys_types) >= 2:
m = "I only buy %s and %s." % (', '.join(map(lambda x: p[x], self.buys_types[:-1])), p[self.buys_types[-1]])
return m
def get_item(self, keyword):
"""If this merchant has an item with the given keyword, return it and
its price in [item, price] form, else return None.
"""
return self.sale_items.get_item(keyword)
def will_buy(self, item):
"""Return True if merchant will buy a certain item, false if they will
not.
"""
# If the merchant is not a buyer, return False by definition
if not self.buyer:
return False
# If there are no specific types specified (the buys_types list is
# empty), then the merchant buys ALL types and we should return True
# by default
if not self.buys_types:
return True
# If item has no item types, then merchant will only buy the item if
# they accept "plain" items
if (not item.item_types) and ('plain' in self.buys_types):
return True
# If this item has at least one item type that is listed in this
# merchant's buys_types list, then the item is eligible to be bought and
# we should return True
for t in item.item_types:
if t in self.buys_types:
return True
# If we made it to here, then this merchant doesn't want this item type.
return False
def build_set_markup(self, markup, player=None):
"""Set the markup percentage for this merchant."""
if not markup.strip():
return 'Try: "set markup <mark-up>", or see "help merchant".'
try:
mark = float(markup)
        except ValueError:
            return 'Markup must be a number. See "help merchant" for details.'
        else:
            if mark < 0:
                return 'Markup must be a number greater than zero.'
self.markup = mark
self.save()
return 'Markup is now set to %s.' % (markup)
def build_set_buys(self, buyer, player=None):
"""Set whether or not this merchant is a buyer."""
if not buyer.strip():
return 'Try "set buys <true/false>", or see "help merchant".'
b = to_bool(buyer)
if b is None:
return 'Buys items can only be set to true or false.'
self.buyer = b
self.save()
return 'Buys items has been set to %s.' % str(self.buyer)
def build_add_type(self, itype, player=None):
"""Add an item type that this merchant should specialize in buying."""
if not self.buyer:
return ['This merchant is not a buyer.',
'You must set buys to True before this merchant will buy anything from players.']
itype = itype.strip().lower()
if not itype:
return 'Try "add type <item-type>", or see "help merchant".'
if itype == 'all':
self.buys_types = []
self.save()
return 'Merchant now buys all item types.'
if (itype != 'plain') and (itype not in ITEM_TYPES):
return '%s is not a valid item type. See "help merchant" for details.' % itype
self.buys_types.append(itype)
self.save()
return 'Merchant will now buy items of type %s from players.' % itype
def build_remove_type(self, itype, player=None):
"""Remove an item type that this merchant should specialize in buying."""
if not self.buyer:
return 'This merchant is not a buyer.\n' +\
'You must set buys to True before this merchant will buy anything from players.'
if not itype:
return 'Try "remove type <item-type>", or see "help merchant".'
if itype == 'all':
self.buys_types = []
self.save()
return 'Merchant now buys all item types.'
itype = itype.strip().lower()
if itype in self.buys_types:
self.buys_types.remove(itype)
self.save()
return 'Merchant no longer buys items of type %s.' % itype
if (itype != 'plain') and (itype not in ITEM_TYPES):
return '%s is not a valid item type. See "help merchant" for details.' % itype
else:
return 'Merchant already doesn\'t buy items of type %s.' % itype
def build_add_item(self, args, player=None):
"""Add an item for this merchant to sell."""
if not args:
return 'Try "add item <item-id> from area <area-name> price <price>" or see "help merchant".'
# check if the item they gave exists
exp = r'((?P<id1>\d+)[ ]+(at[ ]+)?(price[ ]+)?(?P<price1>\d+))|' +\
r'((?P<id2>\d+)[ ]+((from[ ]+)?(area[ ]+)?(?P<area>\w+)[ ]+)?(at[ ]+)?(price[ ]+)?(?P<price2>\d+))'
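        # Illustration (added): argument strings this parser accepts look like
        #   "2 price 10"                      -> id1='2', price1='10'
        #   "2 from area blackvale price 10"  -> id2='2', area='blackvale', price2='10'
        # where "blackvale" is a made-up area name.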
match = re.match(exp, args, re.I)
if not match:
return 'Try "add item <item-id> from area <area-name> price <price>" or see "help merchant".'
id1, id2, area_name, p1, p2 = match.group('id1', 'id2', 'area', 'price1', 'price2')
# If the builder didn't give a area_name, just take the area from the npc
if not area_name:
area = self.npc.area
item_id = id1
price = p1
else:
area = self.world.get_area(area_name)
if not area:
return 'Area "%s" doesn\'t exist.' % area_name
item_id = id2
price = p2
item = area.get_item(item_id)
if not item:
return 'Item %s doesn\'t exist.' % item_id
if not price.isdigit():
return 'The price should be a whole number.'
self.sale_items.add_item([item, price])
self.save()
return 'Merchant now sells %s.' % item.name
def build_remove_item(self, item, player=None):
"""Remove one of this merchant's sale items."""
if not item:
return 'Try "remove item <item-id>", or see "help merchant".'
if not item.isdigit():
return 'Try "remove item <item-id>", or see "help merchant".'
# We do item - 1 because we give the user a list that starts at 1, not 0
item = self.sale_items.pop(int(item)-1)
if not item:
return 'That item doesn\'t exist.'
self.save()
return 'Merchant no longer sells %s.' % item[0].name
def build_remove_missing(self, args, player=None):
"""Remove any 'dead' items from this merchant's sale_items.
"""
self.sale_items.reset_dead()
self.save()
return 'Any missing items have been cleared.'
model_list.register(Merchant)
NPC_AI_PACKS = {'merchant': Merchant}
|
shinymud/ShinyMUD
|
src/shinymud/models/npc_ai_packs.py
|
Python
|
mit
| 18,537
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.utils import flt
from erpnext.accounts.utils import get_actual_expense, BudgetError, get_fiscal_year
from erpnext.exceptions import InvalidAccountCurrency
class TestJournalEntry(unittest.TestCase):
def test_journal_entry_with_against_jv(self):
jv_invoice = frappe.copy_doc(test_records[2])
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, jv_invoice)
def test_jv_against_sales_order(self):
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
sales_order = make_sales_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, sales_order)
def test_jv_against_purchase_order(self):
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
purchase_order = create_purchase_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[1])
self.jv_against_voucher_testcase(base_jv, purchase_order)
def jv_against_voucher_testcase(self, base_jv, test_voucher):
dr_or_cr = "credit" if test_voucher.doctype in ["Sales Order", "Journal Entry"] else "debit"
test_voucher.insert()
test_voucher.submit()
if test_voucher.doctype == "Journal Entry":
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where account = %s and docstatus = 1 and parent = %s""",
("_Test Receivable - _TC", test_voucher.name)))
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type = %s and reference_name = %s""", (test_voucher.doctype, test_voucher.name)))
base_jv.get("accounts")[0].is_advance = "Yes" if (test_voucher.doctype in ["Sales Order", "Purchase Order"]) else "No"
base_jv.get("accounts")[0].set("reference_type", test_voucher.doctype)
base_jv.get("accounts")[0].set("reference_name", test_voucher.name)
base_jv.insert()
base_jv.submit()
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type = %s and reference_name = %s and {0}=400""".format(dr_or_cr),
(submitted_voucher.doctype, submitted_voucher.name)))
if base_jv.get("accounts")[0].is_advance == "Yes":
self.advance_paid_testcase(base_jv, submitted_voucher, dr_or_cr)
self.cancel_against_voucher_testcase(submitted_voucher)
def advance_paid_testcase(self, base_jv, test_voucher, dr_or_cr):
#Test advance paid field
advance_paid = frappe.db.sql("""select advance_paid from `tab%s`
where name=%s""" % (test_voucher.doctype, '%s'), (test_voucher.name))
payment_against_order = base_jv.get("accounts")[0].get(dr_or_cr)
self.assertTrue(flt(advance_paid[0][0]) == flt(payment_against_order))
def cancel_against_voucher_testcase(self, test_voucher):
if test_voucher.doctype == "Journal Entry":
# if test_voucher is a Journal Entry, test cancellation of test_voucher
test_voucher.cancel()
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type='Journal Entry' and reference_name=%s""", test_voucher.name))
elif test_voucher.doctype in ["Sales Order", "Purchase Order"]:
# if test_voucher is a Sales Order/Purchase Order, test error on cancellation of test_voucher
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertRaises(frappe.LinkExistsError, submitted_voucher.cancel)
def test_jv_against_stock_account(self):
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
set_perpetual_inventory()
jv = frappe.copy_doc(test_records[0])
jv.get("accounts")[0].update({
"account": "_Test Warehouse - _TC",
"party_type": None,
"party": None
})
jv.insert()
from erpnext.accounts.general_ledger import StockAccountInvalidTransaction
self.assertRaises(StockAccountInvalidTransaction, jv.submit)
set_perpetual_inventory(0)
def test_monthly_budget_crossed_ignore(self):
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv.name}))
def test_monthly_budget_crossed_stop(self):
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, "_Test Cost Center - _TC")
self.assertRaises(BudgetError, jv.submit)
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
def test_yearly_budget_crossed_stop(self):
self.test_monthly_budget_crossed_ignore()
frappe.db.set_value("Company", "_Test Company", "yearly_bgt_flag", "Stop")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 150000, "_Test Cost Center - _TC")
self.assertRaises(BudgetError, jv.submit)
frappe.db.set_value("Company", "_Test Company", "yearly_bgt_flag", "Ignore")
def test_monthly_budget_on_cancellation(self):
self.set_total_expense_zero("2013-02-28")
jv1 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 20000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv1.name}))
jv2 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 20000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv2.name}))
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
self.assertRaises(BudgetError, jv1.cancel)
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
def get_actual_expense(self, monthly_end_date):
return get_actual_expense({
"account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center - _TC",
"monthly_end_date": monthly_end_date,
"company": "_Test Company",
"fiscal_year": get_fiscal_year(monthly_end_date)[0]
})
def set_total_expense_zero(self, posting_date):
existing_expense = self.get_actual_expense(posting_date)
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", -existing_expense, "_Test Cost Center - _TC", submit=True)
def test_multi_currency(self):
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Bank - _TC", 100, exchange_rate=50, save=False)
jv.get("accounts")[1].credit_in_account_currency = 5000
jv.submit()
gl_entries = frappe.db.sql("""select account, account_currency, debit, credit,
debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s
order by account asc""", jv.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = {
"_Test Bank USD - _TC": {
"account_currency": "USD",
"debit": 5000,
"debit_in_account_currency": 100,
"credit": 0,
"credit_in_account_currency": 0
},
"_Test Bank - _TC": {
"account_currency": "INR",
"debit": 0,
"debit_in_account_currency": 0,
"credit": 5000,
"credit_in_account_currency": 5000
}
}
for field in ("account_currency", "debit", "debit_in_account_currency", "credit", "credit_in_account_currency"):
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_values[gle.account][field], gle[field])
# cancel
jv.cancel()
gle = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Sales Invoice' and voucher_no=%s""", jv.name)
self.assertFalse(gle)
def test_disallow_change_in_account_currency_for_a_party(self):
# create jv in USD
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Receivable USD - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
jv.submit()
# create jv in USD, but account currency in INR
jv = make_journal_entry("_Test Bank - _TC",
"_Test Receivable - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
self.assertRaises(InvalidAccountCurrency, jv.submit)
# back in USD
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Receivable USD - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
jv.submit()
def make_journal_entry(account1, account2, amount, cost_center=None, exchange_rate=1, save=True, submit=False):
jv = frappe.new_doc("Journal Entry")
jv.posting_date = "2013-02-14"
jv.company = "_Test Company"
jv.user_remark = "test"
jv.multi_currency = 1
jv.set("accounts", [
{
"account": account1,
"cost_center": cost_center,
"debit_in_account_currency": amount if amount > 0 else 0,
"credit_in_account_currency": abs(amount) if amount < 0 else 0,
"exchange_rate": exchange_rate
}, {
"account": account2,
"cost_center": cost_center,
"credit_in_account_currency": amount if amount > 0 else 0,
"debit_in_account_currency": abs(amount) if amount < 0 else 0,
"exchange_rate": exchange_rate
}
])
if save or submit:
jv.insert()
if submit:
jv.submit()
return jv
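def _example_make_journal_entry():
    # Added for illustration only (not an original test helper): shows make_journal_entry()
    # producing a saved-but-unsubmitted draft for 1000 between two of the test accounts
    # already used in this file. The amount is arbitrary.
    return make_journal_entry("_Test Account Cost for Goods Sold - _TC",
        "_Test Bank - _TC", 1000, "_Test Cost Center - _TC")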
test_records = frappe.get_test_records('Journal Entry')
|
aruizramon/alec_erpnext
|
erpnext/accounts/doctype/journal_entry/test_journal_entry.py
|
Python
|
agpl-3.0
| 9,838
|
from time import time
import sys as s
import re
import subprocess as sb
import numpy as np
import array
integer = re.compile("[0-9]+")
#Returns the pair (identifier of patient a.k.a. @filename,list of identifiers of sequences matching a read in this patient)
def parseMatch(filename,i):
number = int(sb.check_output("head -n 1 ./meta/match/testfiles/file" + str(i) + ".test",shell=True))
result = np.zeros(number)
index = 0
with open("./meta/match/testfiles/file" + str(i) + ".test","r+b") as fo:
isFirst = True
for read in fo:
if isFirst:
isFirst = False
else:
iterator = re.finditer(integer,read)
for i in iterator:
result[index] = int(i.group(0))
index += 1
return result
#Returns dictionary @allMatches (key=sample ID a.k.a. @filename,value=list of identifiers of sequences matching a read in this sample)
def parseAllMatch(filenames):
allMatches = dict.fromkeys(filenames)
i = 0
for filename in filenames:
try:
if filename:
sequencesArray = parseMatch(filename,i)
allMatches[filename] = sequencesArray
i += 1
except IOError:
print "\nERROR: Maybe the filename",filename,".match does not exist in \"meta/matches\" folder\n"
s.exit(0)
return allMatches
def parseAllFact(filenames):
ln = len(filenames)
fact = ln/12
allMatchesList = []
start = 0
end = ln/fact
for i in range(fact):
allMatchesList.append(parseAllMatch(filenames[start:end]))
start = end
end += ln/fact
    # Parse any remaining filenames (using filenames[end:] here silently dropped the trailing
    # chunk whenever len(filenames) was not an exact multiple of the chunk size).
    allMatchesList.append(parseAllMatch(filenames[start:]))
for matchDict in allMatchesList[1:]:
allMatchesList[0].update(matchDict)
return allMatchesList[0]
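# Illustration (added): with 24 sample files, fact = 24/12 = 2, so parseAllFact parses two
# chunks of 12 via parseAllMatch and merges the resulting dictionaries into one. Note that
# fewer than 12 filenames would make fact zero and raise ZeroDivisionError in parseAllFact.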
|
kuredatan/taxocluster
|
parsingMatch.py
|
Python
|
mit
| 1,859
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from operator import itemgetter
from itertools import groupby
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
from openerp import netsvc
from openerp import tools
from openerp.tools import float_compare, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
        'name': fields.char('Name', size=64, required=True, help="Incoterms are a series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Code for Incoterms"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM without deleting it."),
}
_defaults = {
'active': True,
}
stock_incoterms()
class stock_journal(osv.osv):
_name = "stock.journal"
_description = "Stock Journal"
_columns = {
'name': fields.char('Stock Journal', size=32, required=True),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'user_id': lambda s, c, u, ctx: u
}
stock_journal()
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Location"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'posz,name'
_order = 'parent_left'
# TODO: implement name_search() in a way that matches the results of name_get!
def name_get(self, cr, uid, ids, context=None):
# always return the full hierarchical name
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
names = [m.name]
parent = m.location_id
while parent:
names.append(parent.name)
parent = parent.location_id
res[m.id] = ' / '.join(reversed(names))
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
return self.search(cr, uid, [('id', 'child_of', ids)], context=context)
def _product_value(self, cr, uid, ids, field_names, arg, context=None):
"""Computes stock value (real and virtual) for a product, as well as stock qty (real and virtual).
@param field_names: Name of field
@return: Dictionary of values
"""
prod_id = context and context.get('product_id', False)
if not prod_id:
return dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
product_product_obj = self.pool.get('product.product')
cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))
dict1 = cr.dictfetchall()
cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))
dict2 = cr.dictfetchall()
res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))
products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))
result = dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
result.update(dict([(i, {}.fromkeys(field_names, 0.0)) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))
currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
currency_obj = self.pool.get('res.currency')
currency = currency_obj.browse(cr, uid, currency_id, context=context)
for loc_id, product_ids in products_by_location.items():
if prod_id:
product_ids = [prod_id]
c = (context or {}).copy()
c['location'] = loc_id
for prod in product_product_obj.browse(cr, uid, product_ids, context=c):
for f in field_names:
if f == 'stock_real':
if loc_id not in result:
result[loc_id] = {}
result[loc_id][f] += prod.qty_available
elif f == 'stock_virtual':
result[loc_id][f] += prod.virtual_available
elif f == 'stock_real_value':
amount = prod.qty_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
elif f == 'stock_virtual_value':
amount = prod.virtual_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
return result
_columns = {
'name': fields.char('Location Name', size=64, required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location for Inter-Companies Transfers')], 'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
""", select = True),
# temporarily removed, as it's unused: 'allocation_method': fields.selection([('fifo', 'FIFO'), ('lifo', 'LIFO'), ('nearest', 'Nearest')], 'Allocation Method', required=True),
'complete_name': fields.function(_complete_name, type='char', size=256, string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id'], 10)}),
'stock_real': fields.function(_product_value, type='float', string='Real Stock', multi="stock"),
'stock_virtual': fields.function(_product_value, type='float', string='Virtual Stock', multi="stock"),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'chained_journal_id': fields.many2one('stock.journal', 'Chaining Journal',help="Inventory Journal in which the chained move will be written, if the Chaining Type is not Transparent (no journal is used if left empty)"),
'chained_location_id': fields.many2one('stock.location', 'Chained Location If Fixed'),
'chained_location_type': fields.selection([('none', 'None'), ('customer', 'Customer'), ('fixed', 'Fixed Location')],
'Chained Location Type', required=True,
help="Determines whether this location is chained to another location, i.e. any incoming product in this location \n" \
"should next go to the chained location. The chained location is determined according to the type :"\
"\n* None: No chaining at all"\
"\n* Customer: The chained location will be taken from the Customer Location field on the Partner form of the Partner that is specified in the Picking list of the incoming products." \
"\n* Fixed Location: The chained location is taken from the next field: Chained Location if Fixed." \
),
'chained_auto_packing': fields.selection(
[('auto', 'Automatic Move'), ('manual', 'Manual Operation'), ('transparent', 'Automatic No Step Added')],
'Chaining Type',
required=True,
help="This is used only if you select a chained location type.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'chained_picking_type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', help="Shipping Type of the Picking List that will contain the chained move (leave empty to automatically detect the type based on the source and destination locations)."),
        'chained_company_id': fields.many2one('res.company', 'Chained Company', help='The company the Picking List containing the chained move will belong to (leave empty to use the default company determination rules)'),
'chained_delay': fields.integer('Chaining Lead Time',help="Delay between original move and chained move in days"),
'partner_id': fields.many2one('res.partner', 'Location Address',help="Address of customer or supplier."),
'icon': fields.selection(tools.icons, 'Icon', size=64,help="Icon show in hierarchical tree view"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)',help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'stock_real_value': fields.function(_product_value, type='float', string='Real Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'stock_virtual_value': fields.function(_product_value, type='float', string='Virtual Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between all companies'),
'scrap_location': fields.boolean('Scrap Location', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
_defaults = {
'active': True,
'usage': 'internal',
'chained_location_type': 'none',
'chained_auto_packing': 'manual',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'icon': False,
'scrap_location': False,
}
def chained_location_get(self, cr, uid, location, partner=None, product=None, context=None):
""" Finds chained location
@param location: Location id
@param partner: Partner id
@param product: Product id
@return: List of values
"""
result = None
if location.chained_location_type == 'customer':
if partner:
result = partner.property_stock_customer
else:
loc_id = self.pool['res.partner'].default_get(cr, uid, ['property_stock_customer'], context=context)['property_stock_customer']
result = self.pool['stock.location'].browse(cr, uid, loc_id, context=context)
elif location.chained_location_type == 'fixed':
result = location.chained_location_id
if result:
return result, location.chained_auto_packing, location.chained_delay, location.chained_journal_id and location.chained_journal_id.id or False, location.chained_company_id and location.chained_company_id.id or False, location.chained_picking_type, False
return result
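    # Illustration (added): when a chained location applies, the return value is the 7-tuple
    # (location, chaining type, delay, journal id or False, company id or False, picking type,
    # False); otherwise None is returned.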
def picking_type_get(self, cr, uid, from_location, to_location, context=None):
""" Gets type of picking.
@param from_location: Source location
@param to_location: Destination location
@return: Location type
"""
result = 'internal'
if (from_location.usage=='internal') and (to_location and to_location.usage in ('customer', 'supplier')):
result = 'out'
elif (from_location.usage in ('supplier', 'customer')) and (to_location.usage == 'internal'):
result = 'in'
return result
def _product_get_all_report(self, cr, uid, ids, product_ids=False, context=None):
return self._product_get_report(cr, uid, ids, product_ids, context, recursive=True)
def _product_get_report(self, cr, uid, ids, product_ids=False,
context=None, recursive=False):
""" Finds the product quantity and price for particular location.
@param product_ids: Ids of product
@param recursive: True or False
@return: Dictionary of values
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product')
# Take the user company and pricetype
context['currency_id'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
# To be able to offer recursive or non-recursive reports we need to prevent recursive quantities by default
context['compute_child'] = False
if not product_ids:
product_ids = product_obj.search(cr, uid, [], context={'active_test': False})
products = product_obj.browse(cr, uid, product_ids, context=context)
products_by_uom = {}
products_by_id = {}
for product in products:
products_by_uom.setdefault(product.uom_id.id, [])
products_by_uom[product.uom_id.id].append(product)
products_by_id.setdefault(product.id, [])
products_by_id[product.id] = product
result = {}
result['product'] = []
for id in ids:
quantity_total = 0.0
total_price = 0.0
for uom_id in products_by_uom.keys():
fnc = self._product_get
if recursive:
fnc = self._product_all_get
ctx = context.copy()
ctx['uom'] = uom_id
qty = fnc(cr, uid, id, [x.id for x in products_by_uom[uom_id]],
context=ctx)
for product_id in qty.keys():
if not qty[product_id]:
continue
product = products_by_id[product_id]
quantity_total += qty[product_id]
# Compute based on pricetype
                    # Choose the right field standard_price to read
amount_unit = product.price_get('standard_price', context=context)[product.id]
price = qty[product_id] * amount_unit
total_price += price
result['product'].append({
'price': amount_unit,
'prod_name': product.name,
'code': product.default_code, # used by lot_overview_all report!
'variants': product.variants or '',
'uom': product.uom_id.name,
'prod_qty': qty[product_id],
'price_value': price,
})
result['total'] = quantity_total
result['total_price'] = total_price
return result
def _product_get_multi_location(self, cr, uid, ids, product_ids=False, context=None,
states=['done'], what=('in', 'out')):
"""
@param product_ids: Ids of product
@param states: List of states
        @param what: tuple of directions to consider, among 'in' and 'out'
        @return: dictionary mapping each product id to its available quantity in the given locations
"""
product_obj = self.pool.get('product.product')
if context is None:
context = {}
context.update({
'states': states,
'what': what,
'location': ids
})
return product_obj.get_product_available(cr, uid, product_ids, context=context)
def _product_get(self, cr, uid, id, product_ids=False, context=None, states=None):
"""
        @param product_ids: ids of the products to consider (all products if False)
        @param states: list of move states to consider, defaults to ['done']
        @return: dictionary mapping each product id to its quantity in the location
"""
if states is None:
states = ['done']
ids = id and [id] or []
return self._product_get_multi_location(cr, uid, ids, product_ids, context=context, states=states)
def _product_all_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
# build the list of ids of children of the location given by id
ids = id and [id] or []
location_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
return self._product_get_multi_location(cr, uid, location_ids, product_ids, context, states)
def _product_virtual_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
return self._product_all_get(cr, uid, id, product_ids, context, ['confirmed', 'waiting', 'assigned', 'done'])
def _product_reserve(self, cr, uid, ids, product_id, product_qty, context=None, lock=False):
"""
Attempt to find a quantity ``product_qty`` (in the product's default uom or the uom passed in ``context``) of product ``product_id``
in locations with id ``ids`` and their child locations. If ``lock`` is True, the stock.move lines
of product with id ``product_id`` in the searched location will be write-locked using Postgres's
"FOR UPDATE NOWAIT" option until the transaction is committed or rolled back, to prevent reservin
twice the same products.
If ``lock`` is True and the lock cannot be obtained (because another transaction has locked some of
the same stock.move lines), a log line will be output and False will be returned, as if there was
not enough stock.
:param product_id: Id of product to reserve
:param product_qty: Quantity of product to reserve (in the product's default uom or the uom passed in ``context``)
:param lock: if True, the stock.move lines of product with id ``product_id`` in all locations (and children locations) with ``ids`` will
be write-locked using postgres's "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back. This is
to prevent reserving twice the same products.
:param context: optional context dictionary: if a 'uom' key is present it will be used instead of the default product uom to
compute the ``product_qty`` and in the return value.
:return: List of tuples in the form (qty, location_id) with the (partial) quantities that can be taken in each location to
                 reach the requested product_qty (``qty`` is expressed in the default uom of the product), or False if enough
products could not be found, or the lock could not be obtained (and ``lock`` was True).
"""
result = []
amount = 0.0
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
uom_rounding = self.pool.get('product.product').browse(cr, uid, product_id, context=context).uom_id.rounding
if context.get('uom'):
uom_rounding = uom_obj.browse(cr, uid, context.get('uom'), context=context).rounding
locations_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
if locations_ids:
# Fetch only the locations in which this product has ever been processed (in or out)
cr.execute("""SELECT l.id FROM stock_location l WHERE l.id in %s AND
EXISTS (SELECT 1 FROM stock_move m WHERE m.product_id = %s
AND ((state = 'done' AND m.location_dest_id = l.id)
OR (state in ('done','assigned') AND m.location_id = l.id)))
""", (tuple(locations_ids), product_id,))
locations_ids = [i for (i,) in cr.fetchall()]
for id in locations_ids:
if lock:
try:
# Must lock with a separate select query because FOR UPDATE can't be used with
# aggregation/group by's (when individual rows aren't identifiable).
# We use a SAVEPOINT to be able to rollback this part of the transaction without
# failing the whole transaction in case the LOCK cannot be acquired.
cr.execute("SAVEPOINT stock_location_product_reserve")
cr.execute("""SELECT id FROM stock_move
WHERE product_id=%s AND
(
(location_dest_id=%s AND
location_id<>%s AND
state='done')
OR
(location_id=%s AND
location_dest_id<>%s AND
state in ('done', 'assigned'))
)
FOR UPDATE of stock_move NOWAIT""", (product_id, id, id, id, id), log_exceptions=False)
except Exception:
# Here it's likely that the FOR UPDATE NOWAIT failed to get the LOCK,
# so we ROLLBACK to the SAVEPOINT to restore the transaction to its earlier
# state, we return False as if the products were not available, and log it:
cr.execute("ROLLBACK TO stock_location_product_reserve")
_logger.warning("Failed attempt to reserve %s x product %s, likely due to another transaction already in progress. Next attempt is likely to work. Detailed error available at DEBUG level.", product_qty, product_id)
_logger.debug("Trace of the failed product reservation attempt: ", exc_info=True)
return False
# XXX TODO: rewrite this with one single query, possibly even the quantity conversion
cr.execute("""SELECT product_uom, sum(product_qty) AS product_qty
FROM stock_move
WHERE location_dest_id=%s AND
location_id<>%s AND
product_id=%s AND
state='done'
GROUP BY product_uom
""",
(id, id, product_id))
results = cr.dictfetchall()
cr.execute("""SELECT product_uom,-sum(product_qty) AS product_qty
FROM stock_move
WHERE location_id=%s AND
location_dest_id<>%s AND
product_id=%s AND
state in ('done', 'assigned')
GROUP BY product_uom
""",
(id, id, product_id))
results += cr.dictfetchall()
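            # The two queries above return, per UoM, the quantities that entered (done moves
            # into this location) and left (done/assigned moves out of it); convert each line
            # to the UoM requested in the context (or the product's default UoM) and sum them.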
total = 0.0
results2 = 0.0
for r in results:
amount = uom_obj._compute_qty(cr, uid, r['product_uom'], r['product_qty'], context.get('uom', False))
results2 += amount
total += amount
if total <= 0.0:
continue
amount = results2
compare_qty = float_compare(amount, 0, precision_rounding=uom_rounding)
if compare_qty == 1:
if amount > min(total, product_qty):
amount = min(product_qty, total)
result.append((amount, id))
product_qty -= amount
total -= amount
if product_qty <= 0.0:
return result
if total <= 0.0:
continue
return False
stock_location()
class stock_tracking(osv.osv):
_name = "stock.tracking"
_description = "Packs"
def checksum(sscc):
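        # Standard SSCC/GS1 modulo-10 check digit: the digits are weighted alternately
        # by 3 and 1 (the 'salt' string), summed, and the check digit is the amount
        # needed to reach the next multiple of 10.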
salt = '31' * 8 + '3'
sum = 0
for sscc_part, salt_part in zip(sscc, salt):
sum += int(sscc_part) * int(salt_part)
return (10 - (sum % 10)) % 10
checksum = staticmethod(checksum)
def make_sscc(self, cr, uid, context=None):
sequence = self.pool.get('ir.sequence').get(cr, uid, 'stock.lot.tracking')
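        # The sequence provides the serial part; append the computed check digit.
        # If the sequence is not purely numeric the checksum fails and the raw
        # sequence is returned unchanged.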
try:
return sequence + str(self.checksum(sequence))
except Exception:
return sequence
_columns = {
'name': fields.char('Pack Reference', size=64, required=True, select=True, help="By default, the pack reference is generated following the sscc standard. (Serial number + 1 check digit)"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a pack without deleting it."),
'serial': fields.char('Additional Reference', size=64, select=True, help="Other reference or serial number"),
'move_ids': fields.one2many('stock.move', 'tracking_id', 'Moves for this pack', readonly=True),
'date': fields.datetime('Creation Date', required=True),
}
_defaults = {
'active': 1,
'name': make_sscc,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = self.search(cr, user, [('serial', '=', name)]+ args, limit=limit, context=context)
ids += self.search(cr, user, [('name', operator, name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def name_get(self, cr, uid, ids, context=None):
"""Append the serial to the name"""
if not len(ids):
return []
res = [ (r['id'], r['serial'] and '%s [%s]' % (r['name'], r['serial'])
or r['name'] )
for r in self.read(cr, uid, ids, ['name', 'serial'],
context=context) ]
return res
def unlink(self, cr, uid, ids, context=None):
raise osv.except_osv(_('Error!'), _('You cannot remove a lot line.'))
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
return self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
stock_tracking()
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Picking List"
_order = "id desc"
def _set_maximum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is greater than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%d """ % (value, pick.id)
if pick.max_date:
sql_str += " and (date_expected='" + pick.max_date + "')"
cr.execute(sql_str)
return True
def _set_minimum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is less than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%s """ % (value, pick.id)
if pick.min_date:
sql_str += " and (date_expected='" + pick.min_date + "')"
cr.execute(sql_str)
return True
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected)
from
stock_move
where
picking_id IN %s
group by
picking_id""",(tuple(ids),))
for pick, dt1, dt2 in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
return res
def _get_stock_move_changes(self, cr, uid, ids, context=None):
'''Return the ids of pickings that should change, due to changes
in stock moves.'''
move_pool = self.pool['stock.move']
picking_ids = set()
for move_obj in move_pool.browse(cr, uid, ids, context=context):
if move_obj.picking_id:
picking_ids.add(move_obj.picking_id.id)
return list(picking_ids)
def create(self, cr, user, vals, context=None):
if ('name' not in vals) or (vals.get('name')=='/'):
seq_obj_name = 'stock.picking.%s' % vals.get('type', 'internal')
vals['name'] = self.pool.get('ir.sequence').get(cr, user, seq_obj_name)
new_id = super(stock_picking, self).create(cr, user, vals, context)
return new_id
_columns = {
'name': fields.char('Reference', size=64, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'origin': fields.char('Source Document', size=64, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', required=True, select=True, help="Shipping type specifies whether goods are coming in or going out."),
'note': fields.text('Notes', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'stock_journal_id': fields.many2one('stock.journal','Stock Journal', select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
        'location_id': fields.many2one('stock.location', 'Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Keep empty if you produce at the location where the finished products are needed. " \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations.", select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Location where the system will stock the finished products.", select=True),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="It specifies whether goods are to be delivered partially or all at once"),
'state': fields.selection([
('draft', 'Draft'),
('cancel', 'Cancelled'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], 'Status', readonly=True, select=True, track_visibility='onchange', help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'min_date': fields.function(
get_min_max_date,
fnct_inv=_set_minimum_date, multi='min_max_date',
store={
'stock.move': (
_get_stock_move_changes,
['date_expected'], 10,
)
},
type='datetime', string='Scheduled Time', select=True,
help="Scheduled time for the shipment to be processed"
),
'date': fields.datetime('Creation Date', help="Creation date, usually the time of the order.", select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'max_date': fields.function(
get_min_max_date,
fnct_inv=_set_maximum_date, multi='min_max_date',
store={
'stock.move': (
_get_stock_move_changes,
['date_expected'], 10,
)
},
type='datetime', string='Max. Expected Date', select=True
),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'auto_picking': fields.boolean('Auto-Picking', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'invoice_state': fields.selection([
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Control",
select=True, required=True, readonly=True, track_visibility='onchange', states={'draft': [('readonly', False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
}
_defaults = {
'name': lambda self, cr, uid, context: '/',
'state': 'draft',
'move_type': 'direct',
'type': 'internal',
'invoice_state': 'none',
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c)
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
    def action_process(self, cr, uid, ids, context=None):
        """Open the partial picking wizard"""
        if context is None:
            context = {}
context.update({
'active_model': self._name,
'active_ids': ids,
'active_id': len(ids) and ids[0] or False
})
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.partial.picking',
'type': 'ir.actions.act_window',
'target': 'new',
'context': context,
'nodestroy': True,
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
picking_obj = self.browse(cr, uid, id, context=context)
if ('name' not in default) or (picking_obj.name == '/'):
seq_obj_name = 'stock.picking.' + picking_obj.type
default['name'] = self.pool.get('ir.sequence').get(cr, uid, seq_obj_name)
default.setdefault('origin', False)
default.setdefault('backorder_id', False)
if 'invoice_state' not in default and picking_obj.invoice_state == 'invoiced':
default['invoice_state'] = '2binvoiced'
res = super(stock_picking, self).copy(cr, uid, id, default, context)
return res
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
if view_type == 'form' and not view_id:
mod_obj = self.pool.get('ir.model.data')
if self._name == "stock.picking.in":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_in_form')
if self._name == "stock.picking.out":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_out_form')
return super(stock_picking, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
return {}
def action_explode(self, cr, uid, moves, context=None):
"""Hook to allow other modules to split the moves of a picking."""
return moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms picking.
@return: True
"""
pickings = self.browse(cr, uid, ids, context=context)
to_update = []
for pick in pickings:
if pick.state != 'confirmed':
to_update.append(pick.id)
if to_update:
self.write(cr, uid, to_update, {'state': 'confirmed'})
todo = []
for picking in pickings:
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
todo = self.action_explode(cr, uid, todo, context)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
return True
def test_auto_picking(self, cr, uid, ids):
# TODO: Check locations to see if in the same location ?
return True
def action_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if all moves are confirmed.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if pick.state == 'draft':
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_confirm', cr)
move_ids = [x.id for x in pick.move_lines if x.state == 'confirmed']
if not move_ids:
raise osv.except_osv(_('Warning!'),_('Not enough stock, unable to reserve the products.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids)
return True
def force_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed','waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def draft_force_assign(self, cr, uid, ids, *args):
""" Confirms picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if not pick.move_lines:
raise osv.except_osv(_('Error!'),_('You cannot process picking without stock moves.'))
wf_service.trg_validate(uid, 'stock.picking', pick.id,
'button_confirm', cr)
return True
def draft_validate(self, cr, uid, ids, context=None):
""" Validates picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
self.draft_force_assign(cr, uid, ids)
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return self.action_process(
cr, uid, ids, context=context)
def cancel_assign(self, cr, uid, ids, *args):
""" Cancels picking and moves.
@return: True
"""
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').cancel_assign(cr, uid, move_ids)
return True
def action_assign_wkf(self, cr, uid, ids, context=None):
""" Changes picking state to assigned.
@return: True
"""
to_update = []
for pick in self.browse(cr, uid, ids, context=context):
if pick.state != 'assigned':
to_update.append(pick.id)
if to_update:
self.write(cr, uid, to_update, {'state': 'assigned'})
return True
def test_finished(self, cr, uid, ids):
""" Tests whether the move is in done or cancel state or not.
@return: True or False
"""
move_ids = self.pool.get('stock.move').search(cr, uid, [('picking_id', 'in', ids)])
for move in self.pool.get('stock.move').browse(cr, uid, move_ids):
if move.state not in ('done', 'cancel'):
if move.product_qty != 0.0:
return False
else:
move.write({'state': 'done'})
return True
def test_assigned(self, cr, uid, ids):
""" Tests whether the move is in assigned state or not.
@return: True or False
"""
        #TOFIX: assignment of move lines should be called before testing assignment, otherwise the picking never reaches the assigned state
ok = True
for pick in self.browse(cr, uid, ids):
mt = pick.move_type
            # incoming shipments are always set as available if they aren't chained
if pick.type == 'in':
if all([x.state != 'waiting' for x in pick.move_lines]):
return True
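            # With move_type 'one' (all at once) every move must be assigned; with
            # 'direct' (partial) a single assigned move with a quantity is enough.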
for move in pick.move_lines:
if (move.state) == 'waiting':
move.check_assign()
if (move.state in ('confirmed', 'draft')) and (mt == 'one'):
return False
if (mt == 'direct') and (move.state == 'assigned') and (move.product_qty):
return True
ok = ok and (move.state in ('cancel', 'done', 'assigned'))
return ok
def action_cancel(self, cr, uid, ids, context=None):
""" Changes picking state to cancel.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
self.write(cr, uid, ids, {'state': 'cancel', 'invoice_state': 'none'})
return True
#
# TODO: change and create a move if not parents
#
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done.
This method is called at the end of the workflow by the activity "done".
@return: True
"""
self.write(cr, uid, ids, {'state': 'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')})
return True
def action_move(self, cr, uid, ids, context=None):
"""Process the Stock Moves of the Picking
This method is called by the workflow by the activity "move".
Normally that happens when the signal button_done is received (button
"Done" pressed on a Picking view).
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
self.pool.get('stock.move').action_confirm(cr, uid, [move.id],
context=context)
todo.append(move.id)
elif move.state in ('assigned','confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo,
context=context)
return True
def get_currency_id(self, cr, uid, picking):
return False
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Gets the partner that will be invoiced
Note that this function is inherited in the sale and purchase modules
@param picking: object of the picking for which we are selecting the partner to invoice
@return: object of the partner to invoice
"""
return picking.partner_id and picking.partner_id.id
def _get_comment_invoice(self, cr, uid, picking):
"""
@return: comment string for invoice
"""
return picking.note or ''
def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
""" Gets price unit for invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: The price unit for the move line
"""
if context is None:
context = {}
if type in ('in_invoice', 'in_refund'):
# Take the user company and pricetype
context['currency_id'] = move_line.company_id.currency_id.id
amount_unit = move_line.product_id.price_get('standard_price', context=context)[move_line.product_id.id]
return amount_unit
else:
return move_line.product_id.list_price
def _get_discount_invoice(self, cr, uid, move_line):
'''Return the discount for the move line'''
return 0.0
def _get_taxes_invoice(self, cr, uid, move_line, type):
""" Gets taxes on invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: Taxes Ids for the move line
"""
if type in ('in_invoice', 'in_refund'):
taxes = move_line.product_id.supplier_taxes_id
else:
taxes = move_line.product_id.taxes_id
if move_line.picking_id and move_line.picking_id.partner_id and move_line.picking_id.partner_id.id:
return self.pool.get('account.fiscal.position').map_tax(
cr,
uid,
move_line.picking_id.partner_id.property_account_position,
taxes
)
else:
return map(lambda x: x.id, taxes)
def _get_account_analytic_invoice(self, cr, uid, picking, move_line):
return False
def _invoice_line_hook(self, cr, uid, move_line, invoice_line_id):
'''Call after the creation of the invoice line'''
return
def _invoice_hook(self, cr, uid, picking, invoice_id):
'''Call after the creation of the invoice'''
return
def _get_invoice_type(self, pick):
src_usage = dest_usage = None
inv_type = None
if pick.invoice_state == '2binvoiced':
if pick.move_lines:
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
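            # Map the picking direction and the counterpart location usage to an invoice
            # type: outgoing to a customer -> customer invoice, outgoing to a supplier ->
            # supplier refund, incoming from a supplier -> supplier invoice, incoming
            # from a customer -> customer refund.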
if pick.type == 'out' and dest_usage == 'supplier':
inv_type = 'in_refund'
elif pick.type == 'out' and dest_usage == 'customer':
inv_type = 'out_invoice'
elif pick.type == 'in' and src_usage == 'supplier':
inv_type = 'in_invoice'
elif pick.type == 'in' and src_usage == 'customer':
inv_type = 'out_refund'
else:
inv_type = 'out_invoice'
return inv_type
def _prepare_invoice_group(self, cr, uid, picking, partner, invoice, context=None):
""" Builds the dict for grouped invoices
@param picking: picking object
        @param partner: object of the partner to invoice (not used here, but may be useful if this function is inherited)
@param invoice: object of the invoice that we are updating
@return: dict that will be used to update the invoice
"""
comment = self._get_comment_invoice(cr, uid, picking)
return {
'name': (invoice.name or '') + ', ' + (picking.name or ''),
'origin': (invoice.origin or '') + ', ' + (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
'date_invoice': context.get('date_inv', False),
}
def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):
""" Builds the dict containing the values for the invoice
@param picking: picking object
@param partner: object of the partner to invoice
@param inv_type: type of the invoice ('out_invoice', 'in_invoice', ...)
@param journal_id: ID of the accounting journal
@return: dict that will be used to create the invoice object
"""
if isinstance(partner, int):
partner = self.pool.get('res.partner').browse(cr, uid, partner, context=context)
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
comment = self._get_comment_invoice(cr, uid, picking)
invoice_vals = {
'name': picking.name,
'origin': (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'type': inv_type,
'account_id': account_id,
'partner_id': partner.id,
'comment': comment,
'payment_term': payment_term,
'fiscal_position': partner.property_account_position.id,
'date_invoice': context.get('date_inv', False),
'company_id': picking.company_id.id,
'user_id': uid,
}
cur_id = self.get_currency_id(cr, uid, picking)
if cur_id:
invoice_vals['currency_id'] = cur_id
if journal_id:
invoice_vals['journal_id'] = journal_id
return invoice_vals
def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,
invoice_vals, context=None):
""" Builds the dict containing the values for the invoice line
@param group: True or False
@param picking: picking object
        @param move_line: move_line object
        @param invoice_id: ID of the related invoice
        @param invoice_vals: dict used to create the invoice
@return: dict that will be used to create the invoice line
"""
if group:
name = (picking.name or '') + '-' + move_line.name
else:
name = move_line.name
origin = move_line.picking_id.name or ''
if move_line.picking_id.origin:
origin += ':' + move_line.picking_id.origin
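        # Pick the income/expense account of the product, falling back to its category's
        # account, then remap it through the invoice's fiscal position if one is set.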
if invoice_vals['type'] in ('out_invoice', 'out_refund'):
account_id = move_line.product_id.property_account_income.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_income_categ.id
else:
account_id = move_line.product_id.property_account_expense.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_expense_categ.id
if invoice_vals['fiscal_position']:
fp_obj = self.pool.get('account.fiscal.position')
fiscal_position = fp_obj.browse(cr, uid, invoice_vals['fiscal_position'], context=context)
account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)
# set UoS if it's a sale and the picking doesn't have one
uos_id = move_line.product_uos and move_line.product_uos.id or False
if not uos_id and invoice_vals['type'] in ('out_invoice', 'out_refund'):
uos_id = move_line.product_uom.id
return {
'name': name,
'origin': origin,
'invoice_id': invoice_id,
'uos_id': uos_id,
'product_id': move_line.product_id.id,
'account_id': account_id,
'price_unit': self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type']),
'discount': self._get_discount_invoice(cr, uid, move_line),
'quantity': move_line.product_uos_qty or move_line.product_qty,
'invoice_line_tax_id': [(6, 0, self._get_taxes_invoice(cr, uid, move_line, invoice_vals['type']))],
'account_analytic_id': self._get_account_analytic_invoice(cr, uid, picking, move_line),
}
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
""" Creates invoice based on the invoice state selected for picking.
@param journal_id: Id of journal
@param group: Whether to create a group invoice or not
@param type: Type invoice to be created
@return: Ids of created invoices for the pickings
"""
if context is None:
context = {}
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
partner_obj = self.pool.get('res.partner')
invoices_group = {}
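        # invoices_group maps a partner id to the invoice already created for it, so that
        # several pickings of the same partner can be grouped on one invoice when group=True.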
res = {}
inv_type = type
for picking in self.browse(cr, uid, ids, context=context):
if picking.invoice_state != '2binvoiced':
continue
partner = self._get_partner_to_invoice(cr, uid, picking, context=context)
if isinstance(partner, int):
partner = partner_obj.browse(cr, uid, [partner], context=context)[0]
if not partner:
raise osv.except_osv(_('Error, no partner!'),
_('Please put a partner on the picking list if you want to generate invoice.'))
if not inv_type:
inv_type = self._get_invoice_type(picking)
invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
if group and partner.id in invoices_group:
invoice_id = invoices_group[partner.id]
invoice = invoice_obj.browse(cr, uid, invoice_id)
invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)
invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)
else:
invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)
invoices_group[partner.id] = invoice_id
res[picking.id] = invoice_id
for move_line in picking.move_lines:
if move_line.state == 'cancel':
continue
if move_line.scrapped:
                    # do not invoice scrapped products
continue
vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,
invoice_id, invoice_vals, context=context)
if vals:
invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)
self._invoice_line_hook(cr, uid, move_line, invoice_line_id)
invoice_obj.button_compute(cr, uid, [invoice_id], context=context,
set_total=(inv_type in ('in_invoice', 'in_refund')))
self.write(cr, uid, [picking.id], {
'invoice_state': 'invoiced',
}, context=context)
self._invoice_hook(cr, uid, picking, invoice_id)
self.write(cr, uid, res.keys(), {
'invoice_state': 'invoiced',
}, context=context)
return res
def test_done(self, cr, uid, ids, context=None):
""" Test whether the move lines are done or not.
@return: True or False
"""
ok = False
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state not in ('cancel','done'):
return False
if move.state=='done':
ok = True
return ok
def test_cancel(self, cr, uid, ids, context=None):
""" Test whether the move lines are canceled or not.
@return: True or False
"""
for pick in self.browse(cr, uid, ids, context=context):
for move in pick.move_lines:
if move.state not in ('cancel',):
return False
return True
def allow_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state == 'done':
raise osv.except_osv(_('Error!'), _('You cannot cancel the picking as some moves have been done. You should cancel the picking lines.'))
return True
def unlink(self, cr, uid, ids, context=None):
move_obj = self.pool.get('stock.move')
if context is None:
context = {}
for pick in self.browse(cr, uid, ids, context=context):
if pick.state in ['done','cancel']:
# retrieve the string value of field in user's language
state = dict(self.fields_get(cr, uid, context=context)['state']['selection']).get(pick.state, pick.state)
raise osv.except_osv(_('Error!'), _('You cannot remove the picking which is in %s state!')%(state,))
else:
ids2 = [move.id for move in pick.move_lines]
ctx = context.copy()
ctx.update({'call_unlink':True})
if pick.state != 'draft':
#Cancelling the move in order to affect Virtual stock of product
move_obj.action_cancel(cr, uid, ids2, ctx)
#Removing the move
move_obj.unlink(cr, uid, ids2, ctx)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
# FIXME: needs refactoring, this code is partially duplicated in stock_move.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial picking and moves done.
@param partial_datas : Dictionary containing details of partial picking
                          like partner_id, delivery_date, and
delivery moves with product_id, product_qty, uom
@return: Dictionary of values
"""
if context is None:
context = {}
else:
context = dict(context)
res = {}
move_obj = self.pool.get('stock.move')
uom_obj = self.pool.get('product.uom')
sequence_obj = self.pool.get('ir.sequence')
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids, context=context):
new_picking = None
complete, too_many, too_few = [], [], []
move_product_qty, prodlot_ids, product_avail, partial_qty, uos_qty, product_uoms = {}, {}, {}, {}, {}, {}
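            # Each move is classified against the quantity entered in the wizard:
            # 'complete' (fully processed), 'too_few' (partially processed, the rest
            # stays on a backorder) or 'too_many' (more processed than planned).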
for move in pick.move_lines:
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), {})
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom', move.product_uom.id)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_id = partial_data.get('prodlot_id')
prodlot_ids[move.id] = prodlot_id
product_uoms[move.id] = product_uom
partial_qty[move.id] = uom_obj._compute_qty(cr, uid, product_uoms[move.id], product_qty, move.product_uom.id)
uos_qty[move.id] = move.product_id._compute_uos_qty(product_uom, product_qty, move.product_uos) if product_qty else 0.0
if move.product_qty == partial_qty[move.id]:
complete.append(move)
elif move.product_qty > partial_qty[move.id]:
too_few.append(move)
else:
too_many.append(move)
if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
# Record the values that were chosen in the wizard, so they can be
# used for average price computation and inventory valuation
move_obj.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency})
            # when every requested quantity is zero, nothing is processed and no backorder is generated
empty_picking = not any(q for q in move_product_qty.values() if q > 0)
for move in too_few:
product_qty = move_product_qty[move.id]
if not new_picking and not empty_picking:
new_picking_name = pick.name
self.write(cr, uid, [pick.id],
{'name': sequence_obj.get(cr, uid,
'stock.picking.%s'%(pick.type)),
})
pick.refresh()
new_picking = self.copy(cr, uid, pick.id,
{
'name': new_picking_name,
'move_lines' : [],
'state':'draft',
})
if product_qty != 0:
defaults = {
'product_qty' : product_qty,
'product_uos_qty': uos_qty[move.id],
'picking_id' : new_picking,
'state': 'assigned',
'move_dest_id': move.move_dest_id.id,
'price_unit': move.price_unit,
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
defaults.update(prodlot_id=prodlot_id)
move_obj.copy(cr, uid, move.id, defaults)
move_obj.write(cr, uid, [move.id],
{
'product_qty': move.product_qty - partial_qty[move.id],
'product_uos_qty': move.product_uos_qty - uos_qty[move.id],
'prodlot_id': False,
'tracking_id': False,
})
if new_picking:
move_obj.write(cr, uid, [c.id for c in complete], {'picking_id': new_picking})
for move in complete:
defaults = {'product_uom': product_uoms[move.id], 'product_qty': move_product_qty[move.id]}
if prodlot_ids.get(move.id):
defaults.update({'prodlot_id': prodlot_ids[move.id]})
move_obj.write(cr, uid, [move.id], defaults)
for move in too_many:
product_qty = move_product_qty[move.id]
defaults = {
'product_qty' : product_qty,
'product_uos_qty': uos_qty[move.id],
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids.get(move.id)
if prodlot_ids.get(move.id):
defaults.update(prodlot_id=prodlot_id)
if new_picking:
defaults.update(picking_id=new_picking)
move_obj.write(cr, uid, [move.id], defaults)
# At first we confirm the new picking (if necessary)
if new_picking:
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
                # Then we process the new picking, which holds the quantities actually done
self.write(cr, uid, [pick.id], {'backorder_id': new_picking})
self.action_move(cr, uid, [new_picking], context=context)
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_done', cr)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
delivered_pack_id = new_picking
self.message_post(cr, uid, new_picking, body=_("Back order <em>%s</em> has been <b>created</b>.") % (pick.name), context=context)
elif empty_picking:
delivered_pack_id = pick.id
else:
self.action_move(cr, uid, [pick.id], context=context)
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_done', cr)
delivered_pack_id = pick.id
delivered_pack = self.browse(cr, uid, delivered_pack_id, context=context)
res[pick.id] = {'delivered_picking': delivered_pack.id or False}
return res
    # views associated with each picking type
_VIEW_LIST = {
'out': 'view_picking_out_form',
'in': 'view_picking_in_form',
'internal': 'view_picking_form',
}
def _get_view_id(self, cr, uid, type):
"""Get the view id suiting the given type
@param type: the picking type as a string
        @return: view id, or False if no view found
"""
res = self.pool.get('ir.model.data').get_object_reference(cr, uid,
'stock', self._VIEW_LIST.get(type, 'view_picking_form'))
return res and res[1] or False
class stock_production_lot(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name', 'prefix', 'ref'], context)
res = []
for record in reads:
name = record['name']
prefix = record['prefix']
if prefix:
name = prefix + '/' + name
if record['ref']:
name = '%s [%s]' % (name, record['ref'])
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
ids = []
if name:
ids = self.search(cr, uid, [('prefix', '=', name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
_name = 'stock.production.lot'
_description = 'Serial Number'
def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
""" Gets stock of products for locations
@return: Dictionary of values
"""
if context is None:
context = {}
if 'location_id' not in context:
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)
else:
locations = context['location_id'] and [context['location_id']] or []
if isinstance(ids, (int, long)):
ids = [ids]
res = {}.fromkeys(ids, 0.0)
if locations:
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))
res.update(dict(cr.fetchall()))
return res
def _stock_search(self, cr, uid, obj, name, args, context=None):
""" Searches Ids of products
@return: Ids of locations
"""
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')])
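        # The operator and value of the search domain (args[0]) are appended verbatim to the
        # HAVING clause, so only the total quantity per serial number is compared.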
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s group by prodlot_id
having sum(qty) '''+ str(args[0][1]) + str(args[0][2]),(tuple(locations),))
res = cr.fetchall()
ids = [('id', 'in', map(lambda x: x[0], res))]
return ids
_columns = {
'name': fields.char('Serial Number', size=64, required=True, help="Unique Serial Number, will be displayed as: PREFIX/SERIAL [INT_REF]"),
'ref': fields.char('Internal Reference', size=256, help="Internal reference number in case it differs from the manufacturer's serial number"),
'prefix': fields.char('Prefix', size=64, help="Optional prefix to prepend when displaying this serial number: PREFIX/SERIAL [INT_REF]"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'date': fields.datetime('Creation Date', required=True),
'stock_available': fields.function(_get_stock, fnct_search=_stock_search, type="float", string="Available", select=True,
help="Current quantity of products with this Serial Number available in company warehouses",
digits_compute=dp.get_precision('Product Unit of Measure')),
'revisions': fields.one2many('stock.production.lot.revision', 'lot_id', 'Revisions'),
'company_id': fields.many2one('res.company', 'Company', select=True),
'move_ids': fields.one2many('stock.move', 'prodlot_id', 'Moves for this serial number', readonly=True),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, ref, product_id, company_id)', 'The combination of Serial Number, internal reference, Product and Company must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
value=self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
return value
def copy(self, cr, uid, id, default=None, context=None):
context = context or {}
default = default and default.copy() or {}
default.update(date=time.strftime('%Y-%m-%d %H:%M:%S'), move_ids=[])
return super(stock_production_lot, self).copy(cr, uid, id, default=default, context=context)
stock_production_lot()
class stock_production_lot_revision(osv.osv):
_name = 'stock.production.lot.revision'
_description = 'Serial Number Revision'
_columns = {
'name': fields.char('Revision Name', size=64, required=True),
'description': fields.text('Description'),
'date': fields.date('Revision Date'),
'indice': fields.char('Revision Number', size=16),
'author_id': fields.many2one('res.users', 'Author'),
'lot_id': fields.many2one('stock.production.lot', 'Serial Number', select=True, ondelete='cascade'),
'company_id': fields.related('lot_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
}
_defaults = {
'author_id': lambda x, y, z, c: z,
'date': fields.date.context_today,
}
stock_production_lot_revision()
# ----------------------------------------------------
# Move
# ----------------------------------------------------
#
# Fields:
#   location_dest_id is only used for predicting future stocks
#
class stock_move(osv.osv):
def _getSSCC(self, cr, uid, context=None):
cr.execute('select id from stock_tracking where create_uid=%s order by id desc limit 1', (uid,))
res = cr.fetchone()
return (res and res[0]) or False
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def action_partial_move(self, cr, uid, ids, context=None):
if context is None: context = {}
if context.get('active_model') != self._name:
context.update(active_ids=ids, active_model=self._name)
partial_id = self.pool.get("stock.partial.move").create(
cr, uid, {}, context=context)
return {
'name':_("Products to Process"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'stock.partial.move',
'res_id': partial_id,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': context
}
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name+' > '+line.location_dest_id.name
# optional prefixes
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _check_tracking(self, cr, uid, ids, context=None):
""" Checks if serial number is assigned to stock move or not.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if not move.prodlot_id and \
(move.state == 'done' and \
( \
(move.product_id.track_production and move.location_id.usage == 'production') or \
(move.product_id.track_production and move.location_dest_id.usage == 'production') or \
(move.product_id.track_incoming and move.location_id.usage == 'supplier') or \
(move.product_id.track_outgoing and move.location_dest_id.usage == 'customer') or \
(move.product_id.track_incoming and move.location_id.usage == 'inventory') \
)):
return False
return True
def _check_product_lot(self, cr, uid, ids, context=None):
""" Checks whether move is done or not and production lot is assigned to that move.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if move.prodlot_id and move.state == 'done' and (move.prodlot_id.product_id.id != move.product_id.id):
return False
return True
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Urgent')], 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Scheduled Date', states={'done': [('readonly', True)]},required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type','<>','service')],states={'done': [('readonly', True)]}),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True,states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True,states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_packaging': fields.many2one('product.packaging', 'Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True,states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True,states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
        'prodlot_id': fields.many2one('stock.production.lot', 'Serial Number', help="Serial number used to track the products of this move", select=True, ondelete='restrict'),
'tracking_id': fields.many2one('stock.tracking', 'Pack', select=True, states={'done': [('readonly', True)]}, help="Logistical shipping unit: pallet, box, pack ..."),
'auto_validate': fields.boolean('Auto Validate'),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True),
'move_history_ids': fields.many2many('stock.move', 'stock_move_history_ids', 'parent_id', 'child_id', 'Move History (child moves)'),
'move_history_ids2': fields.many2many('stock.move', 'stock_move_history_ids', 'child_id', 'parent_id', 'Move History (parent moves)'),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True,states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'price_unit': fields.float('Unit Price', digits_compute= dp.get_precision('Product Price'), help="Technical field used to record the product cost set by the user during a picking confirmation (when average price costing method is used)"),
'price_currency_id': fields.many2one('res.currency', 'Currency for average price', help="Technical field used to record the currency chosen by the user during a picking confirmation (when average price costing method is used)"),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'backorder_id': fields.related('picking_id','backorder_id',type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.related('picking_id','origin',type='char', size=64, relation="stock.picking", string="Source", store=True),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id','scrap_location',type='boolean',relation='stock.location',string='Scrapped', readonly=True),
'type': fields.related('picking_id', 'type', type='selection', selection=[('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], string='Shipping Type'),
}
def _check_location(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if (record.state=='done') and (record.location_id.usage == 'view'):
raise osv.except_osv(_('Error'), _('You cannot move product %s from a location of type view %s.')% (record.product_id.name, record.location_id.name))
if (record.state=='done') and (record.location_dest_id.usage == 'view' ):
raise osv.except_osv(_('Error'), _('You cannot move product %s to a location of type view %s.')% (record.product_id.name, record.location_dest_id.name))
return True
_constraints = [
(_check_tracking,
'You must assign a serial number for this product.',
['prodlot_id']),
(_check_location, 'You cannot move products from or to a location of the type view.',
['location_id','location_dest_id']),
(_check_product_lot,
'You try to assign a lot which is not from the same product.',
['prodlot_id'])]
def _default_location_destination(self, cr, uid, context=None):
""" Gets default address of partner for destination location
@return: Address id or False
"""
        if context is None:
            context = {}
        mod_obj = self.pool.get('ir.model.data')
        picking_type = context.get('picking_type')
        location_id = False
if context.get('move_line', []):
if context['move_line'][0]:
if isinstance(context['move_line'][0], (tuple, list)):
location_id = context['move_line'][0][2] and context['move_line'][0][2].get('location_dest_id',False)
else:
move_list = self.pool.get('stock.move').read(cr, uid, context['move_line'][0], ['location_dest_id'])
location_id = move_list and move_list['location_dest_id'][0] or False
elif context.get('address_out_id', False):
property_out = self.pool.get('res.partner').browse(cr, uid, context['address_out_id'], context).property_stock_customer
location_id = property_out and property_out.id or False
else:
location_xml_id = False
if picking_type in ('in', 'internal'):
location_xml_id = 'stock_location_stock'
elif picking_type == 'out':
location_xml_id = 'stock_location_customers'
if location_xml_id:
try:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _default_location_source(self, cr, uid, context=None):
""" Gets default address of partner for source location
@return: Address id or False
"""
        if context is None:
            context = {}
        mod_obj = self.pool.get('ir.model.data')
        picking_type = context.get('picking_type')
        location_id = False
if context.get('move_line', []):
try:
location_id = context['move_line'][0][2]['location_id']
            except (TypeError, IndexError, KeyError):
pass
elif context.get('address_in_id', False):
part_obj_add = self.pool.get('res.partner').browse(cr, uid, context['address_in_id'], context=context)
if part_obj_add:
location_id = part_obj_add.property_stock_supplier.id
else:
location_xml_id = False
if picking_type == 'in':
location_xml_id = 'stock_location_suppliers'
elif picking_type in ('out', 'internal'):
location_xml_id = 'stock_location_stock'
if location_xml_id:
try:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _default_destination_address(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.partner_id.id
def _default_move_type(self, cr, uid, context=None):
""" Gets default type of move
@return: type
"""
if context is None:
context = {}
picking_type = context.get('picking_type')
type = 'internal'
if picking_type == 'in':
type = 'in'
elif picking_type == 'out':
type = 'out'
return type
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'partner_id': _default_destination_address,
'type': _default_move_type,
'state': 'draft',
'priority': '1',
'product_qty': 1.0,
'scrapped' : False,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if uid != 1:
frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
if frozen_fields.intersection(vals):
raise osv.except_osv(_('Operation Forbidden!'),
_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.setdefault('tracking_id', False)
default.setdefault('prodlot_id', False)
default.setdefault('move_history_ids', [])
default.setdefault('move_history_ids2', [])
return super(stock_move, self).copy_data(cr, uid, id, default, context=context)
def _auto_init(self, cursor, context=None):
res = super(stock_move, self)._auto_init(cursor, context=context)
cursor.execute('SELECT indexname \
FROM pg_indexes \
WHERE indexname = \'stock_move_location_id_location_dest_id_product_id_state\'')
if not cursor.fetchone():
cursor.execute('CREATE INDEX stock_move_location_id_location_dest_id_product_id_state \
ON stock_move (product_id, state, location_id, location_dest_id)')
return res
def onchange_lot_id(self, cr, uid, ids, prodlot_id=False, product_qty=False,
loc_id=False, product_id=False, uom_id=False, context=None):
""" On change of production lot gives a warning message.
@param prodlot_id: Changed production lot id
@param product_qty: Quantity of product
@param loc_id: Location id
@param product_id: Product id
@return: Warning message
"""
if not prodlot_id or not loc_id:
return {}
ctx = context and context.copy() or {}
ctx['location_id'] = loc_id
ctx.update({'raise-exception': True})
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product_uom = product_obj.browse(cr, uid, product_id, context=ctx).uom_id
prodlot = self.pool.get('stock.production.lot').browse(cr, uid, prodlot_id, context=ctx)
location = self.pool.get('stock.location').browse(cr, uid, loc_id, context=ctx)
uom = uom_obj.browse(cr, uid, uom_id, context=ctx)
amount_actual = uom_obj._compute_qty_obj(cr, uid, product_uom, prodlot.stock_available, uom, context=ctx)
warning = {}
if (location.usage == 'internal') and (product_qty > (amount_actual or 0.0)):
warning = {
'title': _('Insufficient Stock for Serial Number !'),
'message': _('You are moving %.2f %s but only %.2f %s available for this serial number.') % (product_qty, uom.name, amount_actual, uom.name)
}
return {'warning': warning}
def onchange_quantity(self, cr, uid, ids, product_id, product_qty,
product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
warning = {}
if (not product_id) or (product_qty <=0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: OpenERP will not "
"automatically generate a back order.") })
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_qty': 0.00
}
if (not product_id) or (product_uos_qty <=0.0):
result['product_uos_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# No warning if the quantity was decreased to avoid double warnings:
# The clients should call onchange_quantity too anyway
if product_uos and product_uom and (product_uom != product_uos):
result['product_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_qty'] = product_uos_qty
return {'value': result}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_qty': 1.00,
'product_uos_qty' : self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
'prodlot_id' : False,
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
def onchange_move_type(self, cr, uid, ids, type, context=None):
""" On change of move type gives sorce and destination location.
@param type: Move Type
@return: Dictionary of values
"""
mod_obj = self.pool.get('ir.model.data')
location_source_id = 'stock_location_stock'
location_dest_id = 'stock_location_stock'
if type == 'in':
location_source_id = 'stock_location_suppliers'
location_dest_id = 'stock_location_stock'
elif type == 'out':
location_source_id = 'stock_location_stock'
location_dest_id = 'stock_location_customers'
try:
source_location = mod_obj.get_object_reference(cr, uid, 'stock', location_source_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [source_location[1]], 'read', context=context)
except (orm.except_orm, ValueError):
source_location = False
try:
dest_location = mod_obj.get_object_reference(cr, uid, 'stock', location_dest_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [dest_location[1]], 'read', context=context)
except (orm.except_orm, ValueError):
dest_location = False
return {'value':{'location_id': source_location and source_location[1] or False, 'location_dest_id': dest_location and dest_location[1] or False}}
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime('%Y-%m-%d %H:%M:%S')
return {'value':{'date': date_expected}}
def _chain_compute(self, cr, uid, moves, context=None):
""" Finds whether the location has chained location type or not.
@param moves: Stock moves
@return: Dictionary containing destination location with chained location type.
"""
result = {}
for m in moves:
dest = self.pool.get('stock.location').chained_location_get(
cr,
uid,
m.location_dest_id,
m.picking_id and m.picking_id.partner_id and m.picking_id.partner_id,
m.product_id,
context
)
if dest:
if dest[1] == 'transparent':
newdate = (datetime.strptime(m.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=dest[2] or 0)).strftime('%Y-%m-%d')
self.write(cr, uid, [m.id], {
'date': newdate,
'location_dest_id': dest[0].id})
if m.picking_id and (dest[3] or dest[5]):
self.pool.get('stock.picking').write(cr, uid, [m.picking_id.id], {
'stock_journal_id': dest[3] or m.picking_id.stock_journal_id.id,
'type': dest[5] or m.picking_id.type
}, context=context)
m.location_dest_id = dest[0]
res2 = self._chain_compute(cr, uid, [m], context=context)
for pick_id in res2.keys():
result.setdefault(pick_id, [])
result[pick_id] += res2[pick_id]
else:
result.setdefault(m.picking_id, [])
result[m.picking_id].append( (m, dest) )
return result
def _prepare_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
"""Prepare the definition (values) to create a new chained picking.
:param str picking_name: desired new picking name
:param browse_record picking: source picking (being chained to)
:param str picking_type: desired new picking type
:param list moves_todo: specification of the stock moves to be later included in this
picking, in the form::
                [[move, (dest_location, auto_packing, chained_delay, chained_journal,
                         chained_company_id, chained_picking_type, chained_invoice_state)],
                 ...
                ]
See also :meth:`stock_location.chained_location_get`.
"""
res_company = self.pool.get('res.company')
return {
'name': picking_name,
'origin': tools.ustr(picking.origin or ''),
'type': picking_type,
'note': picking.note,
'move_type': picking.move_type,
'auto_picking': moves_todo[0][1][1] == 'auto',
'stock_journal_id': moves_todo[0][1][3],
'company_id': moves_todo[0][1][4] or res_company._company_default_get(cr, uid, 'stock.company', context=context),
'partner_id': picking.partner_id.id,
'invoice_state': 'none',
'date': picking.date,
}
def _create_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
picking_obj = self.pool.get('stock.picking')
return picking_obj.create(cr, uid, self._prepare_chained_picking(cr, uid, picking_name, picking, picking_type, moves_todo, context=context))
def create_chained_picking(self, cr, uid, moves, context=None):
res_obj = self.pool.get('res.company')
location_obj = self.pool.get('stock.location')
move_obj = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
new_moves = []
if context is None:
context = {}
seq_obj = self.pool.get('ir.sequence')
for picking, chained_moves in self._chain_compute(cr, uid, moves, context=context).items():
# We group the moves by automatic move type, so it creates different pickings for different types
moves_by_type = {}
for move in chained_moves:
moves_by_type.setdefault(move[1][1], []).append(move)
for todo in moves_by_type.values():
ptype = todo[0][1][5] and todo[0][1][5] or location_obj.picking_type_get(cr, uid, todo[0][0].location_dest_id, todo[0][1][0])
if picking:
# name of new picking according to its type
if ptype == 'internal':
new_pick_name = seq_obj.get(cr, uid,'stock.picking')
else :
new_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + ptype)
pickid = self._create_chained_picking(cr, uid, new_pick_name, picking, ptype, todo, context=context)
# Need to check name of old picking because it always considers picking as "OUT" when created from Sales Order
old_ptype = location_obj.picking_type_get(cr, uid, picking.move_lines[0].location_id, picking.move_lines[0].location_dest_id)
if old_ptype != picking.type:
old_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + old_ptype)
self.pool.get('stock.picking').write(cr, uid, [picking.id], {'name': old_pick_name, 'type': old_ptype}, context=context)
else:
pickid = False
for move, (loc, dummy, delay, dummy, company_id, ptype, invoice_state) in todo:
new_id = move_obj.copy(cr, uid, move.id, {
'location_id': move.location_dest_id.id,
'location_dest_id': loc.id,
'date': time.strftime('%Y-%m-%d'),
'picking_id': pickid,
'state': 'waiting',
'company_id': company_id or res_obj._company_default_get(cr, uid, 'stock.company', context=context) ,
'move_history_ids': [],
'date_expected': (datetime.strptime(move.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=delay or 0)).strftime('%Y-%m-%d'),
'move_history_ids2': []}
)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': new_id,
'move_history_ids': [(4, new_id)]
})
new_moves.append(self.browse(cr, uid, [new_id])[0])
if pickid:
wf_service.trg_validate(uid, 'stock.picking', pickid, 'button_confirm', cr)
if new_moves:
new_moves += self.create_chained_picking(cr, uid, new_moves, context)
return new_moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move.
@return: List of ids.
"""
moves = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
self.create_chained_picking(cr, uid, moves, context)
return []
def action_assign(self, cr, uid, ids, *args):
""" Changes state to confirmed or waiting.
@return: List of values
"""
todo = []
for move in self.browse(cr, uid, ids):
if move.state in ('confirmed', 'waiting'):
todo.append(move.id)
res = self.check_assign(cr, uid, todo)
return res
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
def cancel_assign(self, cr, uid, ids, context=None):
""" Changes the state to confirmed.
@return: True
"""
self.write(cr, uid, ids, {'state': 'confirmed'})
# fix for bug lp:707031
# called write of related picking because changing move availability does
# not trigger workflow of picking in order to change the state of picking
seen = set()
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id and move.picking_id.id not in seen:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
seen.add(move.picking_id.id)
return True
#
# Duplicate stock.move
#
def check_assign(self, cr, uid, ids, context=None):
""" Checks the product type and accordingly writes the state.
@return: No. of moves done
"""
done = []
count = 0
pickings = {}
if context is None:
context = {}
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.type == 'consu' or move.location_id.usage == 'supplier':
if move.state in ('confirmed', 'waiting'):
done.append(move.id)
pickings[move.picking_id.id] = 1
continue
if move.state in ('confirmed', 'waiting'):
# Important: we must pass lock=True to _product_reserve() to avoid race conditions and double reservations
res = self.pool.get('stock.location')._product_reserve(cr, uid, [move.location_id.id], move.product_id.id, move.product_qty, {'uom': move.product_uom.id}, lock=True)
if res:
#_product_available_test depends on the next status for correct functioning
#the test does not work correctly if the same product occurs multiple times
#in the same order. This is e.g. the case when using the button 'split in two' of
#the stock outgoing form
self.write(cr, uid, [move.id], {'state':'assigned'})
done.append(move.id)
pickings[move.picking_id.id] = 1
r = res.pop(0)
product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, [move.id], move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
move.write({
'location_id': r[1],
'product_qty': r[0],
'product_uos_qty': product_uos_qty,
})
while res:
r = res.pop(0)
product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, [move.id], move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
move_id = self.copy(cr, uid, move.id, {'product_uos_qty': product_uos_qty, 'product_qty': r[0], 'location_id': r[1]})
done.append(move_id)
if done:
count += len(done)
self.write(cr, uid, done, {'state': 'assigned'})
if count:
for pick_id in pickings:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
return count
def setlast_tracking(self, cr, uid, ids, context=None):
assert len(ids) == 1, "1 ID expected, got %s" % (ids, )
tracking_obj = self.pool['stock.tracking']
move = self.browse(cr, uid, ids[0], context=context)
picking_id = move.picking_id.id
if picking_id:
move_ids = self.search(cr, uid, [
('picking_id', '=', picking_id),
('tracking_id', '!=', False)
], limit=1, order='tracking_id DESC', context=context)
if move_ids:
tracking_move = self.browse(cr, uid, move_ids[0],
context=context)
tracking_id = tracking_move.tracking_id.id
else:
tracking_id = tracking_obj.create(cr, uid, {}, context=context)
self.write(cr, uid, move.id,
{'tracking_id': tracking_id},
context=context)
return True
#
# Cancel move => cancel others move and pickings
#
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
if not len(ids):
return True
if context is None:
context = {}
pickings = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('confirmed', 'waiting', 'assigned', 'draft'):
if move.picking_id:
pickings.add(move.picking_id.id)
if move.move_dest_id and move.move_dest_id.state == 'waiting':
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
if context.get('call_unlink',False) and move.move_dest_id.picking_id:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
if not context.get('call_unlink',False):
for pick in self.pool.get('stock.picking').browse(cr, uid, list(pickings), context=context):
if all(move.state == 'cancel' for move in pick.move_lines):
self.pool.get('stock.picking').write(cr, uid, [pick.id], {'state': 'cancel'}, context=context)
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_trigger(uid, 'stock.move', id, cr)
return True
def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
"""
Return the accounts and journal to use to post Journal Entries for the real-time
valuation of the move.
:param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
        :raise: osv.except_osv() if any mandatory account or journal is not defined.
"""
product_obj=self.pool.get('product.product')
accounts = product_obj.get_product_accounts(cr, uid, move.product_id.id, context)
if move.location_id.valuation_out_account_id:
acc_src = move.location_id.valuation_out_account_id.id
else:
acc_src = accounts['stock_account_input']
if move.location_dest_id.valuation_in_account_id:
acc_dest = move.location_dest_id.valuation_in_account_id.id
else:
acc_dest = accounts['stock_account_output']
acc_valuation = accounts.get('property_stock_valuation_account_id', False)
journal_id = accounts['stock_journal']
if acc_dest == acc_valuation:
raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Output Account of this product and Valuation account on category of this product are same.'))
if acc_src == acc_valuation:
raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Input Account of this product and Valuation account on category of this product are same.'))
if not acc_src:
raise osv.except_osv(_('Error!'), _('Please define stock input account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not acc_dest:
raise osv.except_osv(_('Error!'), _('Please define stock output account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not journal_id:
raise osv.except_osv(_('Error!'), _('Please define journal on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
if not acc_valuation:
raise osv.except_osv(_('Error!'), _('Please define inventory valuation account on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
return journal_id, acc_src, acc_dest, acc_valuation
def _get_reference_accounting_values_for_valuation(self, cr, uid, move, context=None):
"""
Return the reference amount and reference currency representing the inventory valuation for this move.
These reference values should possibly be converted before being posted in Journals to adapt to the primary
and secondary currencies of the relevant accounts.
"""
product_uom_obj = self.pool.get('product.uom')
# by default the reference currency is that of the move's company
reference_currency_id = move.company_id.currency_id.id
default_uom = move.product_id.uom_id.id
qty = product_uom_obj._compute_qty(cr, uid, move.product_uom.id, move.product_qty, default_uom)
# if product is set to average price and a specific value was entered in the picking wizard,
# we use it
if move.location_dest_id.usage != 'internal' and move.product_id.cost_method == 'average':
reference_amount = qty * move.product_id.standard_price
elif move.product_id.cost_method == 'average' and move.price_unit:
reference_amount = qty * move.price_unit
reference_currency_id = move.price_currency_id.id or reference_currency_id
# Otherwise we default to the company's valuation price type, considering that the values of the
# valuation field are expressed in the default currency of the move's company.
else:
if context is None:
context = {}
currency_ctx = dict(context, currency_id = move.company_id.currency_id.id)
amount_unit = move.product_id.price_get('standard_price', context=currency_ctx)[move.product_id.id]
reference_amount = amount_unit * qty
return reference_amount, reference_currency_id
def _update_average_price(self, cr, uid, move, context=None):
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
        product_avail = {}
        if context is None:
            context = {}
        if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
product_qty = move.product_qty
product_uom = move.product_uom.id
product_price = move.price_unit
product_currency = move.price_currency_id.id
if product.id not in product_avail:
# keep track of stock on hand including processed lines not yet marked as done
product_avail[product.id] = product.qty_available
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price, round=False)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product_avail[product.id] <= 0:
product_avail[product.id] = 0
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context=context)[product.id]
new_std_price = ((amount_unit * product_avail[product.id])\
+ (new_price * qty))/(product_avail[product.id] + qty)
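                    # Worked example of the weighted average above (assumed figures):
                    # 10 units on hand at 5.0 each, receiving 5 units at 8.0 gives
                    # new_std_price = (5.0 * 10 + 8.0 * 5) / (10 + 5) = 6.0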
product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})
product_avail[product.id] += qty
def _create_product_valuation_moves(self, cr, uid, move, context=None):
"""
        Generate the appropriate accounting moves if the product being moved is subject
to real_time valuation tracking, and the source or destination location is
a transit location or is outside of the company.
"""
if move.product_id.valuation == 'real_time': # FIXME: product valuation should perhaps be a property?
if context is None:
context = {}
src_company_ctx = dict(context,force_company=move.location_id.company_id.id)
dest_company_ctx = dict(context,force_company=move.location_dest_id.company_id.id)
# do not take the company of the one of the user
# used to select the correct period
company_ctx = dict(context, company_id=move.company_id.id)
account_moves = []
# Outgoing moves (or cross-company output part)
if move.location_id.company_id \
and (move.location_id.usage == 'internal' and move.location_dest_id.usage != 'internal'\
or move.location_id.company_id != move.location_dest_id.company_id):
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, src_company_ctx)
reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
#returning goods to supplier
if move.location_dest_id.usage == 'supplier':
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_src, reference_amount, reference_currency_id, context))]
else:
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_dest, reference_amount, reference_currency_id, context))]
# Incoming moves (or cross-company input part)
if move.location_dest_id.company_id \
and (move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal'\
or move.location_id.company_id != move.location_dest_id.company_id):
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, dest_company_ctx)
reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
                #goods returned from customer
if move.location_id.usage == 'customer':
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_dest, acc_valuation, reference_amount, reference_currency_id, context))]
else:
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_src, acc_valuation, reference_amount, reference_currency_id, context))]
move_obj = self.pool.get('account.move')
for j_id, move_lines in account_moves:
move_obj.create(cr, uid,
{
'journal_id': j_id,
'line_id': move_lines,
'company_id': move.company_id.id,
'ref': move.picking_id and move.picking_id.name}, context=company_ctx)
def action_done(self, cr, uid, ids, context=None):
""" Makes the move done and if all moves are done, it will finish the picking.
@return:
"""
picking_ids = []
move_ids = []
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
todo = []
for move in self.browse(cr, uid, ids, context=context):
if move.state=="draft":
todo.append(move.id)
if todo:
self.action_confirm(cr, uid, todo, context=context)
for move in self.browse(cr, uid, ids, context=context):
if move.state in ['done','cancel']:
continue
move_ids.append(move.id)
if move.picking_id:
picking_ids.append(move.picking_id.id)
if move.move_dest_id.id and (move.state != 'done'):
# Downstream move should only be triggered if this move is the last pending upstream move
other_upstream_move_ids = self.search(cr, uid, [('id','not in',move_ids),('state','not in',['done','cancel']),
('move_dest_id','=',move.move_dest_id.id)], context=context)
if not other_upstream_move_ids:
self.write(cr, uid, [move.id], {'move_history_ids': [(4, move.move_dest_id.id)]})
if move.move_dest_id.state in ('waiting', 'confirmed'):
self.force_assign(cr, uid, [move.move_dest_id.id], context=context)
if move.move_dest_id.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
if move.move_dest_id.auto_validate:
self.action_done(cr, uid, [move.move_dest_id.id], context=context)
self._update_average_price(cr, uid, move, context=context)
self._create_product_valuation_moves(cr, uid, move, context=context)
if move.state not in ('confirmed','done','assigned'):
self.action_confirm(cr, uid, [move.id], context=context)
self.write(cr, uid, [move.id],
{'state': 'done',
'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)},
context=context)
for pick_id in picking_ids:
wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
return True
def _create_account_move_line(self, cr, uid, move, src_account_id, dest_account_id, reference_amount, reference_currency_id, context=None):
"""
Generate the account.move.line values to post to track the stock valuation difference due to the
processing of the given stock move.
"""
# prepare default values considering that the destination accounts have the reference_currency_id as their main currency
partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
debit_line_vals = {
'name': move.name,
'product_id': move.product_id and move.product_id.id or False,
'product_uom_id': move.product_uom and move.product_uom.id or False,
'quantity': move.product_qty,
'ref': move.picking_id and move.picking_id.name or False,
'date': time.strftime('%Y-%m-%d'),
'partner_id': partner_id,
'debit': reference_amount,
'account_id': dest_account_id,
}
credit_line_vals = {
'name': move.name,
'product_id': move.product_id and move.product_id.id or False,
'product_uom_id': move.product_uom and move.product_uom.id or False,
'quantity': move.product_qty,
'ref': move.picking_id and move.picking_id.name or False,
'date': time.strftime('%Y-%m-%d'),
'partner_id': partner_id,
'credit': reference_amount,
'account_id': src_account_id,
}
        # if we are posting to accounts in a different currency, provide the correct values in both currencies
        # when compatible with the optional secondary currency on the account.
# Financial Accounts only accept amounts in secondary currencies if there's no secondary currency on the account
# or if it's the same as that of the secondary amount being posted.
account_obj = self.pool.get('account.account')
src_acct, dest_acct = account_obj.browse(cr, uid, [src_account_id, dest_account_id], context=context)
src_main_currency_id = src_acct.company_id.currency_id.id
dest_main_currency_id = dest_acct.company_id.currency_id.id
cur_obj = self.pool.get('res.currency')
if reference_currency_id != src_main_currency_id:
# fix credit line:
credit_line_vals['credit'] = cur_obj.compute(cr, uid, reference_currency_id, src_main_currency_id, reference_amount, context=context)
if (not src_acct.currency_id) or src_acct.currency_id.id == reference_currency_id:
credit_line_vals.update(currency_id=reference_currency_id, amount_currency=-reference_amount)
if reference_currency_id != dest_main_currency_id:
# fix debit line:
debit_line_vals['debit'] = cur_obj.compute(cr, uid, reference_currency_id, dest_main_currency_id, reference_amount, context=context)
if (not dest_acct.currency_id) or dest_acct.currency_id.id == reference_currency_id:
debit_line_vals.update(currency_id=reference_currency_id, amount_currency=reference_amount)
return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
for move in self.browse(cr, uid, ids, context=context):
if move.state != 'draft' and not ctx.get('call_unlink', False):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(
cr, uid, ids, context=ctx)
# _create_lot function is not used anywhere
def _create_lot(self, cr, uid, ids, product_id, prefix=False):
""" Creates production lot
@return: Production lot id
"""
prodlot_obj = self.pool.get('stock.production.lot')
prodlot_id = prodlot_obj.create(cr, uid, {'prefix': prefix, 'product_id': product_id})
return prodlot_id
def action_scrap(self, cr, uid, ids, quantity, location_id, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
        @return: Scrapped lines
"""
        # quantity should be expressed in the move's UoM
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
if source_location.usage != 'internal':
#restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
default_val = {
'location_id': source_location.id,
'product_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'tracking_id': move.tracking_id.id,
'prodlot_id': move.prodlot_id.id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
self.action_done(cr, uid, res, context=context)
return res
# action_split function is not used anywhere
# FIXME: deprecate this method
def action_split(self, cr, uid, ids, quantity, split_by_qty=1, prefix=False, with_lot=True, context=None):
""" Split Stock Move lines into production lot which specified split by quantity.
@param cr: the database cursor
@param uid: the user id
        @param ids: ids of stock move object to be split
@param split_by_qty : specify split by qty
@param prefix : specify prefix of production lot
        @param with_lot : if true, a production lot will be assigned to each split line, otherwise not.
@param context: context arguments
        @return: Split move lines
"""
if context is None:
context = {}
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
if split_by_qty <= 0 or quantity == 0:
return res
uos_qty = split_by_qty / move.product_qty * move.product_uos_qty
quantity_rest = quantity % split_by_qty
            uos_qty_rest = quantity_rest / move.product_qty * move.product_uos_qty
update_val = {
'product_qty': split_by_qty,
'product_uos_qty': uos_qty,
}
for idx in range(int(quantity//split_by_qty)):
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
if quantity_rest > 0:
idx = int(quantity//split_by_qty)
update_val['product_qty'] = quantity_rest
update_val['product_uos_qty'] = uos_qty_rest
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
return res
def action_consume(self, cr, uid, ids, quantity, location_id=False, context=None):
""" Consumed product with specific quatity from specific source location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be consumed
@param quantity : specify consume quantity
@param location_id : specify source location
@param context: context arguments
@return: Consumed lines
"""
        # quantity should be expressed in the move's UoM
if context is None:
context = {}
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
move_qty = move.product_qty
if move_qty <= 0:
raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
quantity_rest = move.product_qty
quantity_rest -= quantity
uos_qty_rest = quantity_rest / move_qty * move.product_uos_qty
if quantity_rest <= 0:
quantity_rest = 0
uos_qty_rest = 0
quantity = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
if float_compare(quantity_rest, 0, precision_rounding=move.product_id.uom_id.rounding):
default_val = {
'product_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'location_id': location_id or move.location_id.id,
}
current_move = self.copy(cr, uid, move.id, default_val)
res += [current_move]
update_val = {}
update_val['product_qty'] = quantity_rest
update_val['product_uos_qty'] = uos_qty_rest
self.write(cr, uid, [move.id], update_val)
else:
quantity_rest = quantity
uos_qty_rest = uos_qty
res += [move.id]
update_val = {
'product_qty' : quantity_rest,
'product_uos_qty' : uos_qty_rest,
'location_id': location_id or move.location_id.id,
}
self.write(cr, uid, [move.id], update_val)
self.action_done(cr, uid, res, context=context)
return res
# FIXME: needs refactoring, this code is partially duplicated in stock_picking.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial pickings and moves done.
        @param partial_datas: Dictionary containing the details of the partial picking,
                              such as partner_id and delivery_date, plus one entry per
                              move holding product_qty, product_uom, etc.
"""
res = {}
picking_obj = self.pool.get('stock.picking')
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
complete, too_many, too_few = [], [], []
move_product_qty = {}
prodlot_ids = {}
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), False)
assert partial_data, _('Missing partial picking data for move #%s.') % (move.id)
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom',False)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_ids[move.id] = partial_data.get('prodlot_id')
if move.product_qty == product_qty:
complete.append(move)
elif move.product_qty > product_qty:
too_few.append(move)
else:
too_many.append(move)
if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
# Record the values that were chosen in the wizard, so they can be
# used for average price computation and inventory valuation
self.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency,
})
for move in too_few:
product_qty = move_product_qty[move.id]
if product_qty != 0:
defaults = {
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : move.picking_id.id,
'state': 'assigned',
'move_dest_id': move.move_dest_id.id,
'price_unit': move.price_unit,
}
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
defaults.update(prodlot_id=prodlot_id)
new_move = self.copy(cr, uid, move.id, defaults)
complete.append(self.browse(cr, uid, new_move))
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty - product_qty,
'product_uos_qty': move.product_qty - product_qty,
'prodlot_id': False,
'tracking_id': False,
})
for move in too_many:
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty,
'product_uos_qty': move.product_qty,
})
complete.append(move)
for move in complete:
if prodlot_ids.get(move.id):
self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
self.action_done(cr, uid, [move.id], context=context)
if move.picking_id.id :
# TOCHECK : Done picking if all moves are done
cr.execute("""
SELECT move.id FROM stock_picking pick
RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
WHERE pick.id = %s""",
('done', move.picking_id.id))
res = cr.fetchall()
if len(res) == len(move.picking_id.move_lines):
picking_obj.action_move(cr, uid, [move.picking_id.id])
wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)
return [move.id for move in complete]
stock_move()
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
_columns = {
'name': fields.char('Inventory Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date': fields.datetime('Creation Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_done': fields.datetime('Date done'),
'inventory_line_id': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=True, states={'draft': [('readonly', False)]}),
'move_ids': fields.many2many('stock.move', 'stock_inventory_move_rel', 'inventory_id', 'move_id', 'Created Moves'),
'state': fields.selection( (('draft', 'Draft'), ('cancel','Cancelled'), ('confirm','Confirmed'), ('done', 'Done')), 'Status', readonly=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft':[('readonly',False)]}),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.update({'move_ids': [], 'date_done': False})
return super(stock_inventory, self).copy(cr, uid, id, default, context=context)
def _inventory_line_hook(self, cr, uid, inventory_line, move_vals):
""" Creates a stock move from an inventory line
@param inventory_line:
@param move_vals:
@return:
"""
return self.pool.get('stock.move').create(cr, uid, move_vals)
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_confirm(self, cr, uid, ids, context=None):
""" Confirm the inventory and writes its finished date
@return: True
"""
if context is None:
context = {}
# to perform the correct inventory corrections we need analyze stock location by
# location, never recursively, so we use a special context
product_context = dict(context, compute_child=False)
location_obj = self.pool.get('stock.location')
for inv in self.browse(cr, uid, ids, context=context):
move_ids = []
for line in inv.inventory_line_id:
pid = line.product_id.id
product_context.update(uom=line.product_uom.id, to_date=inv.date, date=inv.date, prodlot_id=line.prod_lot_id.id)
amount = location_obj._product_get(cr, uid, line.location_id.id, [pid], product_context)[pid]
change = line.product_qty - amount
lot_id = line.prod_lot_id.id
if change:
location_id = line.product_id.property_stock_inventory.id
value = {
'name': _('INV:') + (line.inventory_id.name or ''),
'product_id': line.product_id.id,
'product_uom': line.product_uom.id,
'prodlot_id': lot_id,
'date': inv.date,
}
if change > 0:
value.update( {
'product_qty': change,
'location_id': location_id,
'location_dest_id': line.location_id.id,
})
else:
value.update( {
'product_qty': -change,
'location_id': line.location_id.id,
'location_dest_id': location_id,
})
move_ids.append(self._inventory_line_hook(cr, uid, line, value))
self.write(cr, uid, [inv.id], {'state': 'confirm', 'move_ids': [(6, 0, move_ids)]})
self.pool.get('stock.move').action_confirm(cr, uid, move_ids, context=context)
return True
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
""" Cancels both stock move and inventory
@return: True
"""
move_obj = self.pool.get('stock.move')
account_move_obj = self.pool.get('account.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
for move in inv.move_ids:
account_move_ids = account_move_obj.search(cr, uid, [('name', '=', move.name)])
if account_move_ids:
account_move_data_l = account_move_obj.read(cr, uid, account_move_ids, ['state'], context=context)
for account_move in account_move_data_l:
if account_move['state'] == 'posted':
raise osv.except_osv(_('User Error!'),
_('In order to cancel this inventory, you must first unpost related journal entries.'))
account_move_obj.unlink(cr, uid, [account_move['id']], context=context)
self.write(cr, uid, [inv.id], {'state': 'cancel'}, context=context)
return True
stock_inventory()
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_rec_name = "inventory_id"
_order = "inventory_id, location_name, product_code, product_name, prodlot_name"
def _get_product_name_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)
def _get_location_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)
def _get_prodlot_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id','company_id',type='many2one',relation='res.company',string='Company',store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id','state',type='char',string='Status',readonly=True),
'product_name': fields.related('product_id', 'name', type='char', string='Product name', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'product_code': fields.related('product_id', 'default_code', type='char', string='Product code', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'location_name': fields.related('location_id', 'complete_name', type='char', string='Location name', store={
'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number name', store={
'stock.production.lot': (_get_prodlot_change, ['name'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
}
def _default_stock_location(self, cr, uid, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
_defaults = {
'location_id': _default_stock_location
}
def on_change_product_id(self, cr, uid, ids, location_id, product, uom=False, to_date=False):
""" Changes UoM and name if product_id changes.
@param location_id: Location id
@param product: Changed product_id
@param uom: UoM product
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_qty': 0.0, 'product_uom': False, 'prod_lot_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product)
uom = uom or obj_product.uom_id.id
amount = self.pool.get('stock.location')._product_get(cr, uid, location_id, [product], {'uom': uom, 'to_date': to_date, 'compute_child': False})[product]
result = {'product_qty': amount, 'product_uom': uom, 'prod_lot_id': False}
return {'value': result}
stock_inventory_line()
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Name', size=128, required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'partner_id': fields.many2one('res.partner', 'Owner Address'),
'lot_input_id': fields.many2one('stock.location', 'Location Input', required=True, domain=[('usage','<>','view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', required=True, domain=[('usage','=','internal')]),
'lot_output_id': fields.many2one('stock.location', 'Location Output', required=True, domain=[('usage','<>','view')]),
}
def _default_lot_input_stock_id(self, cr, uid, context=None):
try:
lot_input_stock_model, lot_input_stock_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [lot_input_stock_id], 'read', context=context)
except (ValueError, orm.except_orm):
            # the user does not have read access on the location or it does not exist
lot_input_stock_id = False
return lot_input_stock_id
def _default_lot_output_id(self, cr, uid, context=None):
try:
lot_output_model, lot_output_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_output')
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [lot_output_id], 'read', context=context)
except (ValueError, orm.except_orm):
            # the user does not have read access on the location or it does not exist
lot_output_id = False
return lot_output_id
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'lot_input_id': _default_lot_input_stock_id,
'lot_stock_id': _default_lot_input_stock_id,
'lot_output_id': _default_lot_output_id,
}
stock_warehouse()
#----------------------------------------------------------
# "Empty" Classes that are used to vary from the original stock.picking (that are dedicated to the internal pickings)
# in order to offer a different usability with different views, labels, available reports/wizards...
#----------------------------------------------------------
class stock_picking_in(osv.osv):
_name = "stock.picking.in"
_inherit = "stock.picking"
_table = "stock_picking"
_description = "Incoming Shipments"
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
return self.pool['stock.picking'].read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights on the stock.picking object
return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules on the stock.picking object
return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
#override in order to trigger the workflow of stock.picking at the end of create, write and unlink operation
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
#override in order to fire the workflow signal on given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
def message_post(self, *args, **kwargs):
"""Post the message on stock.picking to be able to see it in the form view when using the chatter"""
return self.pool.get('stock.picking').message_post(*args, **kwargs)
def message_subscribe(self, *args, **kwargs):
"""Send the subscribe action on stock.picking model as it uses _name in request"""
return self.pool.get('stock.picking').message_subscribe(*args, **kwargs)
def message_unsubscribe(self, *args, **kwargs):
"""Send the unsubscribe action on stock.picking model to match with subscribe"""
return self.pool.get('stock.picking').message_unsubscribe(*args, **kwargs)
def default_get(self, cr, uid, fields_list, context=None):
# merge defaults from stock.picking with possible defaults defined on stock.picking.in
defaults = self.pool['stock.picking'].default_get(cr, uid, fields_list, context=context)
in_defaults = super(stock_picking_in, self).default_get(cr, uid, fields_list, context=context)
defaults.update(in_defaults)
return defaults
def copy(self, cr, uid, id, default=None, context=None):
return self.pool['stock.picking'].copy(cr, uid, id, default=default, context=context)
_columns = {
'backorder_id': fields.many2one('stock.picking.in', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
'state': fields.selection(
[('draft', 'Draft'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Receive'),
('done', 'Received'),
('cancel', 'Cancelled'),],
'Status', readonly=True, select=True,
help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Receive: products reserved, simply waiting for confirmation.\n
* Received: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""),
}
_defaults = {
'type': 'in',
}
class stock_picking_out(osv.osv):
_name = "stock.picking.out"
_inherit = "stock.picking"
_table = "stock_picking"
_description = "Delivery Orders"
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
return self.pool['stock.picking'].read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights to the stock.picking object
return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules to the stock.picking object
return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
        #override in order to trigger the workflow of stock.picking at the end of create, write and unlink operations
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
        #override in order to fire the workflow signal on the given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
def message_post(self, *args, **kwargs):
"""Post the message on stock.picking to be able to see it in the form view when using the chatter"""
return self.pool.get('stock.picking').message_post(*args, **kwargs)
def message_subscribe(self, *args, **kwargs):
"""Send the subscribe action on stock.picking model as it uses _name in request"""
return self.pool.get('stock.picking').message_subscribe(*args, **kwargs)
def message_unsubscribe(self, *args, **kwargs):
"""Send the unsubscribe action on stock.picking model to match with subscribe"""
return self.pool.get('stock.picking').message_unsubscribe(*args, **kwargs)
def default_get(self, cr, uid, fields_list, context=None):
# merge defaults from stock.picking with possible defaults defined on stock.picking.out
defaults = self.pool['stock.picking'].default_get(cr, uid, fields_list, context=context)
out_defaults = super(stock_picking_out, self).default_get(cr, uid, fields_list, context=context)
defaults.update(out_defaults)
return defaults
def copy(self, cr, uid, id, default=None, context=None):
return self.pool['stock.picking'].copy(cr, uid, id, default=default, context=context)
_columns = {
'backorder_id': fields.many2one('stock.picking.out', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
'state': fields.selection(
[('draft', 'Draft'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Deliver'),
('done', 'Delivered'),
('cancel', 'Cancelled'),],
'Status', readonly=True, select=True,
help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Deliver: products reserved, simply waiting for confirmation.\n
* Delivered: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""),
}
_defaults = {
'type': 'out',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
camptocamp/ngo-addons-backport
|
addons/stock/stock.py
|
Python
|
agpl-3.0
| 168,440
|
import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'fbc0b6db'.decode('hex') #pchmessagestart
P2P_PORT = 52112
ADDRESS_VERSION = 23 #pubkey_address
RPC_PORT = 52111
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'aliencoinaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: 40*100000000
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 30 # s
SYMBOL = 'ALN'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'aliencoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/aliencoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.aliencoin'), 'aliencoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://cryptexplorer.com/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://cryptexplorer.com/address/'
TX_EXPLORER_URL_PREFIX = 'http://cryptexplorer.com/tx/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.03e8
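# Hedged illustration (not part of the original network definition): on a
# Linux host CONF_FILE_FUNC() resolves relative to the home directory, e.g.
#   import os
#   expected = os.path.expanduser('~/.aliencoin/aliencoin.conf')
#   # CONF_FILE_FUNC() == expected  (when platform.system() is neither
#   # 'Windows' nor 'Darwin')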
|
ptcrypto/p2pool-adaptive
|
p2pool/bitcoin/networks/aliencoin.py
|
Python
|
gpl-3.0
| 1,202
|
from django import forms
from django.utils.dateparse import parse_date, parse_datetime, parse_time
def deserialize_by_field(value, field):
"""
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
"""
if isinstance(field, forms.DateTimeField):
value = parse_datetime(value)
elif isinstance(field, forms.DateField):
value = parse_date(value)
elif isinstance(field, forms.TimeField):
value = parse_time(value)
return value
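# Hedged usage sketch (not part of the original module): given a form field
# type, a serialized string round-trips back to the matching Python value, e.g.
#   field = forms.DateField()
#   deserialize_by_field("2016-02-01", field)
#   # -> datetime.date(2016, 2, 1)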
|
CTPUG/wafer
|
wafer/kv/utils.py
|
Python
|
isc
| 537
|
"""Helpers to execute scripts."""
import asyncio
from datetime import datetime, timedelta
from functools import partial
import itertools
import logging
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
from async_timeout import timeout
import voluptuous as vol
from homeassistant import exceptions
import homeassistant.components.device_automation as device_automation
from homeassistant.components.logger import LOGSEVERITY
import homeassistant.components.scene as scene
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ALIAS,
CONF_CHOOSE,
CONF_CONDITION,
CONF_CONDITIONS,
CONF_CONTINUE_ON_TIMEOUT,
CONF_COUNT,
CONF_DEFAULT,
CONF_DELAY,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_EVENT,
CONF_EVENT_DATA,
CONF_EVENT_DATA_TEMPLATE,
CONF_MODE,
CONF_REPEAT,
CONF_SCENE,
CONF_SEQUENCE,
CONF_TIMEOUT,
CONF_UNTIL,
CONF_VARIABLES,
CONF_WAIT_FOR_TRIGGER,
CONF_WAIT_TEMPLATE,
CONF_WHILE,
EVENT_HOMEASSISTANT_STOP,
SERVICE_TURN_ON,
)
from homeassistant.core import (
SERVICE_CALL_LIMIT,
Context,
HassJob,
HomeAssistant,
callback,
)
from homeassistant.helpers import condition, config_validation as cv, template
from homeassistant.helpers.event import async_call_later, async_track_template
from homeassistant.helpers.script_variables import ScriptVariables
from homeassistant.helpers.service import (
CONF_SERVICE_DATA,
async_prepare_call_from_config,
)
from homeassistant.helpers.trigger import (
async_initialize_triggers,
async_validate_trigger_config,
)
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import slugify
from homeassistant.util.dt import utcnow
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
SCRIPT_MODE_PARALLEL = "parallel"
SCRIPT_MODE_QUEUED = "queued"
SCRIPT_MODE_RESTART = "restart"
SCRIPT_MODE_SINGLE = "single"
SCRIPT_MODE_CHOICES = [
SCRIPT_MODE_PARALLEL,
SCRIPT_MODE_QUEUED,
SCRIPT_MODE_RESTART,
SCRIPT_MODE_SINGLE,
]
DEFAULT_SCRIPT_MODE = SCRIPT_MODE_SINGLE
CONF_MAX = "max"
DEFAULT_MAX = 10
CONF_MAX_EXCEEDED = "max_exceeded"
_MAX_EXCEEDED_CHOICES = list(LOGSEVERITY) + ["SILENT"]
DEFAULT_MAX_EXCEEDED = "WARNING"
ATTR_CUR = "current"
ATTR_MAX = "max"
ATTR_MODE = "mode"
DATA_SCRIPTS = "helpers.script"
_LOGGER = logging.getLogger(__name__)
_LOG_EXCEPTION = logging.ERROR + 1
_TIMEOUT_MSG = "Timeout reached, abort script."
_SHUTDOWN_MAX_WAIT = 60
def make_script_schema(schema, default_script_mode, extra=vol.PREVENT_EXTRA):
"""Make a schema for a component that uses the script helper."""
return vol.Schema(
{
**schema,
vol.Optional(CONF_MODE, default=default_script_mode): vol.In(
SCRIPT_MODE_CHOICES
),
vol.Optional(CONF_MAX, default=DEFAULT_MAX): vol.All(
vol.Coerce(int), vol.Range(min=2)
),
vol.Optional(CONF_MAX_EXCEEDED, default=DEFAULT_MAX_EXCEEDED): vol.All(
vol.Upper, vol.In(_MAX_EXCEEDED_CHOICES)
),
},
extra=extra,
)
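# Hedged usage sketch (not part of the original helper): a component can wrap
# its own configuration schema with the script-mode options above; the name
# EXAMPLE_SCRIPT_SCHEMA is illustrative only.
#   EXAMPLE_SCRIPT_SCHEMA = make_script_schema(
#       {vol.Required(CONF_SEQUENCE): cv.SCRIPT_SCHEMA},
#       SCRIPT_MODE_SINGLE,
#   )
#   # EXAMPLE_SCRIPT_SCHEMA({"sequence": [], "mode": "queued", "max": 5})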
STATIC_VALIDATION_ACTION_TYPES = (
cv.SCRIPT_ACTION_CALL_SERVICE,
cv.SCRIPT_ACTION_DELAY,
cv.SCRIPT_ACTION_WAIT_TEMPLATE,
cv.SCRIPT_ACTION_FIRE_EVENT,
cv.SCRIPT_ACTION_ACTIVATE_SCENE,
cv.SCRIPT_ACTION_VARIABLES,
)
async def async_validate_actions_config(
hass: HomeAssistant, actions: List[ConfigType]
) -> List[ConfigType]:
"""Validate a list of actions."""
return await asyncio.gather(
*[async_validate_action_config(hass, action) for action in actions]
)
async def async_validate_action_config(
hass: HomeAssistant, config: ConfigType
) -> ConfigType:
"""Validate config."""
action_type = cv.determine_script_action(config)
if action_type in STATIC_VALIDATION_ACTION_TYPES:
pass
elif action_type == cv.SCRIPT_ACTION_DEVICE_AUTOMATION:
platform = await device_automation.async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "action"
)
config = platform.ACTION_SCHEMA(config) # type: ignore
elif action_type == cv.SCRIPT_ACTION_CHECK_CONDITION:
if config[CONF_CONDITION] == "device":
platform = await device_automation.async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
config = platform.CONDITION_SCHEMA(config) # type: ignore
elif action_type == cv.SCRIPT_ACTION_WAIT_FOR_TRIGGER:
config[CONF_WAIT_FOR_TRIGGER] = await async_validate_trigger_config(
hass, config[CONF_WAIT_FOR_TRIGGER]
)
elif action_type == cv.SCRIPT_ACTION_REPEAT:
config[CONF_SEQUENCE] = await async_validate_actions_config(
hass, config[CONF_REPEAT][CONF_SEQUENCE]
)
elif action_type == cv.SCRIPT_ACTION_CHOOSE:
if CONF_DEFAULT in config:
config[CONF_DEFAULT] = await async_validate_actions_config(
hass, config[CONF_DEFAULT]
)
for choose_conf in config[CONF_CHOOSE]:
choose_conf[CONF_SEQUENCE] = await async_validate_actions_config(
hass, choose_conf[CONF_SEQUENCE]
)
else:
raise ValueError(f"No validation for {action_type}")
return config
class _StopScript(Exception):
"""Throw if script needs to stop."""
class _ScriptRun:
"""Manage Script sequence run."""
def __init__(
self,
hass: HomeAssistant,
script: "Script",
variables: Dict[str, Any],
context: Optional[Context],
log_exceptions: bool,
) -> None:
self._hass = hass
self._script = script
self._variables = variables
self._context = context
self._log_exceptions = log_exceptions
self._step = -1
self._action: Optional[Dict[str, Any]] = None
self._stop = asyncio.Event()
self._stopped = asyncio.Event()
def _changed(self):
if not self._stop.is_set():
self._script._changed() # pylint: disable=protected-access
async def _async_get_condition(self, config):
# pylint: disable=protected-access
return await self._script._async_get_condition(config)
def _log(self, msg, *args, level=logging.INFO):
self._script._log(msg, *args, level=level) # pylint: disable=protected-access
async def async_run(self) -> None:
"""Run script."""
try:
if self._stop.is_set():
return
self._log("Running %s", self._script.running_description)
for self._step, self._action in enumerate(self._script.sequence):
if self._stop.is_set():
break
await self._async_step(log_exceptions=False)
except _StopScript:
pass
finally:
self._finish()
async def _async_step(self, log_exceptions):
try:
await getattr(
self, f"_async_{cv.determine_script_action(self._action)}_step"
)()
except Exception as ex:
if not isinstance(ex, (_StopScript, asyncio.CancelledError)) and (
self._log_exceptions or log_exceptions
):
self._log_exception(ex)
raise
def _finish(self):
self._script._runs.remove(self) # pylint: disable=protected-access
if not self._script.is_running:
self._script.last_action = None
self._changed()
self._stopped.set()
async def async_stop(self) -> None:
"""Stop script run."""
self._stop.set()
await self._stopped.wait()
def _log_exception(self, exception):
action_type = cv.determine_script_action(self._action)
error = str(exception)
level = logging.ERROR
if isinstance(exception, vol.Invalid):
error_desc = "Invalid data"
elif isinstance(exception, exceptions.TemplateError):
error_desc = "Error rendering template"
elif isinstance(exception, exceptions.Unauthorized):
error_desc = "Unauthorized"
elif isinstance(exception, exceptions.ServiceNotFound):
error_desc = "Service not found"
else:
error_desc = "Unexpected error"
level = _LOG_EXCEPTION
self._log(
"Error executing script. %s for %s at pos %s: %s",
error_desc,
action_type,
self._step + 1,
error,
level=level,
)
def _get_pos_time_period_template(self, key):
try:
return cv.positive_time_period(
template.render_complex(self._action[key], self._variables)
)
except (exceptions.TemplateError, vol.Invalid) as ex:
self._log(
"Error rendering %s %s template: %s",
self._script.name,
key,
ex,
level=logging.ERROR,
)
raise _StopScript from ex
async def _async_delay_step(self):
"""Handle delay."""
delay = self._get_pos_time_period_template(CONF_DELAY)
self._script.last_action = self._action.get(CONF_ALIAS, f"delay {delay}")
self._log("Executing step %s", self._script.last_action)
delay = delay.total_seconds()
self._changed()
try:
async with timeout(delay):
await self._stop.wait()
except asyncio.TimeoutError:
pass
async def _async_wait_template_step(self):
"""Handle a wait template."""
if CONF_TIMEOUT in self._action:
delay = self._get_pos_time_period_template(CONF_TIMEOUT).total_seconds()
else:
delay = None
self._script.last_action = self._action.get(CONF_ALIAS, "wait template")
self._log(
"Executing step %s%s",
self._script.last_action,
"" if delay is None else f" (timeout: {timedelta(seconds=delay)})",
)
self._variables["wait"] = {"remaining": delay, "completed": False}
wait_template = self._action[CONF_WAIT_TEMPLATE]
wait_template.hass = self._hass
# check if condition already okay
if condition.async_template(self._hass, wait_template, self._variables):
self._variables["wait"]["completed"] = True
return
@callback
def async_script_wait(entity_id, from_s, to_s):
"""Handle script after template condition is true."""
self._variables["wait"] = {
"remaining": to_context.remaining if to_context else delay,
"completed": True,
}
done.set()
to_context = None
unsub = async_track_template(
self._hass, wait_template, async_script_wait, self._variables
)
self._changed()
done = asyncio.Event()
tasks = [
self._hass.async_create_task(flag.wait()) for flag in (self._stop, done)
]
try:
async with timeout(delay) as to_context:
await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
except asyncio.TimeoutError as ex:
if not self._action.get(CONF_CONTINUE_ON_TIMEOUT, True):
self._log(_TIMEOUT_MSG)
raise _StopScript from ex
self._variables["wait"]["remaining"] = 0.0
finally:
for task in tasks:
task.cancel()
unsub()
async def _async_run_long_action(self, long_task):
"""Run a long task while monitoring for stop request."""
async def async_cancel_long_task():
# Stop long task and wait for it to finish.
long_task.cancel()
try:
await long_task
except Exception: # pylint: disable=broad-except
pass
# Wait for long task while monitoring for a stop request.
stop_task = self._hass.async_create_task(self._stop.wait())
try:
await asyncio.wait(
{long_task, stop_task}, return_when=asyncio.FIRST_COMPLETED
)
        # If our task is cancelled, then cancel the long task, too. Note that if the
        # long task is cancelled for some other reason, the CancelledError will not
        # be raised here because of the call to asyncio.wait(); we check for that below.
except asyncio.CancelledError:
await async_cancel_long_task()
raise
finally:
stop_task.cancel()
if long_task.cancelled():
raise asyncio.CancelledError
if long_task.done():
# Propagate any exceptions that occurred.
long_task.result()
else:
# Stopped before long task completed, so cancel it.
await async_cancel_long_task()
async def _async_call_service_step(self):
"""Call the service specified in the action."""
self._script.last_action = self._action.get(CONF_ALIAS, "call service")
self._log("Executing step %s", self._script.last_action)
domain, service, service_data = async_prepare_call_from_config(
self._hass, self._action, self._variables
)
running_script = (
domain == "automation"
and service == "trigger"
or domain in ("python_script", "script")
)
# If this might start a script then disable the call timeout.
# Otherwise use the normal service call limit.
if running_script:
limit = None
else:
limit = SERVICE_CALL_LIMIT
service_task = self._hass.async_create_task(
self._hass.services.async_call(
domain,
service,
service_data,
blocking=True,
context=self._context,
limit=limit,
)
)
if limit is not None:
# There is a call limit, so just wait for it to finish.
await service_task
return
await self._async_run_long_action(service_task)
async def _async_device_step(self):
"""Perform the device automation specified in the action."""
self._script.last_action = self._action.get(CONF_ALIAS, "device automation")
self._log("Executing step %s", self._script.last_action)
platform = await device_automation.async_get_device_automation_platform(
self._hass, self._action[CONF_DOMAIN], "action"
)
await platform.async_call_action_from_config(
self._hass, self._action, self._variables, self._context
)
async def _async_scene_step(self):
"""Activate the scene specified in the action."""
self._script.last_action = self._action.get(CONF_ALIAS, "activate scene")
self._log("Executing step %s", self._script.last_action)
await self._hass.services.async_call(
scene.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: self._action[CONF_SCENE]},
blocking=True,
context=self._context,
)
async def _async_event_step(self):
"""Fire an event."""
self._script.last_action = self._action.get(
CONF_ALIAS, self._action[CONF_EVENT]
)
self._log("Executing step %s", self._script.last_action)
event_data = {}
for conf in [CONF_EVENT_DATA, CONF_EVENT_DATA_TEMPLATE]:
if conf not in self._action:
continue
try:
event_data.update(
template.render_complex(self._action[conf], self._variables)
)
except exceptions.TemplateError as ex:
self._log(
"Error rendering event data template: %s", ex, level=logging.ERROR
)
self._hass.bus.async_fire(
self._action[CONF_EVENT], event_data, context=self._context
)
async def _async_condition_step(self):
"""Test if condition is matching."""
self._script.last_action = self._action.get(
CONF_ALIAS, self._action[CONF_CONDITION]
)
cond = await self._async_get_condition(self._action)
check = cond(self._hass, self._variables)
self._log("Test condition %s: %s", self._script.last_action, check)
if not check:
raise _StopScript
async def _async_repeat_step(self):
"""Repeat a sequence."""
description = self._action.get(CONF_ALIAS, "sequence")
repeat = self._action[CONF_REPEAT]
saved_repeat_vars = self._variables.get("repeat")
def set_repeat_var(iteration, count=None):
repeat_vars = {"first": iteration == 1, "index": iteration}
if count:
repeat_vars["last"] = iteration == count
self._variables["repeat"] = repeat_vars
# pylint: disable=protected-access
script = self._script._get_repeat_script(self._step)
async def async_run_sequence(iteration, extra_msg=""):
self._log("Repeating %s: Iteration %i%s", description, iteration, extra_msg)
await self._async_run_script(script)
if CONF_COUNT in repeat:
count = repeat[CONF_COUNT]
if isinstance(count, template.Template):
try:
count = int(count.async_render(self._variables))
except (exceptions.TemplateError, ValueError) as ex:
self._log(
"Error rendering %s repeat count template: %s",
self._script.name,
ex,
level=logging.ERROR,
)
raise _StopScript from ex
extra_msg = f" of {count}"
for iteration in range(1, count + 1):
set_repeat_var(iteration, count)
await async_run_sequence(iteration, extra_msg)
if self._stop.is_set():
break
elif CONF_WHILE in repeat:
conditions = [
await self._async_get_condition(config) for config in repeat[CONF_WHILE]
]
for iteration in itertools.count(1):
set_repeat_var(iteration)
if self._stop.is_set() or not all(
cond(self._hass, self._variables) for cond in conditions
):
break
await async_run_sequence(iteration)
elif CONF_UNTIL in repeat:
conditions = [
await self._async_get_condition(config) for config in repeat[CONF_UNTIL]
]
for iteration in itertools.count(1):
set_repeat_var(iteration)
await async_run_sequence(iteration)
if self._stop.is_set() or all(
cond(self._hass, self._variables) for cond in conditions
):
break
if saved_repeat_vars:
self._variables["repeat"] = saved_repeat_vars
else:
del self._variables["repeat"]
async def _async_choose_step(self):
"""Choose a sequence."""
# pylint: disable=protected-access
choose_data = await self._script._async_get_choose_data(self._step)
for conditions, script in choose_data["choices"]:
if all(condition(self._hass, self._variables) for condition in conditions):
await self._async_run_script(script)
return
if choose_data["default"]:
await self._async_run_script(choose_data["default"])
async def _async_wait_for_trigger_step(self):
"""Wait for a trigger event."""
if CONF_TIMEOUT in self._action:
delay = self._get_pos_time_period_template(CONF_TIMEOUT).total_seconds()
else:
delay = None
self._script.last_action = self._action.get(CONF_ALIAS, "wait for trigger")
self._log(
"Executing step %s%s",
self._script.last_action,
"" if delay is None else f" (timeout: {timedelta(seconds=delay)})",
)
variables = {**self._variables}
self._variables["wait"] = {"remaining": delay, "trigger": None}
async def async_done(variables, context=None):
self._variables["wait"] = {
"remaining": to_context.remaining if to_context else delay,
"trigger": variables["trigger"],
}
done.set()
def log_cb(level, msg):
self._log(msg, level=level)
to_context = None
remove_triggers = await async_initialize_triggers(
self._hass,
self._action[CONF_WAIT_FOR_TRIGGER],
async_done,
self._script.domain,
self._script.name,
log_cb,
variables=variables,
)
if not remove_triggers:
return
self._changed()
done = asyncio.Event()
tasks = [
self._hass.async_create_task(flag.wait()) for flag in (self._stop, done)
]
try:
async with timeout(delay) as to_context:
await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
except asyncio.TimeoutError as ex:
if not self._action.get(CONF_CONTINUE_ON_TIMEOUT, True):
self._log(_TIMEOUT_MSG)
raise _StopScript from ex
self._variables["wait"]["remaining"] = 0.0
finally:
for task in tasks:
task.cancel()
remove_triggers()
async def _async_variables_step(self):
"""Set a variable value."""
self._script.last_action = self._action.get(CONF_ALIAS, "setting variables")
self._log("Executing step %s", self._script.last_action)
self._variables = self._action[CONF_VARIABLES].async_render(
self._hass, self._variables, render_as_defaults=False
)
async def _async_run_script(self, script):
"""Execute a script."""
await self._async_run_long_action(
self._hass.async_create_task(
script.async_run(self._variables, self._context)
)
)
class _QueuedScriptRun(_ScriptRun):
"""Manage queued Script sequence run."""
lock_acquired = False
async def async_run(self) -> None:
"""Run script."""
# Wait for previous run, if any, to finish by attempting to acquire the script's
# shared lock. At the same time monitor if we've been told to stop.
lock_task = self._hass.async_create_task(
self._script._queue_lck.acquire() # pylint: disable=protected-access
)
stop_task = self._hass.async_create_task(self._stop.wait())
try:
await asyncio.wait(
{lock_task, stop_task}, return_when=asyncio.FIRST_COMPLETED
)
except asyncio.CancelledError:
lock_task.cancel()
self._finish()
raise
finally:
stop_task.cancel()
self.lock_acquired = lock_task.done() and not lock_task.cancelled()
# If we've been told to stop, then just finish up. Otherwise, we've acquired the
# lock so we can go ahead and start the run.
if self._stop.is_set():
self._finish()
else:
await super().async_run()
def _finish(self):
# pylint: disable=protected-access
if self.lock_acquired:
self._script._queue_lck.release()
self.lock_acquired = False
super()._finish()
async def _async_stop_scripts_after_shutdown(hass, point_in_time):
"""Stop running Script objects started after shutdown."""
running_scripts = [
script for script in hass.data[DATA_SCRIPTS] if script["instance"].is_running
]
if running_scripts:
names = ", ".join([script["instance"].name for script in running_scripts])
_LOGGER.warning("Stopping scripts running too long after shutdown: %s", names)
await asyncio.gather(
*[
script["instance"].async_stop(update_state=False)
for script in running_scripts
]
)
async def _async_stop_scripts_at_shutdown(hass, event):
"""Stop running Script objects started before shutdown."""
async_call_later(
hass, _SHUTDOWN_MAX_WAIT, partial(_async_stop_scripts_after_shutdown, hass)
)
running_scripts = [
script
for script in hass.data[DATA_SCRIPTS]
if script["instance"].is_running and script["started_before_shutdown"]
]
if running_scripts:
names = ", ".join([script["instance"].name for script in running_scripts])
_LOGGER.debug("Stopping scripts running at shutdown: %s", names)
await asyncio.gather(
*[script["instance"].async_stop() for script in running_scripts]
)
_VarsType = Union[Dict[str, Any], MappingProxyType]
class Script:
"""Representation of a script."""
def __init__(
self,
hass: HomeAssistant,
sequence: Sequence[Dict[str, Any]],
name: str,
domain: str,
*,
# Used in "Running <running_description>" log message
running_description: Optional[str] = None,
change_listener: Optional[Callable[..., Any]] = None,
script_mode: str = DEFAULT_SCRIPT_MODE,
max_runs: int = DEFAULT_MAX,
max_exceeded: str = DEFAULT_MAX_EXCEEDED,
logger: Optional[logging.Logger] = None,
log_exceptions: bool = True,
top_level: bool = True,
variables: Optional[ScriptVariables] = None,
) -> None:
"""Initialize the script."""
all_scripts = hass.data.get(DATA_SCRIPTS)
if not all_scripts:
all_scripts = hass.data[DATA_SCRIPTS] = []
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, partial(_async_stop_scripts_at_shutdown, hass)
)
self._top_level = top_level
if top_level:
all_scripts.append(
{"instance": self, "started_before_shutdown": not hass.is_stopping}
)
self._hass = hass
self.sequence = sequence
template.attach(hass, self.sequence)
self.name = name
self.domain = domain
self.running_description = running_description or f"{domain} script"
self._change_listener = change_listener
self._change_listener_job = (
None if change_listener is None else HassJob(change_listener)
)
self.script_mode = script_mode
self._set_logger(logger)
self._log_exceptions = log_exceptions
self.last_action = None
self.last_triggered: Optional[datetime] = None
self._runs: List[_ScriptRun] = []
self.max_runs = max_runs
self._max_exceeded = max_exceeded
if script_mode == SCRIPT_MODE_QUEUED:
self._queue_lck = asyncio.Lock()
self._config_cache: Dict[Set[Tuple], Callable[..., bool]] = {}
self._repeat_script: Dict[int, Script] = {}
self._choose_data: Dict[int, Dict[str, Any]] = {}
self._referenced_entities: Optional[Set[str]] = None
self._referenced_devices: Optional[Set[str]] = None
self.variables = variables
self._variables_dynamic = template.is_complex(variables)
if self._variables_dynamic:
template.attach(hass, variables)
@property
def change_listener(self) -> Optional[Callable[..., Any]]:
"""Return the change_listener."""
return self._change_listener
@change_listener.setter
def change_listener(self, change_listener: Callable[..., Any]) -> None:
"""Update the change_listener."""
self._change_listener = change_listener
if (
self._change_listener_job is None
or change_listener != self._change_listener_job.target
):
self._change_listener_job = HassJob(change_listener)
def _set_logger(self, logger: Optional[logging.Logger] = None) -> None:
if logger:
self._logger = logger
else:
self._logger = logging.getLogger(f"{__name__}.{slugify(self.name)}")
def update_logger(self, logger: Optional[logging.Logger] = None) -> None:
"""Update logger."""
self._set_logger(logger)
for script in self._repeat_script.values():
script.update_logger(self._logger)
for choose_data in self._choose_data.values():
for _, script in choose_data["choices"]:
script.update_logger(self._logger)
if choose_data["default"]:
choose_data["default"].update_logger(self._logger)
def _changed(self):
if self._change_listener_job:
self._hass.async_run_hass_job(self._change_listener_job)
def _chain_change_listener(self, sub_script):
if sub_script.is_running:
self.last_action = sub_script.last_action
self._changed()
@property
def is_running(self) -> bool:
"""Return true if script is on."""
return len(self._runs) > 0
@property
def runs(self) -> int:
"""Return the number of current runs."""
return len(self._runs)
@property
def supports_max(self) -> bool:
"""Return true if the current mode support max."""
return self.script_mode in (SCRIPT_MODE_PARALLEL, SCRIPT_MODE_QUEUED)
@property
def referenced_devices(self):
"""Return a set of referenced devices."""
if self._referenced_devices is not None:
return self._referenced_devices
referenced = set()
for step in self.sequence:
action = cv.determine_script_action(step)
if action == cv.SCRIPT_ACTION_CHECK_CONDITION:
referenced |= condition.async_extract_devices(step)
elif action == cv.SCRIPT_ACTION_DEVICE_AUTOMATION:
referenced.add(step[CONF_DEVICE_ID])
self._referenced_devices = referenced
return referenced
@property
def referenced_entities(self):
"""Return a set of referenced entities."""
if self._referenced_entities is not None:
return self._referenced_entities
referenced = set()
for step in self.sequence:
action = cv.determine_script_action(step)
if action == cv.SCRIPT_ACTION_CALL_SERVICE:
data = step.get(CONF_SERVICE_DATA)
if not data:
continue
entity_ids = data.get(ATTR_ENTITY_ID)
if entity_ids is None or isinstance(entity_ids, template.Template):
continue
if isinstance(entity_ids, str):
entity_ids = [entity_ids]
for entity_id in entity_ids:
referenced.add(entity_id)
elif action == cv.SCRIPT_ACTION_CHECK_CONDITION:
referenced |= condition.async_extract_entities(step)
elif action == cv.SCRIPT_ACTION_ACTIVATE_SCENE:
referenced.add(step[CONF_SCENE])
self._referenced_entities = referenced
return referenced
def run(
self, variables: Optional[_VarsType] = None, context: Optional[Context] = None
) -> None:
"""Run script."""
asyncio.run_coroutine_threadsafe(
self.async_run(variables, context), self._hass.loop
).result()
async def async_run(
self,
run_variables: Optional[_VarsType] = None,
context: Optional[Context] = None,
started_action: Optional[Callable[..., Any]] = None,
) -> None:
"""Run script."""
if context is None:
self._log(
"Running script requires passing in a context", level=logging.WARNING
)
context = Context()
if self.is_running:
if self.script_mode == SCRIPT_MODE_SINGLE:
if self._max_exceeded != "SILENT":
self._log("Already running", level=LOGSEVERITY[self._max_exceeded])
return
if self.script_mode == SCRIPT_MODE_RESTART:
self._log("Restarting")
await self.async_stop(update_state=False)
elif len(self._runs) == self.max_runs:
if self._max_exceeded != "SILENT":
self._log(
"Maximum number of runs exceeded",
level=LOGSEVERITY[self._max_exceeded],
)
return
# If this is a top level Script then make a copy of the variables in case they
# are read-only, but more importantly, so as not to leak any variables created
# during the run back to the caller.
if self._top_level:
if self.variables:
try:
variables = self.variables.async_render(
self._hass,
run_variables,
)
except template.TemplateError as err:
self._log("Error rendering variables: %s", err, level=logging.ERROR)
raise
elif run_variables:
variables = dict(run_variables)
else:
variables = {}
variables["context"] = context
else:
variables = cast(dict, run_variables)
if self.script_mode != SCRIPT_MODE_QUEUED:
cls = _ScriptRun
else:
cls = _QueuedScriptRun
run = cls(
self._hass, self, cast(dict, variables), context, self._log_exceptions
)
self._runs.append(run)
if started_action:
self._hass.async_run_job(started_action)
self.last_triggered = utcnow()
self._changed()
try:
await asyncio.shield(run.async_run())
except asyncio.CancelledError:
await run.async_stop()
self._changed()
raise
async def _async_stop(self, update_state):
aws = [run.async_stop() for run in self._runs]
if not aws:
return
await asyncio.wait(aws)
if update_state:
self._changed()
async def async_stop(self, update_state: bool = True) -> None:
"""Stop running script."""
await asyncio.shield(self._async_stop(update_state))
async def _async_get_condition(self, config):
if isinstance(config, template.Template):
config_cache_key = config.template
else:
config_cache_key = frozenset((k, str(v)) for k, v in config.items())
cond = self._config_cache.get(config_cache_key)
if not cond:
cond = await condition.async_from_config(self._hass, config, False)
self._config_cache[config_cache_key] = cond
return cond
def _prep_repeat_script(self, step):
action = self.sequence[step]
step_name = action.get(CONF_ALIAS, f"Repeat at step {step+1}")
sub_script = Script(
self._hass,
action[CONF_REPEAT][CONF_SEQUENCE],
f"{self.name}: {step_name}",
self.domain,
running_description=self.running_description,
script_mode=SCRIPT_MODE_PARALLEL,
max_runs=self.max_runs,
logger=self._logger,
top_level=False,
)
sub_script.change_listener = partial(self._chain_change_listener, sub_script)
return sub_script
def _get_repeat_script(self, step):
sub_script = self._repeat_script.get(step)
if not sub_script:
sub_script = self._prep_repeat_script(step)
self._repeat_script[step] = sub_script
return sub_script
async def _async_prep_choose_data(self, step):
action = self.sequence[step]
step_name = action.get(CONF_ALIAS, f"Choose at step {step+1}")
choices = []
for idx, choice in enumerate(action[CONF_CHOOSE], start=1):
conditions = [
await self._async_get_condition(config)
for config in choice.get(CONF_CONDITIONS, [])
]
sub_script = Script(
self._hass,
choice[CONF_SEQUENCE],
f"{self.name}: {step_name}: choice {idx}",
self.domain,
running_description=self.running_description,
script_mode=SCRIPT_MODE_PARALLEL,
max_runs=self.max_runs,
logger=self._logger,
top_level=False,
)
sub_script.change_listener = partial(
self._chain_change_listener, sub_script
)
choices.append((conditions, sub_script))
if CONF_DEFAULT in action:
default_script = Script(
self._hass,
action[CONF_DEFAULT],
f"{self.name}: {step_name}: default",
self.domain,
running_description=self.running_description,
script_mode=SCRIPT_MODE_PARALLEL,
max_runs=self.max_runs,
logger=self._logger,
top_level=False,
)
default_script.change_listener = partial(
self._chain_change_listener, default_script
)
else:
default_script = None
return {"choices": choices, "default": default_script}
async def _async_get_choose_data(self, step):
choose_data = self._choose_data.get(step)
if not choose_data:
choose_data = await self._async_prep_choose_data(step)
self._choose_data[step] = choose_data
return choose_data
def _log(self, msg, *args, level=logging.INFO):
msg = f"%s: {msg}"
args = [self.name, *args]
if level == _LOG_EXCEPTION:
self._logger.exception(msg, *args)
else:
self._logger.log(level, msg, *args)
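# Hedged usage sketch (not part of the original helper): constructing and
# running a Script from a validated action sequence, inside a coroutine with
# a running HomeAssistant instance `hass` (illustrative values only):
#   script = Script(
#       hass,
#       cv.SCRIPT_SCHEMA([{"delay": {"seconds": 1}}]),
#       "example script",
#       "example_domain",
#       script_mode=SCRIPT_MODE_QUEUED,
#       max_runs=5,
#   )
#   await script.async_run(context=Context())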
|
sdague/home-assistant
|
homeassistant/helpers/script.py
|
Python
|
apache-2.0
| 38,558
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class QualityCheck(Document):
pass
|
VisheshHanda/production_backup
|
erpnext/stock/doctype/quality_check/quality_check.py
|
Python
|
gpl-3.0
| 278
|
#!/usr/bin/python
# File Name: StartConveyor.py
# Author: Saad Qazi
# Date: 2017/3/30
#
# Description: This Python script runs the stepper motor continuously until
#              an object is detected by the PIR motion sensor. Once an object
#              is detected, the conveyor belt stops, i.e. the stepper motor
#              stops rotating. The camera is then launched via the QrScan2.py
#              file and the QR code on the object is scanned, processed and
#              saved to a database hosted on the Amazon Web Services cloud.
#
from StepperMotor import StepMotorFwd,StepMotorRvs,StepMotorZer,StepMotorFwdHalf,StepMotorFwdHalf2
from gpiozero import MotionSensor
from LedControl import StartingConveyorBelt, BlinkForDetectionOfObject, BlinkForScanningQrCode,StartingConveyorBeltQuick
from TempAndVoltSensor import GetVoltage, GetTemperature
#from newQrScan import ScanQrCode
import time
import datetime
import multiprocessing
pir = MotionSensor(21) #PIR Motion sensor gpio connection using BCM
StartingConveyorBelt() #blink LED to notify that the conveyor is starting
def UpdateVoltage():
voltage = GetVoltage()
    voltage = voltage * 2 #multiply by 2 because of the voltage divider to get the actual voltage
print voltage
def UpdateTemp():
temp1 = GetTemperature()
print temp1
    temp2 = GetTemperature() #read twice to eliminate the previous value
print temp2
while True:
if pir.motion_detected:
print("Motion detected!")
current_state = pir.motion_detected
print "PIR State = ",current_state, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
UpdateTemp()
BlinkForScanningQrCode()
for i in range (3):
StepMotorFwdHalf2()
time.sleep(1)
if pir.motion_detected == True:
StepMotorRvs()
time.sleep(2.5)
elif pir.motion_detected != True: #PIR STATE FALSE
current_state = pir.motion_detected
print "PIR State = ",current_state, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if __name__ == '__main__':
jobs = []
for i in range(5):
p = multiprocessing.Process(target=UpdateVoltage)
jobs.append(p)
p.start()
StepMotorFwd()
# time.sleep(0.1)
|
saadq12/saadq12.github.io
|
raspberrypi_scripts/StartConveyor.py
|
Python
|
lgpl-3.0
| 2,290
|
#pragma repy restrictions.fewevents
# More than two events are not allowed:
# This tests that two (of two allowed) events can be consumed after
# the 'initialize' event ends (that is, its ending should free up an
# event).
def bar():
sleep(0.5)
def foo():
settimer(0,bar,[])
sleep(0.1)
if callfunc == 'initialize':
settimer(1,foo,[])
|
sburnett/seattle
|
repy/tests/ut_repytests_fewevents-justenoughevents.py
|
Python
|
mit
| 348
|
import re
USE_RAW_PREFIX = "regexp:"
def regexify(pattern):
"""
Normalize the wildcard pattern into a valid regex, or just strip prefix if the prefix matches USE_RAW_PREFIX
:param pattern:
:return:
"""
if pattern.strip().startswith(USE_RAW_PREFIX):
# Use raw
return pattern[len(USE_RAW_PREFIX) :]
else:
match_tokens = []
for tok in pattern.split("*"):
match_tokens.append(re.escape(tok))
return "^" + "(.*)".join(match_tokens) + "$"
def is_match(sanitizer, pattern, input_str):
"""
Utility method for running a pattern through the sanitizer and evaluating the input against generated regex
:param sanitizer:
:param pattern:
:param input_str:
:return:
"""
sanitized = sanitizer(pattern)
return True if re.match(sanitized, input_str) else False
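# Hedged illustration (not part of the original module): regexify() maps shell
# style wildcards onto anchored regexes, and is_match() evaluates them, e.g.
#   regexify("library/nginx*")        # -> "^library/nginx(.*)$"
#   regexify("regexp:^v1\\.[0-9]+$")  # -> "^v1\\.[0-9]+$" (prefix stripped)
#   is_match(regexify, "library/nginx*", "library/nginx:1.21")  # -> True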
|
anchore/anchore-engine
|
anchore_engine/util/matcher.py
|
Python
|
apache-2.0
| 866
|
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import collections
import errno
import inspect
import json
import math
import os
import re
import shutil
import string
import tempfile
import time
from castellan import key_manager
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils.secretutils import md5
from oslo_utils import units
import six
from cinder import compute
from cinder import coordination
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
nas_opts = [
cfg.StrOpt('nas_host',
default='',
help='IP address or Hostname of NAS system.'),
cfg.StrOpt('nas_login',
default='admin',
help='User name to connect to NAS system.'),
cfg.StrOpt('nas_password',
default='',
help='Password to connect to NAS system.',
secret=True),
cfg.PortOpt('nas_ssh_port',
default=22,
help='SSH port to use to connect to NAS system.'),
cfg.StrOpt('nas_private_key',
default='',
help='Filename of private key to use for SSH authentication.'),
cfg.StrOpt('nas_secure_file_operations',
default='auto',
help=('Allow network-attached storage systems to operate in a '
'secure environment where root level access is not '
'permitted. If set to False, access is as the root user '
'and insecure. If set to True, access is not as root. '
'If set to auto, a check is done to determine if this is '
'a new installation: True is used if so, otherwise '
'False. Default is auto.')),
cfg.StrOpt('nas_secure_file_permissions',
default='auto',
help=('Set more secure file permissions on network-attached '
'storage volume files to restrict broad other/world '
'access. If set to False, volumes are created with open '
'permissions. If set to True, volumes are created with '
'permissions for the cinder user and group (660). If '
'set to auto, a check is done to determine if '
'this is a new installation: True is used if so, '
'otherwise False. Default is auto.')),
cfg.StrOpt('nas_share_path',
default='',
help=('Path to the share to use for storing Cinder volumes. '
'For example: "/srv/export1" for an NFS server export '
'available at 10.0.5.10:/srv/export1 .')),
cfg.StrOpt('nas_mount_options',
help=('Options used to mount the storage backend file system '
'where Cinder volumes are stored.')),
]
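# Hedged illustration (not part of the original driver): a minimal cinder.conf
# backend stanza using the options above might look like the following; the
# section name is hypothetical and the address/export come from the option
# help text.
#   [nfs_backend_example]
#   nas_host = 10.0.5.10
#   nas_share_path = /srv/export1
#   nas_secure_file_operations = auto
#   nas_secure_file_permissions = auto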
volume_opts = [
cfg.StrOpt('nas_volume_prov_type',
default='thin',
choices=['thin', 'thick'],
help=('Provisioning type that will be used when '
'creating volumes.')),
]
CONF = cfg.CONF
CONF.register_opts(nas_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
def locked_volume_id_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named
with the id of the volume. This lock can be used by driver methods
to prevent conflicts with other operations modifying the same volume.
May be applied to methods that take a 'volume' or 'snapshot' argument.
"""
def lvo_inner1(inst, *args, **kwargs):
lock_tag = inst.driver_prefix
call_args = inspect.getcallargs(f, inst, *args, **kwargs)
if call_args.get('volume'):
volume_id = call_args['volume'].id
elif call_args.get('snapshot'):
volume_id = call_args['snapshot'].volume.id
else:
err_msg = _('The decorated method must accept either a volume or '
'a snapshot object')
raise exception.VolumeBackendAPIException(data=err_msg)
@utils.synchronized('%s-%s' % (lock_tag, volume_id),
external=False)
def lvo_inner2():
return f(inst, *args, **kwargs)
return lvo_inner2()
return lvo_inner1
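# Hedged usage sketch (not part of the original driver): a driver method that
# takes a volume can be serialized per volume id with the decorator above;
# the class and method names here are illustrative only.
#   class ExampleRemoteFSDriver(RemoteFSDriver):
#       @locked_volume_id_operation
#       def extend_volume(self, volume, new_size):
#           ...  # only one operation per volume id runs at a time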
class BackingFileTemplate(string.Template):
"""Custom Template for substitutions in backing files regex strings
Changes the default delimiter from '$' to '#' in order to prevent
clashing with the regex end of line marker '$'.
"""
delimiter = '#'
idpattern = r'[a-z][_a-z0-9]*'
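# Hedged illustration (not part of the original driver): the '#' delimiter
# lets '$' remain a literal end-of-line anchor in the produced regex, e.g.
#   tmpl = BackingFileTemplate("volume-#volume_id(.tmp-snap-#volume_id)?$")
#   tmpl.substitute(volume_id="42")
#   # -> "volume-42(.tmp-snap-42)?$"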
class RemoteFSDriver(driver.BaseVD):
"""Common base for drivers that work like NFS."""
driver_volume_type = None
driver_prefix = 'remotefs'
volume_backend_name = None
vendor_name = 'Open Source'
SHARE_FORMAT_REGEX = r'.+:/.+'
# We let the drivers inheriting this specify
# whether thin provisioning is supported or not.
_thin_provisioning_support = False
_thick_provisioning_support = False
def __init__(self, *args, **kwargs):
super(RemoteFSDriver, self).__init__(*args, **kwargs)
self.shares = {}
self._mounted_shares = []
self._execute_as_root = True
self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
self._supports_encryption = False
self.format = 'raw'
if self.configuration:
self.configuration.append_config_values(nas_opts)
self.configuration.append_config_values(volume_opts)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
@volume_utils.trace
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
data = {'export': volume.provider_location,
'name': volume.name}
if volume.provider_location in self.shares:
data['options'] = self.shares[volume.provider_location]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(RemoteFSDriver, self).do_setup(context)
# Validate the settings for our secure file options.
self.configuration.nas_secure_file_permissions = \
self.configuration.nas_secure_file_permissions.lower()
self.configuration.nas_secure_file_operations = \
self.configuration.nas_secure_file_operations.lower()
valid_secure_opts = ['auto', 'true', 'false']
secure_options = {'nas_secure_file_permissions':
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
LOG.debug('NAS config: %s', secure_options)
for opt_name, opt_value in secure_options.items():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
"'auto', 'true', or 'false'") % err_parms
LOG.error(msg)
raise exception.InvalidConfigurationValue(msg)
def _get_provisioned_capacity(self):
"""Returns the provisioned capacity.
Get the sum of sizes of volumes, snapshots and any other
files on the mountpoint.
"""
provisioned_size = 0.0
for share in self.shares.keys():
mount_path = self._get_mount_point_for_share(share)
out, _ = self._execute('du', '--bytes', '-s', mount_path,
run_as_root=self._execute_as_root)
provisioned_size += int(out.split()[0])
return round(provisioned_size / units.Gi, 2)
def _get_mount_point_base(self):
"""Returns the mount point base for the remote fs.
This method facilitates returning mount point base
for the specific remote fs. Override this method
in the respective driver to return the entry to be
used while attach/detach using brick in cinder.
If not overridden then it returns None without
raising exception to continue working for cases
when not used with brick.
"""
LOG.debug("Driver specific implementation needs to return"
" mount_point_base.")
return None
@staticmethod
def _validate_state(current_state,
acceptable_states,
obj_description='volume',
invalid_exc=exception.InvalidVolume):
if current_state not in acceptable_states:
message = _('Invalid %(obj_description)s state. '
'Acceptable states for this operation: '
'%(acceptable_states)s. '
'Current %(obj_description)s state: '
'%(current_state)s.')
raise invalid_exc(
message=message %
dict(obj_description=obj_description,
acceptable_states=acceptable_states,
current_state=current_state))
@volume_utils.trace
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
:returns: provider_location update dict for database
"""
if volume.encryption_key_id and not self._supports_encryption:
message = _("Encryption is not yet supported.")
raise exception.VolumeDriverException(message=message)
LOG.debug('Creating volume %(vol)s', {'vol': volume.id})
self._ensure_shares_mounted()
volume.provider_location = self._find_share(volume)
LOG.info('casted to %s', volume.provider_location)
self._do_create_volume(volume)
return {'provider_location': volume.provider_location}
def _do_create_volume(self, volume):
"""Create a volume on given remote share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume.size
encrypted = volume.encryption_key_id is not None
if encrypted:
encryption = volume_utils.check_encryption_provider(
volume,
volume.obj_context)
self._create_encrypted_volume_file(volume_path,
volume_size,
encryption,
volume.obj_context)
elif getattr(self.configuration,
self.driver_prefix + '_qcow2_volumes', False):
# QCOW2 volumes are inherently sparse, so this setting
# will override the _sparsed_volumes setting.
self._create_qcow2_file(volume_path, volume_size)
self.format = 'qcow2'
elif getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes', False):
self._create_sparsed_file(volume_path, volume_size)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
volume.admin_metadata['format'] = self.format
        # This is saved in an admin context because, when creating a volume
        # from an image, volume.save() would otherwise fail for non-admin
        # users (admin metadata can only be updated as admin).
with volume.obj_as_admin():
volume.save()
def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and mount them locally."""
mounted_shares = []
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
for share in self.shares:
try:
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
LOG.error('Exception during mounting %s', exc)
self._mounted_shares = mounted_shares
LOG.debug('Available shares %s', self._mounted_shares)
@volume_utils.trace
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s',
{'vol': volume.id, 'loc': volume.provider_location})
if not volume.provider_location:
LOG.warning('Volume %s does not have '
'provider_location specified, '
'skipping', volume.name)
return
self._ensure_share_mounted(volume.provider_location)
mounted_path = self.local_path(volume)
self._delete(mounted_path)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume.provider_location)
def create_export(self, ctx, volume, connector):
"""Exports the volume.
Can optionally return a dictionary of changes
to the volume object to be persisted.
"""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def delete_snapshot(self, snapshot):
"""Delete snapshot.
Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
"""
pass
def _delete(self, path):
# Note(lpetrut): this method is needed in order to provide
# interoperability with Windows as it will be overridden.
self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
def _create_sparsed_file(self, path, size):
"""Creates a sparse file of a given size in GiB."""
self._execute('truncate', '-s', '%sG' % size,
path, run_as_root=self._execute_as_root)
def _create_regular_file(self, path, size):
"""Creates a regular file of given size in GiB."""
block_size_mb = 1
block_count = size * units.Gi // (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=self._execute_as_root)
def _create_qcow2_file(self, path, size_gb):
"""Creates a QCOW2 file of a given size in GiB."""
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.Gi),
run_as_root=self._execute_as_root)
def _create_encrypted_volume_file(self,
path,
size_gb,
encryption,
context):
"""Create an encrypted volume.
This works by creating an encrypted image locally,
and then uploading it to the volume.
"""
cipher_spec = image_utils.decode_cipher(encryption['cipher'],
encryption['key_size'])
# TODO(enriquetaso): share this code w/ the RBD driver
# Fetch the key associated with the volume and decode the passphrase
keymgr = key_manager.API(CONF)
key = keymgr.get(context, encryption['encryption_key_id'])
passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8')
# create a file
tmp_dir = volume_utils.image_conversion_dir()
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key:
# TODO(enriquetaso): encrypt w/ aes256 cipher text
# (qemu-img feature) ?
with open(tmp_key.name, 'w') as f:
f.write(passphrase)
self._execute(
'qemu-img', 'create', '-f', 'qcow2',
'-o',
'encrypt.format=luks,'
'encrypt.key-secret=sec1,'
'encrypt.cipher-alg=%(cipher_alg)s,'
'encrypt.cipher-mode=%(cipher_mode)s,'
'encrypt.ivgen-alg=%(ivgen_alg)s' % cipher_spec,
'--object', 'secret,id=sec1,format=raw,file=' + tmp_key.name,
path, str(size_gb * units.Gi),
run_as_root=self._execute_as_root)
def _set_rw_permissions(self, path):
"""Sets access permissions for given NFS path.
Volume file permissions are set based upon the value of
secure_file_permissions: 'true' sets secure access permissions and
'false' sets more open (insecure) access permissions.
:param path: the volume file path.
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
LOG.debug('File path %(path)s is being set with permissions: '
'%(permissions)s',
{'path': path, 'permissions': permissions})
else:
permissions = 'ugo+rw'
LOG.warning('%(path)s is being set with open permissions: '
'%(perm)s', {'path': path, 'perm': permissions})
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_all(self, path):
"""Sets 666 permissions for the path."""
self._execute('chmod', 'ugo+rw', path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_owner(self, path):
"""Sets read-write permissions to the owner for the path."""
self._execute('chmod', 'u+rw', path,
run_as_root=self._execute_as_root)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
remotefs_share = volume.provider_location
return os.path.join(self._get_mount_point_for_share(remotefs_share),
volume.name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume.size,
run_as_root=self._execute_as_root)
        # NOTE (leseb): Set the virtual size of the image.
        # The raw conversion overwrote the destination file (which had the
        # correct size) with the fetched glance image size, so the initial
        # 'size' parameter was not honored. Resize the file back to the size
        # originally requested by the user, then verify the final virtual
        # size below.
image_utils.resize_image(self.local_path(volume), volume.size,
run_as_root=self._execute_as_root)
data = image_utils.qemu_img_info(self.local_path(volume),
run_as_root=self._execute_as_root)
virt_size = data.virtual_size // units.Gi
if virt_size != volume.size:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume.size)
+ (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
volume_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume),
volume,
run_as_root=self._execute_as_root)
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file=None):
self.shares = {}
if all((self.configuration.nas_host,
self.configuration.nas_share_path)):
LOG.debug('Using nas_host and nas_share_path configuration.')
nas_host = self.configuration.nas_host
nas_share_path = self.configuration.nas_share_path
share_address = '%s:%s' % (nas_host, nas_share_path)
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
msg = (_("Share %s ignored due to invalid format. Must "
"be of form address:/export. Please check the "
"nas_host and nas_share_path settings."),
share_address)
raise exception.InvalidConfigurationValue(msg)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip()
# Replace \040 with a space, to support paths with spaces
share_address = share_address.replace("\\040", " ")
share_opts = None
if len(share_info) > 1:
share_opts = share_info[1].strip()
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
LOG.error("Share %s ignored due to invalid format. "
"Must be of form address:/export.",
share_address)
continue
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
def _get_mount_point_for_share(self, path):
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = 'Open Source'
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
global_capacity = 0
global_free = 0
for share in self._mounted_shares:
capacity, free, used = self._get_capacity_info(share)
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
self._stats = data
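    # A sketch of the stats dict assembled above (hypothetical numbers), as
    # later reported to the scheduler:
    #
    #   {'volume_backend_name': 'nfs_backend', 'vendor_name': 'Open Source',
    #    'driver_version': '1.4.0', 'storage_protocol': 'nfs',
    #    'total_capacity_gb': 500.0, 'free_capacity_gb': 320.0,
    #    'reserved_percentage': 0, 'QoS_support': False}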
def _get_capacity_info(self, share):
raise NotImplementedError()
def _find_share(self, volume):
raise NotImplementedError()
def _ensure_share_mounted(self, share):
raise NotImplementedError()
def secure_file_operations_enabled(self):
"""Determine if driver is operating in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is operating
in a secure file mode; check our nas_secure_file_operations flag.
"""
if self.configuration.nas_secure_file_operations == 'true':
return True
return False
def set_nas_security_options(self, is_new_cinder_install):
"""Determine the setting to use for Secure NAS options.
        This method must be overridden by any child class wishing to use
        secure NAS file operations. This base method sets the NAS security
        options to false.
"""
doc_html = ("https://docs.openstack.org/cinder/latest/admin"
"/blockstorage-nfs-backend.html")
self.configuration.nas_secure_file_operations = 'false'
LOG.warning("The NAS file operations will be run as root: "
"allowing root level access at the storage backend. "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration.",
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
LOG.warning("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered "
"an insecure NAS environment. Please see %s for "
"information on a secure NFS configuration.",
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
"""Determine NAS security option setting when 'auto' is assigned.
This method determines the final 'true'/'false' setting of an NAS
security option when the default value of 'auto' has been detected.
If the nas option isn't 'auto' then its current value is used.
:param nas_option: The NAS security option value loaded from config.
:param mount_point: Mount where indicator file is written.
:param is_new_cinder_install: boolean for new Cinder installation.
:return string: 'true' or 'false' for new option setting.
"""
if nas_option == 'auto':
# For auto detection, we first check to see if we have been
# through this process before by checking for the existence of
# the Cinder secure environment indicator file.
file_name = '.cinderSecureEnvIndicator'
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
LOG.info('Cinder secure environment '
'indicator file exists.')
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
if is_new_cinder_install:
nas_option = 'true'
try:
with open(file_path, 'w') as fh:
fh.write('Detector file for Cinder secure '
'environment usage.\n')
fh.write('Do not delete this file.\n')
# Set the permissions on our special marker file to
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=self._execute_as_root)
LOG.info('New Cinder secure environment indicator'
' file created at path %s.', file_path)
except IOError as err:
                        LOG.error('Failed to create Cinder secure '
'environment indicator file: %s',
err)
if err.errno == errno.EACCES:
LOG.warning('Reverting to non-secure mode. Adjust '
'permissions at %s to allow the '
'cinder volume service write access '
'to use secure mode.',
mount_point)
nas_option = 'false'
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
nas_option = 'false'
return nas_option
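    # Summary of the 'auto' resolution above (non-'auto' values pass through
    # unchanged):
    #
    #   indicator file exists               -> 'true'
    #   missing + new Cinder install        -> 'true'  (marker file created)
    #   missing + new install, EACCES       -> 'false' (cannot write marker)
    #   missing + existing install          -> 'false'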
class RemoteFSSnapDriverBase(RemoteFSDriver):
"""Base class for remotefs drivers implementing qcow2 snapshots.
Driver must implement:
_local_volume_dir(self, volume)
"""
_VALID_IMAGE_EXTENSIONS = []
# The following flag may be overridden by the concrete drivers in order
# to avoid using temporary volume snapshots when creating volume clones,
# when possible.
_always_use_temp_snap_when_cloning = True
def __init__(self, *args, **kwargs):
self._remotefsclient = None
self.base = None
self._nova = None
super(RemoteFSSnapDriverBase, self).__init__(*args, **kwargs)
def do_setup(self, context):
super(RemoteFSSnapDriverBase, self).do_setup(context)
self._nova = compute.API()
def snapshot_revert_use_temp_snapshot(self):
# Considering that RemoteFS based drivers use COW images
# for storing snapshots, having chains of such images,
# creating a backup snapshot when reverting one is not
        # actually helpful.
return False
def _local_volume_dir(self, volume):
share = volume.provider_location
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume.name)
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot.volume)
snap_path = '%s.%s' % (vol_path, snapshot.id)
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.RemoteFSException(msg)
if not (os.path.exists(info_path) or os.name == 'nt'):
# We're not managing file permissions on Windows.
# Plus, 'truncate' is not available.
self._execute('truncate', "-s0", info_path,
run_as_root=self._execute_as_root)
self._set_rw_permissions(info_path)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
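    # Illustrative content of the .info file written above (shortened,
    # hypothetical snapshot ids), matching the layout described in the
    # _create_snapshot docstring further below:
    #
    #   {
    #    "active": "volume-1234.bbbb",
    #    "aaaa": "volume-1234.aaaa",
    #    "bbbb": "volume-1234.bbbb"
    #   }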
def _qemu_img_info_base(self, path, volume_name, basedir,
ext_bf_template=None,
force_share=False,
run_as_root=False):
"""Sanitize image_utils' qemu_img_info.
This code expects to deal only with relative filenames.
:param path: Path to the image file whose info is fetched
:param volume_name: Name of the volume
:param basedir: Path to backing files directory
:param ext_bf_template: Alt. string.Template for allowed backing files
:type object: BackingFileTemplate
        :param force_share: Whether to force fetching img info for images in use
        :param run_as_root: Whether to run with privileged permissions or not
"""
run_as_root = run_as_root or self._execute_as_root
info = image_utils.qemu_img_info(path,
force_share=force_share,
run_as_root=run_as_root)
if info.image:
info.image = os.path.basename(info.image)
if info.backing_file:
if self._VALID_IMAGE_EXTENSIONS:
valid_ext = r'(\.(%s))?' % '|'.join(
self._VALID_IMAGE_EXTENSIONS)
else:
valid_ext = ''
if ext_bf_template:
backing_file_template = ext_bf_template.substitute(
basedir=basedir, volname=volume_name, valid_ext=valid_ext
)
LOG.debug("Fetching qemu-img info with special "
"backing_file_template: %(bft)s", {
"bft": backing_file_template
})
else:
backing_file_template = \
"(%(basedir)s/[0-9a-f]+/)?%" \
"(volname)s(.(tmp-snap-)?[0-9a-f-]+)?%(valid_ext)s$" % {
'basedir': basedir,
'volname': volume_name,
'valid_ext': valid_ext,
}
if not re.match(backing_file_template, info.backing_file,
re.IGNORECASE):
raise exception.RemoteFSInvalidBackingFile(
path=path, backing_file=info.backing_file)
info.backing_file = os.path.basename(info.backing_file)
return info
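    # With the default template above, backing files such as the following
    # would be accepted for volume 'volume-1234' (hypothetical names), while
    # anything outside the base/mount directory is rejected:
    #
    #   volume-1234
    #   volume-1234.tmp-snap-<uuid>
    #   <basedir>/<hash>/volume-1234.<uuid>.vhdx   (if 'vhdx' is listed in
    #                                               _VALID_IMAGE_EXTENSIONS)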
def _qemu_img_info(self, path, volume_name):
raise NotImplementedError()
def _img_commit(self, path, passphrase_file=None, backing_file=None):
# TODO(eharney): this is not using the correct permissions for
# NFS snapshots
# It needs to run as root for volumes attached to instances, but
# does not when in secure mode.
cmd = ['qemu-img', 'commit']
if passphrase_file:
obj = ['--object',
'secret,id=s0,format=raw,file=%s' % passphrase_file]
image_opts = ['--image-opts']
src_opts = \
"file.filename=%(filename)s,encrypt.format=luks," \
"encrypt.key-secret=s0,backing.file.filename=%(backing)s," \
"backing.encrypt.key-secret=s0" % {
'filename': path,
'backing': backing_file,
}
path_no_to_delete = ['-d', src_opts]
cmd += obj + image_opts + path_no_to_delete
else:
cmd += ['-d', path]
self._execute(*cmd, run_as_root=self._execute_as_root)
self._delete(path)
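    # The unencrypted branch above reduces to, e.g. (hypothetical path):
    #   qemu-img commit -d /mnt/<hash>/volume-1234.aaaa
    # while the LUKS branch passes the key secret and backing file through
    # --object/--image-opts instead of a bare path.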
def _rebase_img(self, image, backing_file, volume_format,
passphrase_file=None):
        # qemu-img rebase must run as root, because it reads from the
        # backing file, which will be owned by qemu:qemu if attached to an
        # instance.
        # TODO(erlon): Sanity check this.
command = ['qemu-img', 'rebase', '-u']
# if encrypted
if passphrase_file:
objectdef = "secret,id=s0,file=%s" % passphrase_file
filename = "encrypt.key-secret=s0,"\
"file.filename=%(filename)s" % {'filename': image}
command += ['--object', objectdef, '-b', backing_file,
'-F', volume_format, '--image-opts', filename]
# not encrypted
else:
command += ['-b', backing_file, image, '-F', volume_format]
self._execute(*command, run_as_root=self._execute_as_root)
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information.
:param info_path: path to file
:param empty_if_missing: True=return empty dict if no file
"""
if not os.path.exists(info_path):
if empty_if_missing is True:
return {}
return json.loads(self._read_file(info_path))
def _get_higher_image_path(self, snapshot):
volume = snapshot.volume
info_path = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot.id]
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
backing_chain = self._get_backing_chain_for_path(
volume, active_file_path)
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if utils.paths_normcase_equal(
f.get('backing-filename', ''),
snapshot_file)),
None)
return higher_file
def _get_backing_chain_for_path(self, volume, path):
"""Returns list of dicts containing backing-chain information.
Includes 'filename', and 'backing-filename' for each
applicable entry.
Consider converting this to use --backing-chain and --output=json
when environment supports qemu-img 1.5.0.
:param volume: volume reference
:param path: path to image file at top of chain
"""
output = []
info = self._qemu_img_info(path, volume.name)
new_info = {}
new_info['filename'] = os.path.basename(path)
new_info['backing-filename'] = info.backing_file
output.append(new_info)
while new_info['backing-filename']:
filename = new_info['backing-filename']
path = os.path.join(self._local_volume_dir(volume), filename)
info = self._qemu_img_info(path, volume.name)
backing_filename = info.backing_file
new_info = {}
new_info['filename'] = filename
new_info['backing-filename'] = backing_filename
output.append(new_info)
return output
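    # Illustrative return value for a two-snapshot chain (hypothetical
    # names), newest image first; the loop stops once backing-filename is
    # empty:
    #
    #   [{'filename': 'volume-1234.bbbb', 'backing-filename': 'volume-1234.aaaa'},
    #    {'filename': 'volume-1234.aaaa', 'backing-filename': 'volume-1234'},
    #    {'filename': 'volume-1234', 'backing-filename': None}]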
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str.
Returns string in a hex format.
"""
if isinstance(base_str, six.text_type):
base_str = base_str.encode('utf-8')
return md5(base_str, usedforsecurity=False).hexdigest()
def _get_mount_point_for_share(self, share):
"""Return mount point for share.
:param share: example 172.18.194.100:/var/fs
"""
return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share):
"""Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs
"""
mount_point = self._get_mount_point_for_share(share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point,
run_as_root=self._execute_as_root)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
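    # The 'df --portability --block-size 1 <mountpoint>' output parsed above
    # looks roughly like (hypothetical numbers; fields are 1-byte blocks):
    #
    #   Filesystem         1-blocks         Used    Available Capacity Mounted on
    #   host:/export   536870912000 214748364800 322122547200      40% /mnt/...
    #
    # field index 1 -> size, field index 3 -> available.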
def _get_capacity_info(self, remotefs_share):
available, size = self._get_available_capacity(remotefs_share)
return size, available, size - available
def _get_mount_point_base(self):
return self.base
def _copy_volume_to_image(self, context, volume, image_service,
image_meta, store_id=None):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path, volume.name)
backing_file = info.backing_file
root_file_fmt = info.file_format
tmp_params = {
'prefix': '%s.temp_image.%s' % (volume.id, image_meta['id']),
'suffix': '.img'
}
with image_utils.temporary_file(**tmp_params) as temp_path:
if backing_file or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
image_utils.convert_image(active_file_path, temp_path, 'raw',
run_as_root=self._execute_as_root)
upload_path = temp_path
else:
upload_path = active_file_path
volume_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
volume,
run_as_root=self._execute_as_root)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if not snap_info:
# No info file = no snapshots exist
vol_path = os.path.basename(self.local_path(volume))
return vol_path
return snap_info['active']
def _local_path_active_image(self, volume):
active_fname = self.get_active_image_from_info(volume)
vol_dir = self._local_volume_dir(volume)
active_fpath = os.path.join(vol_dir, active_fname)
return active_fpath
def _get_snapshot_backing_file(self, snapshot):
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path)
vol_dir = self._local_volume_dir(snapshot.volume)
forward_file = snap_info[snapshot.id]
forward_path = os.path.join(vol_dir, forward_file)
# Find the file which backs this file, which represents the point
# in which this snapshot was created.
img_info = self._qemu_img_info(forward_path)
return img_info.backing_file
def _snapshots_exist(self, volume):
if not volume.provider_location:
return False
active_fpath = self._local_path_active_image(volume)
base_vol_path = self.local_path(volume)
return not utils.paths_normcase_equal(active_fpath, base_vol_path)
def _is_volume_attached(self, volume):
return volume.attach_status == fields.VolumeAttachStatus.ATTACHED
def _create_cloned_volume(self, volume, src_vref, context):
LOG.info('Cloning volume %(src)s to volume %(dst)s',
{'src': src_vref.id,
'dst': volume.id})
acceptable_states = ['available', 'backing-up', 'downloading']
self._validate_state(src_vref.status,
acceptable_states,
obj_description='source volume')
volume_name = CONF.volume_name_template % volume.id
# Create fake volume and snapshot objects
vol_attrs = ['provider_location', 'size', 'id', 'name', 'status',
'volume_type', 'metadata', 'obj_context']
Volume = collections.namedtuple('Volume', vol_attrs)
volume_info = Volume(provider_location=src_vref.provider_location,
size=src_vref.size,
id=volume.id,
name=volume_name,
status=src_vref.status,
volume_type=src_vref.volume_type,
metadata=src_vref.metadata,
obj_context=volume.obj_context)
if (self._always_use_temp_snap_when_cloning or
self._snapshots_exist(src_vref)):
kwargs = {
'volume_id': src_vref.id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': src_vref.size,
'display_name': 'tmp-snap-%s' % volume.id,
'display_description': None,
'volume_type_id': src_vref.volume_type_id,
'encryption_key_id': src_vref.encryption_key_id,
}
temp_snapshot = objects.Snapshot(context=context,
**kwargs)
temp_snapshot.create()
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(
temp_snapshot,
volume_info,
volume.size,
src_encryption_key_id=src_vref.encryption_key_id,
new_encryption_key_id=volume.encryption_key_id)
# remove temp snapshot after the cloning is done
temp_snapshot.status = fields.SnapshotStatus.DELETING
temp_snapshot.context = context.elevated()
temp_snapshot.save()
finally:
self._delete_snapshot(temp_snapshot)
temp_snapshot.destroy()
else:
self._copy_volume_image(self.local_path(src_vref),
self.local_path(volume_info))
self._extend_volume(volume_info, volume.size)
if src_vref.admin_metadata and 'format' in src_vref.admin_metadata:
volume.admin_metadata['format'] = (
src_vref.admin_metadata['format'])
        # Save under an admin context: when cloning from a bootable volume,
        # a plain volume.save() of admin_metadata would otherwise fail.
with volume.obj_as_admin():
volume.save()
return {'provider_location': src_vref.provider_location}
def _copy_volume_image(self, src_path, dest_path):
shutil.copyfile(src_path, dest_path)
self._set_rw_permissions(dest_path)
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot.id]
active_file = self.get_active_image_from_info(snapshot.volume)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot.volume), snapshot_file)
if utils.paths_normcase_equal(snapshot_file, active_file):
return
LOG.info('Deleting stale snapshot: %s', snapshot.id)
self._delete(snapshot_path)
del(snap_info[snapshot.id])
self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: RemoteFSException(msg) if operation fails
:returns: None
"""
LOG.debug('Deleting %(type)s snapshot %(snap)s of volume %(vol)s',
{'snap': snapshot.id, 'vol': snapshot.volume.id,
'type': ('online'
if self._is_volume_attached(snapshot.volume)
else 'offline')})
volume_status = snapshot.volume.status
acceptable_states = ['available', 'in-use', 'backing-up', 'deleting',
'downloading']
self._validate_state(volume_status, acceptable_states)
vol_path = self._local_volume_dir(snapshot.volume)
volume_path = os.path.join(vol_path, snapshot.volume.name)
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot.id not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.', snapshot.id)
return
snapshot_file = snap_info[snapshot.id]
LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot.volume),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(
snapshot_path,
snapshot.volume.name)
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
LOG.warning('No backing file found for %s, allowing '
'snapshot to be deleted.', snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(vol_path, base_file)
base_file_img_info = self._qemu_img_info(base_path,
snapshot.volume.name)
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot.volume)
if self._is_volume_attached(snapshot.volume):
# Online delete
context = snapshot._context
new_base_file = base_file_img_info.backing_file
base_id = None
for key, value in snap_info.items():
if utils.paths_normcase_equal(value,
base_file) and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
LOG.debug('No %(base_id)s found for %(file)s',
{'base_id': 'base_id', 'file': snapshot_file})
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
encrypted = snapshot.encryption_key_id is not None
if encrypted:
keymgr = key_manager.API(CONF)
encryption_key = snapshot.encryption_key_id
new_key = keymgr.get(snapshot.obj_context, encryption_key)
src_passphrase = \
binascii.hexlify(new_key.get_encoded()).decode('utf-8')
tmp_dir = volume_utils.image_conversion_dir()
if utils.paths_normcase_equal(snapshot_file, active_file):
# There is no top file
# T0 | T1 |
# base | snapshot_file | None
# (guaranteed to| (being deleted, |
# exist) | committed down) |
if encrypted:
with tempfile.NamedTemporaryFile(prefix='luks_',
dir=tmp_dir) as src_file:
with open(src_file.name, 'w') as f:
f.write(src_passphrase)
self._img_commit(snapshot_path,
passphrase_file=src_file.name,
backing_file=volume_path)
else:
self._img_commit(snapshot_path)
# Active file has changed
snap_info['active'] = base_file
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
# (guaranteed to | (being deleted, | (guaranteed to | (may exist)
# exist, not | committed down) | exist, needs |
# used here) | | ptr update) |
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = self._get_higher_image_path(snapshot)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.RemoteFSException(msg)
higher_id = next((i for i in snap_info
if utils.paths_normcase_equal(snap_info[i],
higher_file)
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.RemoteFSException(msg)
if encrypted:
with tempfile.NamedTemporaryFile(prefix='luks_',
dir=tmp_dir) as src_file:
with open(src_file.name, 'w') as f:
f.write(src_passphrase)
self._img_commit(snapshot_path,
passphrase_file=src_file.name,
backing_file=volume_path)
higher_file_path = os.path.join(vol_path, higher_file)
base_file_fmt = base_file_img_info.file_format
self._rebase_img(higher_file_path, volume_path,
base_file_fmt, src_file.name)
else:
self._img_commit(snapshot_path)
higher_file_path = os.path.join(vol_path, higher_file)
base_file_fmt = base_file_img_info.file_format
self._rebase_img(higher_file_path, base_file, base_file_fmt)
# Remove snapshot_file from info
del(snap_info[snapshot.id])
self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
LOG.debug('Creating volume %(vol)s from snapshot %(snap)s',
{'vol': volume.id, 'snap': snapshot.id})
status = snapshot.status
acceptable_states = ['available', 'backing-up']
self._validate_state(status, acceptable_states,
obj_description='snapshot',
invalid_exc=exception.InvalidSnapshot)
self._ensure_shares_mounted()
volume.provider_location = self._find_share(volume)
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
volume.size,
snapshot.volume.encryption_key_id,
volume.encryption_key_id)
return {'provider_location': volume.provider_location}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size,
src_encryption_key_id=None,
new_encryption_key_id=None):
raise NotImplementedError()
def _do_create_snapshot(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = os.path.join(
self._local_volume_dir(snapshot.volume),
backing_filename)
volume_path = os.path.join(
self._local_volume_dir(snapshot.volume),
snapshot.volume.name)
info = self._qemu_img_info(backing_path_full_path,
snapshot.volume.name)
backing_fmt = info.file_format
obj_context = snapshot.volume.obj_context
# create new qcow2 file
if snapshot.volume.encryption_key_id is None:
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s,backing_fmt=%s' %
(backing_path_full_path, backing_fmt),
new_snap_path,
"%dG" % snapshot.volume.size]
self._execute(*command, run_as_root=self._execute_as_root)
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
# qemu-img rebase must run as root for the same reasons as above
self._execute(*command, run_as_root=self._execute_as_root)
else:
# encrypted
keymgr = key_manager.API(CONF)
# Get key for the source volume using the context of this request.
key = keymgr.get(obj_context,
snapshot.volume.encryption_key_id)
passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8')
tmp_dir = volume_utils.image_conversion_dir()
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key:
with open(tmp_key.name, 'w') as f:
f.write(passphrase)
file_json_dict = {"driver": "qcow2",
"encrypt.key-secret": "s0",
"backing.encrypt.key-secret": "s0",
"backing.file.filename": volume_path,
"file": {"driver": "file",
"filename": backing_path_full_path,
}}
file_json = jsonutils.dumps(file_json_dict)
encryption = volume_utils.check_encryption_provider(
volume=snapshot.volume,
context=obj_context)
cipher_spec = image_utils.decode_cipher(encryption['cipher'],
encryption['key_size'])
                command = ('qemu-img', 'create', '-f', 'qcow2',
'-o', 'encrypt.format=luks,encrypt.key-secret=s1,'
'encrypt.cipher-alg=%(cipher_alg)s,'
'encrypt.cipher-mode=%(cipher_mode)s,'
'encrypt.ivgen-alg=%(ivgen_alg)s' % cipher_spec,
'-b', 'json:' + file_json,
'--object', 'secret,id=s0,file=' + tmp_key.name,
'--object', 'secret,id=s1,file=' + tmp_key.name,
new_snap_path)
self._execute(*command, run_as_root=self._execute_as_root)
command_path = 'encrypt.key-secret=s0,file.filename='
command = ['qemu-img', 'rebase',
'--object', 'secret,id=s0,file=' + tmp_key.name,
'--image-opts',
command_path + new_snap_path,
'-u',
'-b', backing_filename,
'-F', backing_fmt]
# qemu-img rebase must run as root for the same reasons as
# above
self._execute(*command, run_as_root=self._execute_as_root)
self._set_rw_permissions(new_snap_path)
# if in secure mode, chown new file
if self.secure_file_operations_enabled():
ref_file = backing_path_full_path
log_msg = 'Setting permissions: %(file)s -> %(user)s:%(group)s' % {
'file': ref_file, 'user': os.stat(ref_file).st_uid,
'group': os.stat(ref_file).st_gid}
LOG.debug(log_msg)
command = ['chown',
'--reference=%s' % ref_file,
new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot, providing a
qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
responsible for transitioning the VM between them and handling live
transfers of data between files as required.
If volume is detached, create locally with qemu-img. Cinder handles
manipulation of qcow2 files.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb', (* changed!)
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' } (* added!)
4. Snapshot deletion when volume is attached ('in-use' state):
* When first snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "aaaa" and
makes snapshot with id "bbbb" point to the base image.
Snapshot with id "bbbb" is the active image.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "bbbb" by
pulling volume-1234's data into volume-1234.bbbb. This
(logically) removes snapshot with id "bbbb" and the active
file remains the same.
volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb' }
TODO (deepakcs): Change this once Nova supports blockCommit for
in-use volumes.
5. Snapshot deletion when volume is detached ('available' state):
* When first snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The data from it is merged into its parent.
volume-1234.bbbb is rebased, having volume-1234 as its new
parent.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder does the snapshot
              deletion. volume-1234.bbbb is removed from the snapshot chain.
The base image, volume-1234 becomes the active image for this
volume again.
volume-1234
info file: { 'active': 'volume-1234' } (* changed!)
"""
LOG.debug('Creating %(type)s snapshot %(snap)s of volume %(vol)s',
{'snap': snapshot.id, 'vol': snapshot.volume.id,
'type': ('online'
if self._is_volume_attached(snapshot.volume)
else 'offline')})
status = snapshot.volume.status
acceptable_states = ['available', 'in-use', 'backing-up']
if (snapshot.display_name and
snapshot.display_name.startswith('tmp-snap-')):
# This is an internal volume snapshot. In order to support
# image caching, we'll allow creating/deleting such snapshots
# while having volumes in 'downloading' state.
acceptable_states.append('downloading')
self._validate_state(status, acceptable_states)
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path, empty_if_missing=True)
backing_filename = self.get_active_image_from_info(
snapshot.volume)
new_snap_path = self._get_new_snap_path(snapshot)
active = os.path.basename(new_snap_path)
if self._is_volume_attached(snapshot.volume):
self._create_snapshot_online(snapshot,
backing_filename,
new_snap_path)
# Update reference in the only attachment (no multi-attach support)
attachment = snapshot.volume.volume_attachment[0]
attachment.connection_info['name'] = active
# Let OVO know it has been updated
attachment.connection_info = attachment.connection_info
attachment.save()
else:
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
snap_info['active'] = active
snap_info[snapshot.id] = active
self._write_info_file(info_path, snap_info)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
# Perform online snapshot via Nova
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot.id
}
try:
result = self._nova.create_volume_snapshot(
snapshot.obj_context,
snapshot.volume_id,
connection_info)
LOG.debug('nova call result: %s', result)
except Exception:
LOG.exception('Call to Nova to create snapshot failed')
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(snapshot.obj_context, snapshot.id)
LOG.debug('Status of snapshot %(id)s is now %(status)s',
{'id': snapshot['id'],
'status': s['status']})
if s['status'] == fields.SnapshotStatus.CREATING:
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == fields.SnapshotStatus.ERROR:
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
elif (s['status'] == fields.SnapshotStatus.DELETING or
s['status'] == fields.SnapshotStatus.ERROR_DELETING):
msg = _('Snapshot %(id)s has been asked to be deleted while '
'waiting for it to become available. Perhaps a '
'concurrent request was made.') % {'id':
snapshot.id}
raise exception.RemoteFSConcurrentRequest(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot.id
raise exception.RemoteFSException(msg)
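    # The polling loop above backs off while waiting for Nova: 1s intervals
    # for roughly the first 10s, then 2s until 20s, 5s until 60s, and 10s
    # thereafter, up to the 600s timeout.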
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path)
update_format = False
if utils.paths_normcase_equal(info['active_file'],
info['snapshot_file']):
# blockRebase/Pull base into active
# info['base'] => snapshot_file
file_to_delete = info['base_file']
if info['base_id'] is None:
# Passing base=none to blockRebase ensures that
# libvirt blanks out the qcow2 backing file pointer
new_base = None
else:
new_base = info['new_base_file']
snap_info[info['base_id']] = info['snapshot_file']
delete_info = {'file_to_merge': new_base,
'merge_target_file': None, # current
'type': 'qcow2',
'volume_id': snapshot.volume.id}
del(snap_info[snapshot.id])
update_format = True
else:
# blockCommit snapshot into base
# info['base'] <= snapshot_file
# delete record of snapshot
file_to_delete = info['snapshot_file']
delete_info = {'file_to_merge': info['snapshot_file'],
'merge_target_file': info['base_file'],
'type': 'qcow2',
'volume_id': snapshot.volume.id}
del(snap_info[snapshot.id])
self._nova_assisted_vol_snap_delete(context, snapshot, delete_info)
if update_format:
snapshot.volume.admin_metadata['format'] = 'qcow2'
with snapshot.volume.obj_as_admin():
snapshot.volume.save()
# Write info file updated above
self._write_info_file(info_path, snap_info)
# Delete stale file
path_to_delete = os.path.join(
self._local_volume_dir(snapshot.volume), file_to_delete)
self._delete(path_to_delete)
def _nova_assisted_vol_snap_delete(self, context, snapshot, delete_info):
try:
self._nova.delete_volume_snapshot(
context,
snapshot.id,
delete_info)
except Exception:
LOG.exception('Call to Nova delete snapshot failed')
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 7200
while True:
s = db.snapshot_get(context, snapshot.id)
if s['status'] == fields.SnapshotStatus.DELETING:
if s['progress'] == '90%':
# Nova tasks completed successfully
break
else:
LOG.debug('status of snapshot %s is still "deleting"... '
'waiting', snapshot.id)
time.sleep(increment)
seconds_elapsed += increment
else:
msg = _('Unable to delete snapshot %(id)s, '
'status: %(status)s.') % {'id': snapshot.id,
'status': s['status']}
raise exception.RemoteFSException(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for deletion of snapshot %(id)s.') %\
{'id': snapshot.id}
raise exception.RemoteFSException(msg)
def _extend_volume(self, volume, size_gb):
raise NotImplementedError()
def _revert_to_snapshot(self, context, volume, snapshot):
raise NotImplementedError()
class RemoteFSSnapDriver(RemoteFSSnapDriverBase):
@locked_volume_id_operation
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
@locked_volume_id_operation
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
return self._delete_snapshot(snapshot)
@locked_volume_id_operation
def create_volume_from_snapshot(self, volume, snapshot):
return self._create_volume_from_snapshot(volume, snapshot)
# TODO: should be locking on src_vref id -- bug #1852449
@locked_volume_id_operation
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
return self._create_cloned_volume(volume, src_vref,
src_vref.obj_context)
@locked_volume_id_operation
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
return self._copy_volume_to_image(context, volume, image_service,
image_meta)
@locked_volume_id_operation
def extend_volume(self, volume, size_gb):
return self._extend_volume(volume, size_gb)
@locked_volume_id_operation
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert to specified snapshot."""
return self._revert_to_snapshot(context, volume, snapshot)
class RemoteFSSnapDriverDistributed(RemoteFSSnapDriverBase):
@coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
@coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
return self._delete_snapshot(snapshot)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def create_volume_from_snapshot(self, volume, snapshot):
return self._create_volume_from_snapshot(volume, snapshot)
# lock the source volume id first
@coordination.synchronized('{self.driver_prefix}-{src_vref.id}')
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
return self._create_cloned_volume(volume, src_vref,
src_vref.obj_context)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
return self._copy_volume_to_image(context, volume, image_service,
image_meta)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def extend_volume(self, volume, size_gb):
return self._extend_volume(volume, size_gb)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert to specified snapshot."""
return self._revert_to_snapshot(context, volume, snapshot)
class RemoteFSPoolMixin(object):
"""Drivers inheriting this will report each share as a pool."""
def _find_share(self, volume):
# We let the scheduler choose a pool for us.
pool_name = self._get_pool_name_from_volume(volume)
share = self._get_share_from_pool_name(pool_name)
return share
def _get_pool_name_from_volume(self, volume):
pool_name = volume_utils.extract_host(volume['host'],
level='pool')
return pool_name
def _get_pool_name_from_share(self, share):
raise NotImplementedError()
def _get_share_from_pool_name(self, pool_name):
# To be implemented by drivers using pools.
raise NotImplementedError()
def _update_volume_stats(self):
data = {}
pools = []
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = self.vendor_name
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
for share in self._mounted_shares:
(share_capacity,
share_free,
total_allocated) = self._get_capacity_info(share)
pool = {'pool_name': self._get_pool_name_from_share(share),
'total_capacity_gb': share_capacity / float(units.Gi),
'free_capacity_gb': share_free / float(units.Gi),
'provisioned_capacity_gb': (
total_allocated / float(units.Gi)),
'reserved_percentage': (
self.configuration.reserved_percentage),
'max_over_subscription_ratio': (
self.configuration.max_over_subscription_ratio),
'thin_provisioning_support': (
self._thin_provisioning_support),
'thick_provisioning_support': (
self._thick_provisioning_support),
'QoS_support': False,
}
pools.append(pool)
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
data['pools'] = pools
self._stats = data
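    # Each mounted share becomes one pool entry, e.g. (hypothetical values;
    # the pool name depends on the concrete driver's
    # _get_pool_name_from_share):
    #
    #   {'pool_name': 'host:/export', 'total_capacity_gb': 500.0,
    #    'free_capacity_gb': 320.0, 'provisioned_capacity_gb': 180.0,
    #    'reserved_percentage': 0, 'max_over_subscription_ratio': 20.0,
    #    'thin_provisioning_support': True,
    #    'thick_provisioning_support': False, 'QoS_support': False}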
class RevertToSnapshotMixin(object):
def _revert_to_snapshot(self, context, volume, snapshot):
"""Revert a volume to specified snapshot
The volume must not be attached. Only the latest snapshot
can be used.
"""
status = snapshot.volume.status
acceptable_states = ['available', 'reverting']
self._validate_state(status, acceptable_states)
LOG.debug('Reverting volume %(vol)s to snapshot %(snap)s',
{'vol': snapshot.volume.id, 'snap': snapshot.id})
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot.id]
active_file = snap_info['active']
if not utils.paths_normcase_equal(snapshot_file, active_file):
msg = _("Could not revert volume '%(volume_id)s' to snapshot "
"'%(snapshot_id)s' as it does not "
"appear to be the latest snapshot. Current active "
"image: %(active_file)s.")
raise exception.InvalidSnapshot(
msg % dict(snapshot_id=snapshot.id,
active_file=active_file,
volume_id=volume.id))
snapshot_path = os.path.join(
self._local_volume_dir(snapshot.volume), snapshot_file)
backing_filename = self._qemu_img_info(
snapshot_path, volume.name).backing_file
# We revert the volume to the latest snapshot by recreating the top
# image from the chain.
# This workflow should work with most (if not all) drivers inheriting
# this class.
self._delete(snapshot_path)
self._do_create_snapshot(snapshot, backing_filename, snapshot_path)
class RemoteFSManageableVolumesMixin(object):
_SUPPORTED_IMAGE_FORMATS = ['raw', 'qcow2']
_MANAGEABLE_IMAGE_RE = None
def _get_manageable_vol_location(self, existing_ref):
if 'source-name' not in existing_ref:
reason = _('The existing volume reference '
'must contain "source-name".')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
vol_remote_path = os.path.normcase(
os.path.normpath(existing_ref['source-name']))
for mounted_share in self._mounted_shares:
# We don't currently attempt to resolve hostnames. This could
# be troublesome for some distributed shares, which may have
# hostnames resolving to multiple addresses.
norm_share = os.path.normcase(os.path.normpath(mounted_share))
head, match, share_rel_path = vol_remote_path.partition(norm_share)
if not (match and share_rel_path.startswith(os.path.sep)):
continue
mountpoint = self._get_mount_point_for_share(mounted_share)
vol_local_path = os.path.join(mountpoint,
share_rel_path.lstrip(os.path.sep))
LOG.debug("Found mounted share referenced by %s.",
vol_remote_path)
if os.path.isfile(vol_local_path):
LOG.debug("Found volume %(path)s on share %(share)s.",
dict(path=vol_local_path, share=mounted_share))
return dict(share=mounted_share,
mountpoint=mountpoint,
vol_local_path=vol_local_path,
vol_remote_path=vol_remote_path)
else:
LOG.error("Could not find volume %s on the "
"specified share.", vol_remote_path)
break
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=_('Volume not found.'))
def _get_managed_vol_expected_path(self, volume, volume_location):
# This may be overridden by the drivers.
return os.path.join(volume_location['mountpoint'],
volume.name)
def _is_volume_manageable(self, volume_path, already_managed=False):
unmanageable_reason = None
if already_managed:
return False, _('Volume already managed.')
try:
img_info = self._qemu_img_info(volume_path, volume_name=None)
except exception.RemoteFSInvalidBackingFile:
return False, _("Backing file present.")
except Exception:
return False, _("Failed to open image.")
# We're double checking as some drivers do not validate backing
# files through '_qemu_img_info'.
if img_info.backing_file:
return False, _("Backing file present.")
if img_info.file_format not in self._SUPPORTED_IMAGE_FORMATS:
unmanageable_reason = _(
"Unsupported image format: '%s'.") % img_info.file_format
return False, unmanageable_reason
return True, None
def manage_existing(self, volume, existing_ref):
LOG.info('Managing volume %(volume_id)s with ref %(ref)s',
{'volume_id': volume.id, 'ref': existing_ref})
vol_location = self._get_manageable_vol_location(existing_ref)
vol_local_path = vol_location['vol_local_path']
manageable, unmanageable_reason = self._is_volume_manageable(
vol_local_path)
if not manageable:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=unmanageable_reason)
expected_vol_path = self._get_managed_vol_expected_path(
volume, vol_location)
self._set_rw_permissions(vol_local_path)
# This should be the last thing we do.
if expected_vol_path != vol_local_path:
LOG.info("Renaming imported volume image %(src)s to %(dest)s",
dict(src=vol_location['vol_local_path'],
dest=expected_vol_path))
os.rename(vol_location['vol_local_path'],
expected_vol_path)
return {'provider_location': vol_location['share']}
def _get_rounded_manageable_image_size(self, image_path):
image_size = image_utils.qemu_img_info(
image_path, run_as_root=self._execute_as_root).virtual_size
return int(math.ceil(float(image_size) / units.Gi))
def manage_existing_get_size(self, volume, existing_ref):
vol_location = self._get_manageable_vol_location(existing_ref)
volume_path = vol_location['vol_local_path']
return self._get_rounded_manageable_image_size(volume_path)
def unmanage(self, volume):
pass
def _get_manageable_volume(self, share, volume_path, managed_volume=None):
manageable, unmanageable_reason = self._is_volume_manageable(
volume_path, already_managed=managed_volume is not None)
size_gb = None
if managed_volume:
# We may not be able to query in-use images.
size_gb = managed_volume.size
else:
try:
size_gb = self._get_rounded_manageable_image_size(volume_path)
except Exception:
manageable = False
unmanageable_reason = (unmanageable_reason or
_("Failed to get size."))
mountpoint = self._get_mount_point_for_share(share)
norm_mountpoint = os.path.normcase(os.path.normpath(mountpoint))
norm_vol_path = os.path.normcase(os.path.normpath(volume_path))
ref = norm_vol_path.replace(norm_mountpoint, share).replace('\\', '/')
manageable_volume = {
'reference': {'source-name': ref},
'size': size_gb,
'safe_to_manage': manageable,
'reason_not_safe': unmanageable_reason,
'cinder_id': managed_volume.id if managed_volume else None,
'extra_info': None,
}
return manageable_volume
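    # Illustrative entry returned above for an unmanaged raw image
    # (hypothetical share and path):
    #
    #   {'reference': {'source-name': 'host:/export/disk-001.img'},
    #    'size': 10, 'safe_to_manage': True, 'reason_not_safe': None,
    #    'cinder_id': None, 'extra_info': None}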
def _get_share_manageable_volumes(self, share, managed_volumes):
manageable_volumes = []
mount_path = self._get_mount_point_for_share(share)
for dir_path, dir_names, file_names in os.walk(mount_path):
for file_name in file_names:
file_name = os.path.normcase(file_name)
img_path = os.path.join(dir_path, file_name)
# In the future, we may have the regex filtering images
# as a config option.
if (not self._MANAGEABLE_IMAGE_RE or
self._MANAGEABLE_IMAGE_RE.match(file_name)):
managed_volume = managed_volumes.get(
os.path.splitext(file_name)[0])
try:
manageable_volume = self._get_manageable_volume(
share, img_path, managed_volume)
manageable_volumes.append(manageable_volume)
except Exception as exc:
LOG.error(
"Failed to get manageable volume info: "
"'%(image_path)s'. Exception: %(exc)s.",
dict(image_path=img_path, exc=exc))
return manageable_volumes
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
manageable_volumes = []
managed_volumes = {vol.name: vol for vol in cinder_volumes}
for share in self._mounted_shares:
try:
manageable_volumes += self._get_share_manageable_volumes(
share, managed_volumes)
except Exception as exc:
LOG.error("Failed to get manageable volumes for "
"share %(share)s. Exception: %(exc)s.",
dict(share=share, exc=exc))
return volume_utils.paginate_entries_list(
manageable_volumes, marker, limit, offset, sort_keys, sort_dirs)
| mahak/cinder | cinder/volume/drivers/remotefs.py | Python | apache-2.0 | 91,429 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG demonstrating the usage of labels with different branches.
"""
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.utils.dates import days_ago
from airflow.utils.edgemodifier import Label
with DAG("example_branch_labels", schedule_interval="@daily", start_date=days_ago(2)) as dag:
ingest = DummyOperator(task_id="ingest")
analyse = DummyOperator(task_id="analyze")
check = DummyOperator(task_id="check_integrity")
describe = DummyOperator(task_id="describe_integrity")
error = DummyOperator(task_id="email_error")
save = DummyOperator(task_id="save")
report = DummyOperator(task_id="report")
ingest >> analyse >> check
check >> Label("No errors") >> save >> report # pylint: disable=expression-not-assigned
check >> Label("Errors found") >> describe >> error >> report # pylint: disable=expression-not-assigned
| nathanielvarona/airflow | airflow/example_dags/example_branch_labels.py | Python | apache-2.0 | 1,697 |
""" Tests for the api_admin app's views. """
import json
import ddt
import httpretty
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from oauth2_provider.models import get_application_model
from openedx.core.djangoapps.api_admin.models import ApiAccessRequest, ApiAccessConfig
from openedx.core.djangoapps.api_admin.tests.factories import (
ApiAccessRequestFactory, ApplicationFactory, CatalogFactory
)
from openedx.core.djangoapps.api_admin.tests.utils import VALID_DATA
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import UserFactory
Application = get_application_model() # pylint: disable=invalid-name
class ApiAdminTest(TestCase):
"""
Base class to allow API admin access to tests.
"""
def setUp(self):
super(ApiAdminTest, self).setUp()
ApiAccessConfig(enabled=True).save()
@skip_unless_lms
class ApiRequestViewTest(ApiAdminTest):
"""
Test the API Request View.
"""
def setUp(self):
super(ApiRequestViewTest, self).setUp()
self.url = reverse('api_admin:api-request')
password = 'abc123'
self.user = UserFactory(password=password)
self.client.login(username=self.user.username, password=password)
def test_get(self):
"""Verify that a logged-in can see the API request form."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_get_anonymous(self):
"""Verify that users must be logged in to see the page."""
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_get_with_existing_request(self):
"""
Verify that users who have already requested access are redirected
to the client creation page to see their status.
"""
ApiAccessRequestFactory(user=self.user)
response = self.client.get(self.url)
self.assertRedirects(response, reverse('api_admin:api-status'))
def _assert_post_success(self, response):
"""
Assert that a successful POST has been made, that the response
redirects correctly, and that the correct object has been created.
"""
self.assertRedirects(response, reverse('api_admin:api-status'))
api_request = ApiAccessRequest.objects.get(user=self.user)
self.assertEqual(api_request.status, ApiAccessRequest.PENDING)
return api_request
def test_post_valid(self):
"""Verify that a logged-in user can create an API request."""
self.assertFalse(ApiAccessRequest.objects.all().exists())
response = self.client.post(self.url, VALID_DATA)
self._assert_post_success(response)
def test_post_anonymous(self):
"""Verify that users must be logged in to create an access request."""
self.client.logout()
response = self.client.post(self.url, VALID_DATA)
self.assertEqual(response.status_code, 302)
self.assertFalse(ApiAccessRequest.objects.all().exists())
def test_get_with_feature_disabled(self):
"""Verify that the view can be disabled via ApiAccessConfig."""
ApiAccessConfig(enabled=False).save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_post_with_feature_disabled(self):
"""Verify that the view can be disabled via ApiAccessConfig."""
ApiAccessConfig(enabled=False).save()
response = self.client.post(self.url)
self.assertEqual(response.status_code, 404)
@skip_unless_lms
@override_settings(PLATFORM_NAME='edX')
@ddt.ddt
class ApiRequestStatusViewTest(ApiAdminTest):
"""
Tests of the API Status endpoint.
"""
def setUp(self):
super(ApiRequestStatusViewTest, self).setUp()
password = 'abc123'
self.user = UserFactory(password=password)
self.client.login(username=self.user.username, password=password)
self.url = reverse('api_admin:api-status')
def test_get_without_request(self):
"""
Verify that users who have not yet requested API access are
redirected to the API request form.
"""
response = self.client.get(self.url)
self.assertRedirects(response, reverse('api_admin:api-request'))
@ddt.data(
(ApiAccessRequest.APPROVED, 'Your request to access the edX Course Catalog API has been approved.'),
(ApiAccessRequest.PENDING, 'Your request to access the edX Course Catalog API is being processed.'),
(ApiAccessRequest.DENIED, 'Your request to access the edX Course Catalog API has been denied.'),
)
@ddt.unpack
def test_get_with_request(self, status, expected):
"""
Verify that users who have requested access can see a message
regarding their request status.
"""
ApiAccessRequestFactory(user=self.user, status=status)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertIn(expected, response.content)
def test_get_with_existing_application(self):
"""
Verify that if the user has created their client credentials, they
are shown on the status page.
"""
ApiAccessRequestFactory(user=self.user, status=ApiAccessRequest.APPROVED)
application = ApplicationFactory(user=self.user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
unicode_content = response.content.decode('utf-8')
self.assertIn(application.client_secret, unicode_content) # pylint: disable=no-member
self.assertIn(application.client_id, unicode_content) # pylint: disable=no-member
self.assertIn(application.redirect_uris, unicode_content) # pylint: disable=no-member
def test_get_anonymous(self):
"""Verify that users must be logged in to see the page."""
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_get_with_feature_disabled(self):
"""Verify that the view can be disabled via ApiAccessConfig."""
ApiAccessConfig(enabled=False).save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
@ddt.data(
(ApiAccessRequest.APPROVED, True, True),
(ApiAccessRequest.DENIED, True, False),
(ApiAccessRequest.PENDING, True, False),
(ApiAccessRequest.APPROVED, False, True),
(ApiAccessRequest.DENIED, False, False),
(ApiAccessRequest.PENDING, False, False),
)
@ddt.unpack
def test_post(self, status, application_exists, new_application_created):
"""
Verify that posting the form creates an application if the user is
approved, and does not otherwise. Also ensure that if the user
already has an application, it is deleted before a new
application is created.
"""
if application_exists:
old_application = ApplicationFactory(user=self.user)
ApiAccessRequestFactory(user=self.user, status=status)
self.client.post(self.url, {
'name': 'test.com',
'redirect_uris': 'http://example.com'
})
applications = Application.objects.filter(user=self.user)
if application_exists and new_application_created:
self.assertEqual(applications.count(), 1)
self.assertNotEqual(old_application, applications[0])
elif application_exists:
self.assertEqual(applications.count(), 1)
self.assertEqual(old_application, applications[0])
elif new_application_created:
self.assertEqual(applications.count(), 1)
else:
self.assertEqual(applications.count(), 0)
def test_post_with_errors(self):
ApiAccessRequestFactory(user=self.user, status=ApiAccessRequest.APPROVED)
response = self.client.post(self.url, {
'name': 'test.com',
'redirect_uris': 'not a url'
})
self.assertIn('Enter a valid URL.', response.content)
@skip_unless_lms
class ApiTosViewTest(ApiAdminTest):
"""
Tests of the API terms of service endpoint.
"""
def test_get_api_tos(self):
"""
Verify that the terms of service can be read.
"""
url = reverse('api_admin:api-tos')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn('Terms of Service', response.content)
class CatalogTest(ApiAdminTest):
"""
Test the catalog API.
"""
def setUp(self):
super(CatalogTest, self).setUp()
password = 'abc123'
self.user = UserFactory(password=password, is_staff=True)
self.client.login(username=self.user.username, password=password)
def mock_catalog_endpoint(self, data, catalog_id=None, method=httpretty.GET, status_code=200):
""" Mock the Course Catalog API's catalog endpoint. """
self.assertTrue(httpretty.is_enabled(), msg='httpretty must be enabled to mock Catalog API calls.')
url = '{root}/catalogs/'.format(root=settings.COURSE_CATALOG_API_URL.rstrip('/'))
if catalog_id:
url += '{id}/'.format(id=catalog_id)
httpretty.register_uri(
method,
url,
body=json.dumps(data),
content_type='application/json',
status=status_code
)
@skip_unless_lms
class CatalogSearchViewTest(CatalogTest):
"""
Test the catalog search endpoint.
"""
def setUp(self):
super(CatalogSearchViewTest, self).setUp()
self.url = reverse('api_admin:catalog-search')
def test_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
@httpretty.activate
def test_post(self):
catalog_user = UserFactory()
self.mock_catalog_endpoint({'results': []})
response = self.client.post(self.url, {'username': catalog_user.username})
self.assertRedirects(response, reverse('api_admin:catalog-list', kwargs={'username': catalog_user.username}))
def test_post_without_username(self):
response = self.client.post(self.url, {'username': ''})
self.assertRedirects(response, reverse('api_admin:catalog-search'))
@skip_unless_lms
class CatalogListViewTest(CatalogTest):
"""
Test the catalog list endpoint.
"""
def setUp(self):
super(CatalogListViewTest, self).setUp()
self.catalog_user = UserFactory()
self.url = reverse('api_admin:catalog-list', kwargs={'username': self.catalog_user.username})
@httpretty.activate
def test_get(self):
catalog = CatalogFactory(viewers=[self.catalog_user.username])
self.mock_catalog_endpoint({'results': [catalog.attributes]})
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertIn(catalog.name, response.content.decode('utf-8'))
@httpretty.activate
def test_get_no_catalogs(self):
"""Verify that the view works when no catalogs are set up."""
self.mock_catalog_endpoint({}, status_code=404)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
@httpretty.activate
def test_post(self):
catalog_data = {
'name': 'test-catalog',
'query': '*',
'viewers': [self.catalog_user.username]
}
catalog_id = 123
self.mock_catalog_endpoint(dict(catalog_data, id=catalog_id), method=httpretty.POST)
response = self.client.post(self.url, catalog_data)
self.assertEqual(httpretty.last_request().method, 'POST')
self.mock_catalog_endpoint(CatalogFactory().attributes, catalog_id=catalog_id)
self.assertRedirects(response, reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog_id}))
@httpretty.activate
def test_post_invalid(self):
catalog = CatalogFactory(viewers=[self.catalog_user.username])
self.mock_catalog_endpoint({'results': [catalog.attributes]})
response = self.client.post(self.url, {
'name': '',
'query': '*',
'viewers': [self.catalog_user.username]
})
self.assertEqual(response.status_code, 400)
# Assert that no POST was made to the catalog API
self.assertEqual(len([r for r in httpretty.httpretty.latest_requests if r.method == 'POST']), 0)
@skip_unless_lms
class CatalogEditViewTest(CatalogTest):
"""
Test edits to the catalog endpoint.
"""
def setUp(self):
super(CatalogEditViewTest, self).setUp()
self.catalog_user = UserFactory()
self.catalog = CatalogFactory(viewers=[self.catalog_user.username])
self.url = reverse('api_admin:catalog-edit', kwargs={'catalog_id': self.catalog.id})
@httpretty.activate
def test_get(self):
self.mock_catalog_endpoint(self.catalog.attributes, catalog_id=self.catalog.id)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertIn(self.catalog.name, response.content.decode('utf-8'))
@httpretty.activate
def test_delete(self):
self.mock_catalog_endpoint(
self.catalog.attributes,
method=httpretty.DELETE,
catalog_id=self.catalog.id
)
response = self.client.post(self.url, {'delete-catalog': 'on'})
self.assertRedirects(response, reverse('api_admin:catalog-search'))
self.assertEqual(httpretty.last_request().method, 'DELETE')
self.assertEqual(
httpretty.last_request().path,
'/api/v1/catalogs/{}/'.format(self.catalog.id)
)
self.assertEqual(len(httpretty.httpretty.latest_requests), 1)
@httpretty.activate
def test_edit(self):
self.mock_catalog_endpoint(self.catalog.attributes, method=httpretty.PATCH, catalog_id=self.catalog.id)
new_attributes = dict(self.catalog.attributes, **{'delete-catalog': 'off', 'name': 'changed'})
response = self.client.post(self.url, new_attributes)
self.mock_catalog_endpoint(new_attributes, catalog_id=self.catalog.id)
self.assertRedirects(response, reverse('api_admin:catalog-edit', kwargs={'catalog_id': self.catalog.id}))
@httpretty.activate
def test_edit_invalid(self):
self.mock_catalog_endpoint(self.catalog.attributes, catalog_id=self.catalog.id)
new_attributes = dict(self.catalog.attributes, **{'delete-catalog': 'off', 'name': ''})
response = self.client.post(self.url, new_attributes)
self.assertEqual(response.status_code, 400)
# Assert that no PATCH was made to the Catalog API
self.assertEqual(len([r for r in httpretty.httpretty.latest_requests if r.method == 'PATCH']), 0)
@skip_unless_lms
class CatalogPreviewViewTest(CatalogTest):
"""
Test the catalog preview endpoint.
"""
def setUp(self):
super(CatalogPreviewViewTest, self).setUp()
self.url = reverse('api_admin:catalog-preview')
@httpretty.activate
def test_get(self):
data = {'count': 1, 'results': ['test data'], 'next': None, 'prev': None}
httpretty.register_uri(
httpretty.GET,
'{root}/courses/'.format(root=settings.COURSE_CATALOG_API_URL.rstrip('/')),
body=json.dumps(data),
content_type='application/json'
)
response = self.client.get(self.url, {'q': '*'})
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), data)
def test_get_without_query(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {'count': 0, 'results': [], 'next': None, 'prev': None})
|
fintech-circle/edx-platform
|
openedx/core/djangoapps/api_admin/tests/test_views.py
|
Python
|
agpl-3.0
| 16,144
|
from rest_framework.throttling import SimpleRateThrottle
from django.contrib.auth.models import User
class UserLoginRateThrottle(SimpleRateThrottle):
scope = 'auth'
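    # Key throttle counters by the targeted account when the supplied username
    # matches an existing user, falling back to the request's network identity
    # otherwise, so repeated login attempts against one account share a limit.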
def get_cache_key(self, request, view):
user = User.objects.filter(username=request.data.get('username'))
ident = user[0].pk if user else self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
|
gaiaresources/biosys
|
biosys/apps/main/api/throttling.py
|
Python
|
apache-2.0
| 462
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2015 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import time
from django.utils.translation import ugettext as _
from wirecloud.commons.exceptions import HttpBadCredentials
from wirecloud.platform.plugins import WirecloudPlugin
from wirecloud.oauth2provider.models import Token
from wirecloud.oauth2provider.urls import urlpatterns
def auth_oauth2_token(auth_type, token):
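    # Resolve the bearer token and reject it once its creation time plus its
    # lifetime has elapsed; otherwise authenticate the request as the token's owner.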
token = Token.objects.get(token=token)
if (int(token.creation_timestamp) + int(token.expires_in)) <= time.time():
raise HttpBadCredentials(_('Expired access token'), 'Bearer realm="WireCloud", error="invalid_token", error_description="expired access token"')
return token.user
class OAuth2ProviderPlugin(WirecloudPlugin):
features = {
'OAuth2Provider': '0.5',
}
def get_urls(self):
return urlpatterns
def get_api_auth_backends(self):
return {
'Bearer': auth_oauth2_token,
}
|
rockneurotiko/wirecloud
|
src/wirecloud/oauth2provider/plugins.py
|
Python
|
agpl-3.0
| 1,710
|
from flask import Flask, abort
from flask.ext import restful
from flask.ext.restful import reqparse
import requests
import json
from common.puppetdb import PuppetDB
from common.fab import Fabber
class HbaseCompactResultsAPI(restful.Resource):
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument('hosts', type=str, required=True, help="The targetted hosts e.g. hbase3-rs, es2-sn, etc.")
self.parser.add_argument('date', type=str, required=True, help="Date like 2014-09-15")
super(HbaseCompactResultsAPI, self).__init__()
def get(self):
args = self.parser.parse_args()
print args['hosts'], args['date']
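        # Run (via Fabric) on every region server matched by PuppetDB, scraping
        # the HBase log for completed CompactionRequest entries on the given
        # date and reshaping each match into '##'-separated fields.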
result = Fabber().execute_command(command="grep %s /var/log/hbase/hbase-hbase-regionserver-hbase*.klout.log | grep -r CompactionRequest | grep completed | awk '{ print $1 \" \" $2 \"#\" $7 \"#\" $10$11 \"#\" $14$15}' | sed 's/#duration=/##/g' | sed 's/#fileSize=/##/g' | sed 's/#regionName=/##/g' | sed 's/^##//g'" % args['date']
, hosts=PuppetDB().find_nodes(args['hosts']))
lines = []
for l in result:
lines.extend(l.split('\n'))
result = []
for l in lines:
result.append(tuple(l.split('##')))
return result
|
nerd0/operator
|
resources/hbase.py
|
Python
|
mit
| 1,288
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_comments', '__first__'),
]
operations = [
migrations.CreateModel(
name='FluentComment',
fields=[
],
options={
'proxy': True,
'managed': False,
},
bases=('django_comments.comment',),
),
]
|
steventimberman/masterDebater
|
venv/lib/python2.7/site-packages/fluent_comments/migrations/0001_initial.py
|
Python
|
mit
| 506
|
#!/usr/bin/python
##########################################################
# * Python code for Driving a Stepper Motor Card using
# * Beaglebone Black running Debian 7 Linux distribution
##########################################################
import sys
import time
import select
from stepperpins import *
from gpio import *
def stepperExit (gpio):
gpioSetVal(gpio, val="0")
gpioUnexport(gpio)
return
def stepperInit (gpio):
gpioExport(gpio)
gpioSetDir(gpio, flag="out")
gpioSetVal(gpio, val="0")
return
def stepperOn (gpio):
gpioSetVal(gpio, val="1")
return
def stepperOff (gpio):
gpioSetVal(gpio, val="0")
return
def stepperInitAll():
stepperInit(str(STEPPER_1))
stepperInit(str(STEPPER_2))
stepperInit(str(STEPPER_3))
stepperInit(str(STEPPER_4))
def stepperExitAll():
stepperExit(str(STEPPER_1))
stepperExit(str(STEPPER_2))
stepperExit(str(STEPPER_3))
stepperExit(str(STEPPER_4))
print "\n=== Demonstration END ===\n"
return
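# The stepperSeq* helpers below drive the four driver inputs with two-coils-on
# full-step patterns; the number in each name appears to match the decimal
# value of the resulting on/off bit pattern (5 = 0101, 9 = 1001, 10 = 1010,
# 6 = 0110), though that reading of the names is only inferred from the code.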
def stepperSeq5():
stepperOn(str(STEPPER_1))
time.sleep(0.0001)
stepperOff(str(STEPPER_2))
time.sleep(0.0001)
stepperOn(str(STEPPER_3))
time.sleep(0.0001)
stepperOff(str(STEPPER_4))
time.sleep(0.0001)
return
def stepperSeq9():
stepperOn(str(STEPPER_1))
time.sleep(0.0001)
stepperOff(str(STEPPER_2))
time.sleep(0.0001)
stepperOff(str(STEPPER_3))
time.sleep(0.0001)
stepperOn(str(STEPPER_4))
time.sleep(0.0001)
return
def stepperSeq10():
stepperOff(str(STEPPER_1))
time.sleep(0.0001)
stepperOn(str(STEPPER_2))
time.sleep(0.0001)
stepperOff(str(STEPPER_3))
time.sleep(0.0001)
stepperOn(str(STEPPER_4))
time.sleep(0.0001)
return
def stepperSeq6():
stepperOff(str(STEPPER_1))
time.sleep(0.0001)
stepperOn(str(STEPPER_2))
time.sleep(0.0001)
stepperOn(str(STEPPER_3))
time.sleep(0.0001)
stepperOff(str(STEPPER_4))
time.sleep(0.0001)
return
def stepperDirLeft():
stepperSeq5()
time.sleep(0.01)
stepperSeq9()
time.sleep(0.01)
stepperSeq10()
time.sleep(0.01)
stepperSeq6()
time.sleep(0.01)
return
def stepperDirRight():
stepperSeq6()
time.sleep(0.01)
stepperSeq10()
time.sleep(0.01)
stepperSeq9()
time.sleep(0.01)
stepperSeq5()
time.sleep(0.01)
return
try:
print "\nStepper Motor Driver using Python\n"
print "-----------------------------------------------\n"
stepperInitAll()
while True:
for i in range(0, 12):
stepperDirLeft()
time.sleep(3)
for i in range(0, 12):
stepperDirRight()
time.sleep(3)
    stepperExitAll()
exit()
except KeyboardInterrupt:
stepperExitAll()
print "Program Exit due to CTRL-C"
exit()
sys.exit(0)
|
souravsingh/beaglebone-codes
|
09stepper_python/stepper.py
|
Python
|
apache-2.0
| 2,749
|